Compare commits
39 commits
190e28d895
...
6129e77698
Author | SHA1 | Date | |
---|---|---|---|
6129e77698 | |||
9faf7fe49b | |||
374b9d522c | |||
dd0194e57f | |||
83b1a739ae | |||
|
b0693e509c | ||
d9e0caa79e | |||
fa11f88adc | |||
cfd396dc8b | |||
4a622082dc | |||
cd74d0554f | |||
2739757426 | |||
3af084be32 | |||
62167be908 | |||
8cddeeed6d | |||
f9ccd0a577 | |||
b6e67f47bb | |||
7c16fff350 | |||
7ddce78b66 | |||
728217838d | |||
5b13031bc2 | |||
1f1b44febc | |||
4d2e2b806f | |||
aae0f71901 | |||
|
3bf2162ab1 | ||
9b51d9c534 | |||
f7162b07e2 | |||
6f61bb2e45 | |||
71399e0ad3 | |||
|
ac5b5737f5 | ||
7f97dfadd2 | |||
2d654e4744 | |||
c3b39f2689 | |||
3b9e63bb28 | |||
93fa156c04 | |||
7405be3280 | |||
4e37622a12 | |||
bf95bef5bc | |||
60862ca35f |
4
.github/workflows/deploy-prod.yml
vendored
|
@ -69,7 +69,3 @@ jobs:
|
|||
upload: "true"
|
||||
remove: "true"
|
||||
purgePullZone: "true"
|
||||
- name: Deploy GMI to Agate
|
||||
run: |
|
||||
rsync -avz --delete --exclude='*.html' --exclude='*.css' --exclude='*.js' -e ssh public/ deploy@${{ secrets.GMI_HOST }}:${{ secrets.GMI_CONTENT_PATH }}
|
||||
|
||||
|
|
|
@ -7,33 +7,21 @@ DefaultContentLanguage = "en"
|
|||
enableInlineShortcodes = true
|
||||
enableRobotsTXT = true
|
||||
|
||||
# define gemini media type
|
||||
[mediaTypes]
|
||||
[mediaTypes.'text/gemini']
|
||||
suffixes = ["gmi"]
|
||||
|
||||
# Automatically add content sections to main menu
|
||||
# sectionPagesMenu = "main"
|
||||
|
||||
[outputs]
|
||||
home = ['html', 'rss', 'gemini']
|
||||
home = ['html', 'rss']
|
||||
section = ['html']
|
||||
taxonomy = ['html']
|
||||
term = ['html', 'rss', 'gemini']
|
||||
page = ['html', 'rss', 'gemini']
|
||||
term = ['html', 'rss']
|
||||
page = ['html', 'rss']
|
||||
|
||||
# rename rss output from index.xml to feed.xml
|
||||
[outputFormats]
|
||||
[outputFormats.rss]
|
||||
mediatype = "application/rss"
|
||||
baseName = "feed"
|
||||
# gemini output
|
||||
[outputFormats.gemini]
|
||||
mediatype = "text/gemini"
|
||||
isPlainText = true
|
||||
isHTML = false
|
||||
protocol = "gemini://"
|
||||
permalinkable = true
|
||||
|
||||
[permalinks]
|
||||
posts = ":filename"
|
||||
|
|
|
@ -19,7 +19,7 @@ reply = true
|
|||
name = "John Bowdre"
|
||||
email = "jbowdre@omg.lol"
|
||||
username = "jbowdre"
|
||||
fedi = "@jbowdre@social.lol"
|
||||
fedi = "@john@srsbsns.lol"
|
||||
|
||||
[theme]
|
||||
palette = "runtimeterror"
|
||||
|
@ -46,6 +46,7 @@ taglines = [
|
|||
"coding crimes",
|
||||
"connection reset by peer",
|
||||
"converting caffeine into code",
|
||||
"cowardly refusing to display an empty web page",
|
||||
"creating new and exciting bugs",
|
||||
"cyclic dependency detected",
|
||||
"destructor cannot be overloaded",
|
||||
|
@ -123,6 +124,7 @@ taglines = [
|
|||
"write error: no space left on device",
|
||||
"you can't handle the exception",
|
||||
"you gotta stop letting your mama test you, man",
|
||||
"your browser is deprecated. please upgrade.",
|
||||
]
|
||||
|
||||
# Sidebar: social links
|
||||
|
@ -145,11 +147,6 @@ icon = "fa-solid fa-pen-to-square"
|
|||
title = "Weblog"
|
||||
url = "https://srsbsns.lol"
|
||||
|
||||
[[socialLinks]]
|
||||
icon = "fa-solid fa-satellite"
|
||||
title = "Gemini Capsule"
|
||||
url = "gemini://capsule.jbowdre.lol"
|
||||
|
||||
[[socialLinks]]
|
||||
icon = "fa-solid fa-circle-user"
|
||||
title = "CounterSocial"
|
||||
|
@ -158,7 +155,7 @@ url = "https://counter.social/@john_b"
|
|||
[[socialLinks]]
|
||||
icon = "fa fa-mastodon"
|
||||
title = "Mastodon"
|
||||
url = "https://social.lol/@jbowdre"
|
||||
url = "https://goto.srsbsns.lol/@john"
|
||||
|
||||
[[socialLinks]]
|
||||
icon = "fa-solid fa-envelope"
|
||||
|
@ -203,10 +200,6 @@ url = "https://proven.lol/cd10d3"
|
|||
title = "CounterSocial"
|
||||
url = "https://counter.social/@john_b"
|
||||
|
||||
[[verifyLinks]]
|
||||
title = "Mastodon"
|
||||
url = "https://social.lol/@jbowdre"
|
||||
|
||||
[[verifyLinks]]
|
||||
title = "GitHub"
|
||||
url = "https://github.com/jbowdre"
|
||||
|
|
|
@ -28,14 +28,13 @@ And in the free time I have left, I game on my Steam Deck.
|
|||
### See what I've been up to on:
|
||||
- [GitHub](https://github.com/jbowdre)
|
||||
- [Weblog](https://srsbsns.lol)
|
||||
- [Gemlog](https://capsule.jbowdre.lol/gemlog/)
|
||||
- [status.lol](https://status.jbowdre.lol)
|
||||
- [social.lol](https://social.lol/@jbowdre)
|
||||
- [Fediverse](https://goto.srsbsns.lol/@john)
|
||||
- [CounterSocial](https://counter.social/@john_b)
|
||||
- [/now](https://now.jbowdre.lol)
|
||||
|
||||
### Connect with me via:
|
||||
- [SimpleX Chat](/simplex/)
|
||||
- [SimpleX Chat](https://l.runtimeterror.dev/simplex-chat-invite)
|
||||
- [Signal](https://signal.me/#eu/lyHZbMnlM16O0w48j3rshYBofO0K-iXOt9LGwln7TS-fNKEHCrxH3La325q8IjRU)
|
||||
- [Matrix](https://matrix.to/#/@jbowdre:omg.lol)
|
||||
- [XMPP](https://conversations.im/i/jbowdre@omg.lol?omemo-sid-1374125881=a620f3c57733601a6646f6f13a71c86fc9be8dd4126fd158ef3e0a26beb0b434)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: "/changelog"
|
||||
date: "2024-05-26T21:19:08Z"
|
||||
lastmod: "2024-08-21T03:11:27Z"
|
||||
lastmod: "2024-10-20T03:51:49Z"
|
||||
description: "Maybe I should keep a log of all my site-related tinkering?"
|
||||
featured: false
|
||||
toc: false
|
||||
|
@ -10,6 +10,9 @@ categories: slashes
|
|||
---
|
||||
*Running list of config/layout changes to the site. The full changelog is of course [on GitHub](https://github.com/jbowdre/runtimeterror/commits/main/).*
|
||||
|
||||
**2024-10-19:**
|
||||
- Shut down Gemini mirror, removed links pointing to it
|
||||
|
||||
**2024-08-20:**
|
||||
- Added anchor links on section headings
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: "/colophon"
|
||||
date: "2024-05-26T22:30:58Z"
|
||||
lastmod: "2024-08-02T21:16:41Z"
|
||||
lastmod: "2024-10-20T03:51:35Z"
|
||||
description: "There's a lot that goes into this site. Let me tell you how it works."
|
||||
featured: false
|
||||
toc: true
|
||||
|
@ -21,7 +21,6 @@ categories: slashes
|
|||
- displays my latest status from [omg.lol](https://home.omg.lol/referred-by/jbowdre).
|
||||
- resolves via [Bunny DNS](https://bunny.net/dns/).
|
||||
- is published to / hosted on [Bunny Storage](https://bunny.net/storage/) and [Bunny CDN](https://bunny.net/cdn/) with a [GitHub Actions workflow](//further-down-the-bunny-hole/).
|
||||
- has a [Gemini](https://geminiprotocol.net) mirror at `gemini://gmi.runtimeterror.dev`. This is generated from a [Hugo gemtext post layout](https://github.com/jbowdre/runtimeterror/blob/main/layouts/_default/single.gmi), deployed to a [Vultr](https://www.vultr.com/) VPS through that same GitHub Actions workflow, and served with [Agate](https://github.com/mbrubeck/agate).
|
||||
|
||||
The post content is licensed under [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/); the site code is under the [MIT License](https://github.com/jbowdre/runtimeterror/blob/main/LICENSE).
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: "/homelab"
|
||||
date: "2024-05-26T21:30:51Z"
|
||||
lastmod: "2024-08-13T02:12:54Z"
|
||||
lastmod: "2024-10-23T02:10:13Z"
|
||||
aliases:
|
||||
- playground
|
||||
description: "The systems I use for fun and enrichment."
|
||||
|
@ -14,6 +14,8 @@ categories: slashes
|
|||
|
||||
Everything is connected to my [Tailscale](https://tailscale.com) tailnet, with a GitOps-managed ACL to allow access as needed. This lets me access and manage systems without really caring if they're local or remote. [Tailscale is magic](/secure-networking-made-simple-with-tailscale/).
|
||||
|
||||
The Docker containers are (generally) managed with [Portainer](https://www.portainer.io/) using configs [on GitHub](https://github.com/jbowdre/compositions).
|
||||
|
||||
### On Premise
|
||||
|
||||
**Proxmox VE 8 Cluster**
|
||||
|
@ -30,14 +32,16 @@ Everything is connected to my [Tailscale](https://tailscale.com) tailnet, with a
|
|||
- [Unifi USW Flex XG 10GbE Switch](https://store.ui.com/us/en/collections/unifi-switching-utility-10-gbps-ethernet/products/unifi-flex-xg)
|
||||
|
||||
The Proxmox cluster hosts a number of VMs and LXC containers:
|
||||
- `doc`: Ubuntu 22.04 Docker host for various on-prem container workloads, served via [Tailscale Serve](/tailscale-ssh-serve-funnel/#tailscale-serve) / [Cloudflare Tunnel](/publish-services-cloudflare-tunnel/):
|
||||
- `doc`: Ubuntu 22.04 Docker host for various on-prem container workloads, served via [Tailscale Serve](/tailscale-ssh-serve-funnel/#tailscale-serve) / [Caddy + Tailscale](/caddy-tailscale-alternative-cloudflare-tunnel/):
|
||||
- [Calibre Web](https://github.com/janeczku/calibre-web) for managing my ebooks
|
||||
- [Crowdsec](https://www.crowdsec.net/) log processor
|
||||
- [Cyberchef](https://github.com/gchq/CyberChef), the Cyber Swiss Army Knife
|
||||
- [Golink](https://github.com/tailscale/golink), a private shortlink service for tailnets
|
||||
- [Hashicorp Vault](https://www.vaultproject.io/) for secrets management
|
||||
- [Miniflux](https://miniflux.app/) feed reader
|
||||
- [Heimdall](https://github.com/linuxserver/Heimdall), an application dashboard and launcher
|
||||
- [IT-Tools](https://github.com/CorentinTh/it-tools) for handy online development-related tools
|
||||
- [Linkding](https://github.com/sissbruecker/linkding) bookmark manager serving [links.bowdre.net](https://links.bowdre.net/bookmarks/shared)
|
||||
- [RIPE Atlas Probe](https://www.ripe.net/analyse/internet-measurements/ripe-atlas/) for measuring internet connectivity
|
||||
- [SilverBullet](https://silverbullet.md), a web-based personal knowledge management system
|
||||
- [SilverBullet](https://silverbullet.md), a web-based personal knowledge management system ([post](/publish-silverbullet-notes-quartz/))
|
||||
- [Tailscale Golink](https://github.com/tailscale/golink), a private shortlink service ([post](/tailscale-golink-private-shortlinks-tailnet/))
|
||||
- `files`: Ubuntu 20.04 file server. Serves (selected) files semi-publicly through [Tailscale Funnel](/tailscale-ssh-serve-funnel/#tailscale-funnel)
|
||||
- `hassos`: [Home Assistant OS](https://www.home-assistant.io/installation/), manages all my "smart home" stuff ([post](/automating-camera-notifications-home-assistant-ntfy/))
|
||||
|
@ -63,23 +67,22 @@ This triad of cute little single-board computers will *eventually* be a combinat
|
|||
|
||||
I like to know what's flying overhead, and I'm also feeding flight data to [flightaware.com](https://flightaware.com) and [adsb.fi](https://adsb.fi).
|
||||
|
||||
### Cloud
|
||||
**Federated Raspberry Pi**
|
||||
- Raspberry Pi 4 Model B
|
||||
- 64GB Sandisk USB Drive
|
||||
|
||||
**[Oracle Cloud Infrastructure](https://www.oracle.com/cloud/free/)**
|
||||
- `git`: Ubuntu 22.04 [Forgejo](https://forgejo.org/) server for [git.bowdre.net](https://git.bowdre.net/explore/repos)
|
||||
- `smp2`: Ubuntu 22.04 [SimpleX](/simplex/) server
|
||||
Runs [GoToSocial](https://gotosocial.org/) in Docker to host my personal Mastodon-compatible ActivityPub server, [goto.srsbsns.lol](https://goto.srsbsns.lol) ([post](https://srsbsns.lol/going-to-gotosocial/)).
|
||||
|
||||
### Cloud
|
||||
|
||||
**[Google Cloud Platform](https://cloud.google.com/free/docs/free-cloud-features)**
|
||||
- `smp`: Ubuntu 22.04 [SimpleX](/simplex/) server
|
||||
- `smp1`: Ubuntu 22.04 [SimpleX](/simplex/) server
|
||||
|
||||
**[Vultr](https://www.vultr.com)**
|
||||
- `volly`: Ubuntu 22.04 Docker host for various workloads, served either through [Caddy](https://caddyserver.com/) or [Cloudflare Tunnel](/publish-services-cloudflare-tunnel/):
|
||||
- [Agate](https://github.com/mbrubeck/agate) Gemini server ([post](/gemini-capsule-gempost-github-actions/))
|
||||
- [Crowdsec](https://www.crowdsec.net) security engine
|
||||
- [Kineto](https://github.com/beelux/kineto) Gemini-to-HTTP proxy ([post](/gemini-capsule-gempost-github-actions/))
|
||||
- [Linkding](https://github.com/sissbruecker/linkding) bookmark manager serving [links.bowdre.net](https://links.bowdre.net/bookmarks/shared)
|
||||
- `volly`: Ubuntu 22.04 Docker host for various workloads, served either through [Caddy](https://caddyserver.com/) or [Caddy + Tailscale](/caddy-tailscale-alternative-cloudflare-tunnel/):
|
||||
- [Forgejo](https://forgejo.org/) server for [git.bowdre.net](https://git.bowdre.net/explore/repos) ([post](/gitea-self-hosted-git-server/))
|
||||
- [ntfy](https://ntfy.sh/) notification service ([post](/easy-push-notifications-with-ntfy/))
|
||||
- [SearXNG](https://docs.searxng.org/) self-hosted metasearch engine serving [grep.vpota.to](https://grep.vpota.to) ([post](https://srsbsns.lol/post/self-hosting-a-search-engine-iyjdlk6y))
|
||||
- [SimpleX](/simplex/) server (`smp2`)
|
||||
- [Uptime Kuma](https://github.com/louislam/uptime-kuma) for monitoring internal services (via Tailscale)
|
||||
- [vault-unseal](https://github.com/lrstanley/vault-unseal) to auto-unseal my on-prem Vault instance
|
||||
|
|
|
@ -71,7 +71,7 @@ I can then go to Service Broker and drag the new fields onto the Custom Form can
|
|||
![Service Broker form](unhgNySSzz.png)
|
||||
|
||||
### vRO workflow
|
||||
Okay, so I've got the information I want to pass on to vCenter. Now I need to whip up a new workflow in vRO that will actually do that (after [telling vRO how to connect to the vCenter](/vra8-custom-provisioning-part-two#interlude-connecting-vro-to-vcenter), of course). I'll want to call this after the VM has been provisioned, so I'll cleverly call the workflow "VM Post-Provisioning".
|
||||
Okay, so I've got the information I want to pass on to vCenter. Now I need to whip up a new workflow in vRO that will actually do that (after [telling vRO how to connect to the vCenter](/vra8-custom-provisioning-part-two/#interlude-connecting-vro-to-vcenter), of course). I'll want to call this after the VM has been provisioned, so I'll cleverly call the workflow "VM Post-Provisioning".
|
||||
![Naming the new workflow](X9JhgWx8x.png)
|
||||
|
||||
The workflow will have a single input from vRA, `inputProperties` of type `Properties`.
|
||||
|
|
|
@ -44,7 +44,7 @@ Gateway=192.168.1.1
|
|||
DNS=192.168.1.5
|
||||
```
|
||||
|
||||
By the way, that `192.168.1.5` address is my Windows DC/DNS server that I use for [my homelab environment](/vmware-home-lab-on-intel-nuc-9#basic-infrastructure). That's the DNS server that's configured on my Google Wifi router, and it will continue to handle resolution for local addresses.
|
||||
By the way, that `192.168.1.5` address is my Windows DC/DNS server that I use for [my homelab environment](/vmware-home-lab-on-intel-nuc-9/#basic-infrastructure). That's the DNS server that's configured on my Google Wifi router, and it will continue to handle resolution for local addresses.
|
||||
|
||||
I also disabled DHCP by setting `DHCP=no` in `/etc/systemd/network/99-dhcp-en.network`:
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@ categories: Code
|
|||
tags:
|
||||
- api
|
||||
- automation
|
||||
- cicd
|
||||
- containers
|
||||
- docker
|
||||
- iac
|
||||
|
@ -135,7 +136,7 @@ For more examples and ideas, visit:
|
|||
So the Docker piece is sorted; now for setting up the runner.
|
||||
|
||||
#### Install/Configure Runner
|
||||
I know I've been talking about a singular runner, but I'm actually seting up multiple instances of the runner on the same host to allow running jobs in parallel. I could probably support four simultaneous builds in my homelab but I'll settle two runners for now (after all, I only have two build flavors so far anyway).
|
||||
I know I've been talking about a singular runner, but I'm actually setting up multiple instances of the runner on the same host to allow running jobs in parallel. I could probably support four simultaneous builds in my homelab but I'll start with just two runners for now (after all, I only have two build flavors so far anyway).
|
||||
|
||||
Each runner instance needs its own directory so I create those under `/opt/github/`:
|
||||
|
||||
|
@ -438,6 +439,7 @@ If it fails for some reason, the `Retry on failure` step will try again, just in
|
|||
Here's the complete `.github/workflows/build.yml`, all in one code block:
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true}
|
||||
name: Build VM Templates
|
||||
|
||||
on:
|
||||
|
|
|
@ -24,7 +24,7 @@ I figured I could combine the excellent [Reolink integration for Home Assistant]
|
|||
|
||||
### Alert on motion detection
|
||||
{{% notice note "Ntfy Integration" %}}
|
||||
Since manually configuring ntfy in Home Assistant via the [RESTful Notifications integration](/easy-push-notifications-with-ntfy#notify-configuration), I found that a [ntfy-specific integration](https://github.com/ivanmihov/homeassistant-ntfy.sh) was available through the [Home Assistant Community Store](https://hacs.xyz/) addon. That setup is a bit more flexible so I've switched my setup to use it instead:
|
||||
Since manually configuring ntfy in Home Assistant via the [RESTful Notifications integration](/easy-push-notifications-with-ntfy/#notify-configuration), I found that a [ntfy-specific integration](https://github.com/ivanmihov/homeassistant-ntfy.sh) was available through the [Home Assistant Community Store](https://hacs.xyz/) addon. That setup is a bit more flexible so I've switched my setup to use it instead:
|
||||
```yaml
|
||||
# configuration.yaml
|
||||
notify:
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
---
|
||||
title: "Caddy + Tailscale as an Alternative to Cloudflare Tunnel"
|
||||
date: "2024-09-22T19:12:52Z"
|
||||
# lastmod: 2024-09-22
|
||||
description: "Combining the magic of Caddy and Tailscale to serve web content from my homelab - and declaring independence from Cloudflare in the process."
|
||||
featured: false
|
||||
toc: true
|
||||
reply: true
|
||||
categories: Self-Hosting
|
||||
tags:
|
||||
- caddy
|
||||
- cloud
|
||||
- containers
|
||||
- docker
|
||||
- networking
|
||||
- selfhosting
|
||||
- tailscale
|
||||
---
|
||||
Earlier this year, I [shared how I used Cloudflare Tunnel](/publish-services-cloudflare-tunnel/) to publish some self-hosted resources on the internet without needing to expose any part of my home network. Since then, I've [moved many resources to bunny.net](https://srsbsns.lol/i-just-hopped-to-bunnynet/) ([including this website](/further-down-the-bunny-hole/)). I left some domains at Cloudflare, primarily just to benefit from the convenience of Cloudflare Tunnel, but I wasn't thrilled about being so dependent upon a single company that controls so much of the internet.
|
||||
|
||||
However a [post on Tailscale's blog this week](https://tailscale.com/blog/last-reverse-proxy-you-need) reminded me that there was another easy approach using solutions I'm already using heavily: [Caddy](/tags/caddy) and [Tailscale](/tags/tailscale). Caddy is a modern web server (that works great as a reverse proxy with automatic HTTPS), and Tailscale [makes secure networking simple](/secure-networking-made-simple-with-tailscale/). Combining the two allows me to securely serve web services without any messy firewall configurations.
|
||||
|
||||
So here's how I ditched Cloudflare Tunnel in favor of Caddy + Tailscale.
|
||||
|
||||
### Docker Compose config
|
||||
To keep things simple, I'll deploy the [same speedtest app I used to demo Cloudflare Tunnel](https://runtimeterror.dev/publish-services-cloudflare-tunnel/#speedtest-demo) on a Docker host located in my [homelab](/homelab).
|
||||
|
||||
Here's a basic config to run [openspeedtest](https://github.com/openspeedtest/Docker-Image) on HTTP only (defaults to port `3000`):
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true}
|
||||
services:
|
||||
speedtest:
|
||||
image: openspeedtest/latest
|
||||
container_name: speedtest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 3000:3000
|
||||
```
|
||||
|
||||
### A Tailscale sidecar
|
||||
I can easily add [Tailscale in a sidecar container](/tailscale-serve-docker-compose-sidecar/) to make my new speedtest available within my tailnet:
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true}
|
||||
services:
|
||||
speedtest:
|
||||
image: openspeedtest/latest
|
||||
container_name: speedtest
|
||||
restart: unless-stopped
|
||||
ports: # [tl! --:1]
|
||||
- 3000:3000
|
||||
network_mode: service:tailscale # [tl! ++]
|
||||
|
||||
tailscale: # [tl! ++:12]
|
||||
image: tailscale/tailscale:latest
|
||||
container_name: speedtest-tailscale
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
TS_AUTHKEY: ${TS_AUTHKEY:?err}
|
||||
TS_HOSTNAME: ${TS_HOSTNAME:-ts-docker}
|
||||
TS_STATE_DIR: /var/lib/tailscale/
|
||||
volumes:
|
||||
- ./ts_data:/var/lib/tailscale/
|
||||
```
|
||||
|
||||
Note that I no longer need to ask the host to expose port `3000` from the container; instead, I bridge the `speedtest` container's network with that of the `tailscale` container.
|
||||
|
||||
And I create a simple `.env` file with the secrets required for connecting to Tailscale using a [pre-authentication key](https://tailscale.com/kb/1085/auth-keys):
|
||||
|
||||
```shell
|
||||
# torchlight! {"lineNumbers":true}
|
||||
TS_AUTHKEY=tskey-auth-somestring-somelongerstring
|
||||
TS_HOSTNAME=speedtest
|
||||
```
|
||||
|
||||
After a quick `docker compose up -d` I can access my new speedtest at `http://speedtest.tailnet-name.ts.net:3000`. Next I just need to put it behind Caddy.
|
||||
|
||||
### Caddy configuration
|
||||
I already have [Caddy](https://caddyserver.com/) running on a server in [Vultr](https://www.vultr.com/) ([referral link](https://www.vultr.com/?ref=9488431)) so I'll be using that to front my new speedtest server. I add a DNS record in Bunny for `speed.runtimeterror.dev` pointed to the server's public IP address, and then add a corresponding block to my `/etc/caddy/Caddyfile` configuration:
|
||||
|
||||
|
||||
```text
|
||||
speed.runtimeterror.dev {
|
||||
bind 192.0.2.1 # replace with server's public interface address
|
||||
reverse_proxy http://speedtest.tailnet-name.ts.net:3000
|
||||
}
|
||||
```
|
||||
|
||||
{{% notice note "Caddy binding" %}}
|
||||
Since I'm already using Tailscale Serve for other services on this server, I use the `bind` directive to explicitly tell Caddy to listen on the server's public IP address. By default, it will try to listen on *all* interfaces and that would conflict with `tailscaled` that's already bound to the tailnet-internal IP.
|
||||
{{% /notice %}}
|
||||
|
||||
The `reverse_proxy` directive points to speedtest's HTTP endpoint within my tailnet; all traffic between tailnet addresses is already encrypted, and I can just let Caddy obtain and serve the SSL certificate automagically.
|
||||
|
||||
Now I just need to reload the Caddyfile:
|
||||
|
||||
```shell
|
||||
sudo caddy reload -c /etc/caddy/Caddyfile # [tl! .cmd]
|
||||
INFO using config from file {"file": "/etc/caddy/Caddyfile"} # [tl! .nocopy:1]
|
||||
INFO adapted config to JSON {"adapter": "caddyfile"}
|
||||
```
|
||||
|
||||
And I can try out my speedtest at `https://speed.runtimeterror.dev`:
|
||||
|
||||
![OpenSpeedTest results showing a download speed of 194.1 Mbps, upload speed of 147.8 Mbps, and ping of 20 ms with 0.6 ms jitter. A graph displays connection speed over time.](speedtest.png)
|
||||
|
||||
*Not bad!*
|
||||
|
||||
### Conclusion
|
||||
Combining the powers (and magic) of Caddy and Tailscale makes it easy to publicly serve content from private resources without compromising on security *or* extending vendor lock-in. This will dramatically simplify migrating the rest of my domains from Cloudflare to Bunny.
|
After Width: | Height: | Size: 222 KiB |
|
@ -22,7 +22,7 @@ For a while now, I've been using an [OpenVPN Access Server](https://openvpn.net/
|
|||
|
||||
I found that solution in [WireGuard](https://www.wireguard.com/), which provides an extremely efficient secure tunnel implemented directly in the Linux kernel. It has a much smaller (and easier-to-audit) codebase, requires minimal configuration, and uses the latest crypto wizardry to securely connect multiple systems. It took me an hour or so of fumbling to get WireGuard deployed and configured on a fresh (and minimal) Ubuntu 20.04 VM running on my ESXi 7 homelab host, and I was pretty happy with the performance, stability, and resource usage of the new setup. That new VM idled at a full _tenth_ of the memory usage of my OpenVPN AS, and it only required a single port to be forwarded into my home network.
|
||||
|
||||
Of course, I soon realized that the setup could be _even better:_ I'm now running a WireGuard server on the Google Cloud free tier, and I've configured the [VyOS virtual router I use for my homelab stuff](/vmware-home-lab-on-intel-nuc-9#networking) to connect to that cloud-hosted server to create a secure tunnel between the two without needing to punch any holes in my local network (or consume any additional resources). I can then connect my client devices to the WireGuard server in the cloud. From there, traffic intended for my home network gets relayed to the VyOS router, and internet-bound traffic leaves Google Cloud directly. So my self-managed VPN isn't just good for accessing my home lab remotely, but also more generally for encrypting traffic when on WiFi networks I don't control - allowing me to replace the paid ProtonVPN subscription I had been using for that purpose.
|
||||
Of course, I soon realized that the setup could be _even better:_ I'm now running a WireGuard server on the Google Cloud free tier, and I've configured the [VyOS virtual router I use for my homelab stuff](/vmware-home-lab-on-intel-nuc-9/#networking) to connect to that cloud-hosted server to create a secure tunnel between the two without needing to punch any holes in my local network (or consume any additional resources). I can then connect my client devices to the WireGuard server in the cloud. From there, traffic intended for my home network gets relayed to the VyOS router, and internet-bound traffic leaves Google Cloud directly. So my self-managed VPN isn't just good for accessing my home lab remotely, but also more generally for encrypting traffic when on WiFi networks I don't control - allowing me to replace the paid ProtonVPN subscription I had been using for that purpose.
|
||||
|
||||
It's a pretty slick setup, if I do say so myself. Anyway, this post will discuss how I implemented this, and what I learned along the way.
|
||||
|
||||
|
@ -57,7 +57,7 @@ The other defaults are fine, but I'll holding off on clicking the friendly blue
|
|||
##### Network Configuration
|
||||
Expanding the **Networking** section of the request form lets me add a new `wireguard` network tag, which will make it easier to target the instance with a firewall rule later. I also want to enable the _IP Forwarding_ option so that the instance will be able to do router-like things.
|
||||
|
||||
By default, the new instance will get assigned a public IP address that I can use to access it externally - but this address is _ephemeral_ so it will change periodically. Normally I'd overcome this by [using ddclient to manage its dynamic DNS record](/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance#configure-dynamic-dns), but (looking ahead) [VyOS's WireGuard interface configuration](https://docs.vyos.io/en/latest/configuration/interfaces/wireguard.html#interface-configuration) unfortunately only supports connecting to an IP rather than a hostname. That means I'll need to reserve a _static_ IP address for my instance.
|
||||
By default, the new instance will get assigned a public IP address that I can use to access it externally - but this address is _ephemeral_ so it will change periodically. Normally I'd overcome this by [using ddclient to manage its dynamic DNS record](/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/#configure-dynamic-dns), but (looking ahead) [VyOS's WireGuard interface configuration](https://docs.vyos.io/en/latest/configuration/interfaces/wireguard.html#interface-configuration) unfortunately only supports connecting to an IP rather than a hostname. That means I'll need to reserve a _static_ IP address for my instance.
|
||||
|
||||
I can do that by clicking on the _Default_ network interface to expand the configuration. While I'm here, I'll first change the **Network Service Tier** from _Premium_ to _Standard_ to save a bit of money on network egress fees. _(This might be a good time to mention that while the compute instance itself is free, I will have to spend [about $3/mo for the public IP](https://cloud.google.com/vpc/network-pricing#:~:text=internal%20IP%20addresses.-,External%20IP%20address%20pricing,-You%20are%20charged), as well as [$0.085/GiB for internet egress via the Standard tier](https://cloud.google.com/vpc/network-pricing#:~:text=or%20Cloud%20Interconnect.-,Standard%20Tier%20pricing,-Egress%20pricing%20is) (versus [$0.12/GiB on the Premium tier](https://cloud.google.com/vpc/network-pricing#:~:text=Premium%20Tier%20pricing)). So not entirely free, but still pretty damn cheap for a cloud-hosted VPN that I control completely.)_
|
||||
|
||||
|
@ -487,7 +487,7 @@ Two quick pre-requisites first:
|
|||
1. Open the WireGuard Android app, tap the three-dot menu button at the top right, expand the Advanced section, and enable the _Allow remote control apps_ so that Tasker will be permitted to control WireGuard.
|
||||
2. Exclude the WireGuard app from Android's battery optimization so that it doesn't have any problems running in the background. On (Pixel-flavored) Android 12, this can be done by going to **Settings > Apps > See all apps > WireGuard > Battery** and selecting the _Unrestricted_ option.
|
||||
|
||||
On to the Tasker config. The only changes will be in the [VPN on Strange Wifi](/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker#vpn-on-strange-wifi) profile. I'll remove the OpenVPN-related actions from the Enter and Exit tasks and replace them with the built-in **Tasker > Tasker Function WireGuard Set Tunnel** action.
|
||||
On to the Tasker config. The only changes will be in the [VPN on Strange Wifi](/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/#vpn-on-strange-wifi) profile. I'll remove the OpenVPN-related actions from the Enter and Exit tasks and replace them with the built-in **Tasker > Tasker Function WireGuard Set Tunnel** action.
|
||||
|
||||
For the Enter task, I'll set the tunnel status to `true` and specify the name of the tunnel as configured in the WireGuard app; the Exit task gets the status set to `false` to disable the tunnel. Both actions will be conditional upon the `%TRUSTED_WIFI` variable being unset.
|
||||
![Tasker setup](20211028_tasker_setup.png)
|
||||
|
|
|
@ -89,7 +89,7 @@ Cool! Now I just need to do that same thing, but from vRealize Orchestrator. Fir
|
|||
|
||||
### Template changes
|
||||
#### Cloud Template
|
||||
Similar to the template changes I made for [optionally joining deployed servers to the Active Directory domain](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template), I'll just be adding a simple boolean checkbox to the `inputs` section of the template in Cloud Assembly:
|
||||
Similar to the template changes I made for [optionally joining deployed servers to the Active Directory domain](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/#cloud-template), I'll just be adding a simple boolean checkbox to the `inputs` section of the template in Cloud Assembly:
|
||||
```yaml
|
||||
formatVersion: 1
|
||||
inputs:
|
||||
|
|
|
@ -40,7 +40,7 @@ Now I can finally click the blue **Create Instance** button at the bottom of the
|
|||
![Logged in!](5PD1H7b1O.png)
|
||||
|
||||
### DNS setup
|
||||
According to [Oracle's docs](https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm), the public IP assigned to my instance is mine until I terminate the instance. It should even remain assigned if I stop or restart the instance, just as long as I don't delete the virtual NIC attached to it. So I'll skip the [`ddclient`-based dynamic DNS configuration I've used in the past](/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance#configure-dynamic-dns) and instead go straight to my registrar's DNS management portal and create a new `A` record for `matrix.bowdre.net` with the instance's public IP.
|
||||
According to [Oracle's docs](https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm), the public IP assigned to my instance is mine until I terminate the instance. It should even remain assigned if I stop or restart the instance, just as long as I don't delete the virtual NIC attached to it. So I'll skip the [`ddclient`-based dynamic DNS configuration I've used in the past](/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/#configure-dynamic-dns) and instead go straight to my registrar's DNS management portal and create a new `A` record for `matrix.bowdre.net` with the instance's public IP.
|
||||
|
||||
While I'm managing DNS, it might be good to take a look at the requirements for [federating my new server](https://github.com/matrix-org/synapse/blob/master/docs/federate.md#setting-up-federation) with the other Matrix servers out there. I'd like for users' identities on my server to be identified by the `bowdre.net` domain (`@user:bowdre.net`) rather than the full `matrix.bowdre.net` FQDN (`@user:matrix.bowdre.net` is kind of cumbersome). The standard way to do this is to leverage [`.well-known` delegation](https://github.com/matrix-org/synapse/blob/master/docs/delegate.md#well-known-delegation), where the URL at `http://bowdre.net/.well-known/matrix/server` would return a JSON structure telling other Matrix servers how to connect to mine:
|
||||
```json
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: "Self-Hosted Gemini Capsule with gempost and GitHub Actions"
|
||||
date: "2024-03-23T21:33:19Z"
|
||||
lastmod: "2024-04-05T21:07:38Z"
|
||||
lastmod: "2024-10-26T21:26:17Z"
|
||||
description: "Deploying a Gemini capsule, powered by Agate, gempost, kineto, Tailscale, and GitHub Actions"
|
||||
featured: false
|
||||
toc: true
|
||||
|
@ -13,11 +13,17 @@ tags:
|
|||
- selfhosting
|
||||
- tailscale
|
||||
---
|
||||
|
||||
{{% notice note "Exiting Geminispace..." %}}
|
||||
After several months of experimentation, I decided to stop exploring Gemini. I still think the concept is really neat but ultimately would rather focus my efforts here in the real world. I'll leave this article in place but my capsule has since been deorbited.
|
||||
{{% /notice %}}
|
||||
|
||||
|
||||
I've recently been exploring some indieweb/smolweb technologies, and one of the most interesting things I've come across is [Project Gemini](https://geminiprotocol.net/):
|
||||
|
||||
> Gemini is a new internet technology supporting an electronic library of interconnected text documents. That's not a new idea, but it's not old fashioned either. It's timeless, and deserves tools which treat it as a first class concept, not a vestigial corner case. Gemini isn't about innovation or disruption, it's about providing some respite for those who feel the internet has been disrupted enough already. We're not out to change the world or destroy other technologies. We are out to build a lightweight online space where documents are just documents, in the interests of every reader's privacy, attention and bandwidth.
|
||||
|
||||
I thought it was an interesting idea, so after a bit of experimentation with various hosted options I created a self-hosted [Gemini capsule (Gemini for "web site") to host a lightweight text-focused Gemlog ("weblog")](https://capsule.jbowdre.lol/gemlog/2024-03-05-hello-gemini.gmi). After further tinkering, I arranged to serve the capsule both on the Gemini network as well as the traditional HTTP-based web, and I set up a GitHub Actions workflow to handle posting updates. This post will describe how I did that.
|
||||
I thought it was an interesting idea, so after a bit of experimentation with various hosted options I created a self-hosted Gemini capsule (Gemini for "web site") to host a lightweight text-focused Gemlog ("weblog"). After further tinkering, I arranged to serve the capsule both on the Gemini network as well as the traditional HTTP-based web, and I set up a GitHub Actions workflow to handle posting updates. This post will describe how I did that.
|
||||
|
||||
### Gemini Server: Agate
|
||||
There are a number of different [Gemini server applications](https://github.com/kr1sp1n/awesome-gemini?tab=readme-ov-file#servers) to choose from. I decided to use [Agate](https://github.com/mbrubeck/agate), not just because it was at the top of the Awesome Gemini list but also because it seems to be widely recommended, regularly updated, and easy to use. Plus it will automatically generate certs for me, which is nice since Gemini *requires* valid certificates for all connections.
|
||||
|
@ -602,5 +608,5 @@ And the capsule is live at both `https://capsule.jbowdre.lol` and `gemini://caps
|
|||
![Gemini capsule served over gemini://](gemini-capsule.png)
|
||||
|
||||
Come check it out!
|
||||
- [My Capsule on Gemini](gemini://capsule.jbowdre.lol)
|
||||
- [My Capsule on the web](https://capsule.jbowdre.lol)
|
||||
- My Capsule on Gemini
|
||||
- My Capsule on the web
|
|
@ -160,7 +160,8 @@ No users have added their keys to Gitea just yet so if you look at `/home/git/.s
|
|||
|
||||
So I'll go ahead and create that extra command:
|
||||
```shell
|
||||
cat <<"EOF" | sudo tee /usr/local/bin/gitea # [tl! .cmd]
|
||||
# [tl! .cmd:1,1]
|
||||
cat <<EOF | sudo tee /usr/local/bin/gitea
|
||||
#!/bin/sh
|
||||
ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
|
||||
EOF
|
||||
|
|
|
@ -52,7 +52,7 @@ I edited the apache config file to bind that new certificate on port 443, and to
|
|||
```
|
||||
After restarting apache, I verified that hitting `http://ipam.lab.bowdre.net` redirected me to `https://ipam.lab.bowdre.net`, and that the connection was secured with the shiny new certificate.
|
||||
|
||||
Remember how I've got a "Home" network as well as [several internal networks](/vmware-home-lab-on-intel-nuc-9#networking) which only exist inside the lab environment? I dropped the phpIPAM instance on the Home network to make it easy to connect to, but it doesn't know how to talk to the internal networks where vRA will actually be deploying the VMs. So I added a static route to let it know that traffic to `172.16.0.0/16` would have to go through the Vyos router at `192.168.1.100`.
|
||||
Remember how I've got a "Home" network as well as [several internal networks](/vmware-home-lab-on-intel-nuc-9/#networking) which only exist inside the lab environment? I dropped the phpIPAM instance on the Home network to make it easy to connect to, but it doesn't know how to talk to the internal networks where vRA will actually be deploying the VMs. So I added a static route to let it know that traffic to `172.16.0.0/16` would have to go through the Vyos router at `192.168.1.100`.
|
||||
|
||||
This is Ubuntu, so I edited `/etc/netplan/99-netcfg-vmware.yaml` to add the `routes` section at the bottom:
|
||||
```yaml
|
||||
|
|
After Width: | Height: | Size: 33 KiB |
423
content/posts/publish-silverbullet-notes-quartz/index.md
Normal file
|
@ -0,0 +1,423 @@
|
|||
---
|
||||
title: "Publishing (Selected) SilverBullet Notes with Quartz and GitHub Actions"
|
||||
date: "2024-09-29T20:27:03Z"
|
||||
# lastmod: 2024-09-28
|
||||
description: "A long note about how I publish short notes from SilverBullet using Quartz, Tailscale, Caddy, and GitHub Actions."
|
||||
featured: false
|
||||
toc: true
|
||||
reply: true
|
||||
categories: Self-Hosting
|
||||
tags:
|
||||
- api
|
||||
- automation
|
||||
- caddy
|
||||
- cicd
|
||||
- selfhosting
|
||||
- tailscale
|
||||
---
|
||||
It's been about two months since I [switched](https://srsbsns.lol/is-silverbullet-the-note-keeping-silver-bullet/) my note-keeping efforts from [Obsidian](https://obsidian.md) to [SilverBullet](https://silverbullet.md/), and I've been really enjoying it. SilverBullet is [easy to deploy with Docker](/silverbullet-self-hosted-knowledge-management/), and it's packed with [useful features](https://silverbullet.md/#Features) without becoming slow or otherwise cumbersome. Being able to access and write my notes from any device with a web browser has been super convenient.
|
||||
|
||||
But one use case I hadn't yet migrated from Obsidian to SilverBullet was managing the notes I share publicly at [notes.runtimeterror.dev](https://notes.runtimeterror.dev) using [Quartz](https://quartz.jzhao.xyz/), a fancy static site generator optimized for building "digital gardens" from Obsidian vaults. I had been using Quartz with a [public repo](https://github.com/jbowdre/notes/tree/867dde54f8a72d2d04bac140a4c54e64dd0f569b) containing the Quartz code with a dedicated (public) Obsidian vault folder [embedded within](https://github.com/jbowdre/notes/tree/867dde54f8a72d2d04bac140a4c54e64dd0f569b/content).
|
||||
|
||||
I played a bit with SilverBullet's [publishing plugin](https://silverbullet.md/Plugs/Pub), which would let me selectively publish notes in certain folders or bearing certain tags, but the HTML it produces is a bit sparse. I didn't want to give up the Quartz niceties like the auto-generated navigation menu and built-in search.
|
||||
|
||||
After a little experimentation I settled on an approach that I think works really well for my needs:
|
||||
- SilverBullet syncs to a private repo via the [Git plug](https://silverbullet.md/Plugs/Git).
|
||||
- Pushes to that private repo trigger a workflow run in my (public) Quartz repo.
|
||||
- A workflow in the Quartz repo clones the private SilverBullet repo to `content/`.
|
||||
- Quartz processes the Markdown files in the `content/` directory and renders HTML for the files with `publish: true` in the front matter as HTML files in `public/`.
|
||||
- The contents of `public/` are transferred to my server via Tailscale, and then served by Caddy.
|
||||
|
||||
This post will describe the entire setup in detail (though not necessarily in that order).
|
||||
|
||||
### Plugging in the Git plug
|
||||
SilverBullet can be extended through the use of [plugs](https://silverbullet.md/Plugs), and installing the [Git plug](https://silverbullet.md/Plugs/Git) should make it easy to sync my SilverBullet content to a private GitHub repo.
|
||||
|
||||
But I should probably initialize my space (the SilverBullet equivalent of a vault/notebook/graph) as a git repo first.
|
||||
|
||||
Recall from my [setup notes](/silverbullet-self-hosted-knowledge-management/#silverbullet-setup) that I'm mounting a folder named `./space` into my SilverBullet container at `/space`. I'll need to turn that into a git repo so I SSH to my Docker host, move into the folder containing my SilverBullet space, and initialize the repo:
|
||||
|
||||
```shell
|
||||
cd /opt/silverbullet/space # [tl! .cmd:1]
|
||||
git init .
|
||||
```
|
||||
|
||||
I'll connect this local git repo to a private GitHub repo, but I'll need to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) for the git interactions since the Git plug running inside the SilverBullet container won't have access to my SSH private key. So I [create a new PAT](https://github.com/settings/personal-access-tokens/new), scope it only to my new private repo (`jbowdre/spaaace`), and grant it the `contents: write` permission there. I can then use the PAT when I set up the remote and push my first commit:
|
||||
|
||||
```shell
|
||||
git remote add origin https://github_pat_[...]@github.com/jbowdre/spaaace.git # [tl! .cmd:3]
|
||||
git add .
|
||||
git commit -m "initial commit"
|
||||
git push --set-upstream origin main
|
||||
```
|
||||
|
||||
This stores the authentication token directly inside the local git configuration (in `/opt/silverbullet/space/.git/config`) so that git operations performed within the container will be automatically authenticated.
|
||||
|
||||
Now that my repo is ready, I can go ahead and install and configure the Git plug. I do that by logging into my SilverBullet instance on the web (`https://silverbullet.tailnet-name.ts.net`), pressing `[Ctrl]` + `/` to bring up the command palette, typing/selecting `Plugs: Add`, and pasting in the URI for the Git plug: `github:silverbulletmd/silverbullet-git/git.plug.js`.
|
||||
|
||||
The docs say that I can add the following to my `SETTINGS` file to enable automatic syncing:
|
||||
|
||||
```yaml
|
||||
git:
|
||||
autoCommitMinutes: 5
|
||||
autoSync: true
|
||||
```
|
||||
|
||||
But that doesn't actually seem to work for some reason (at least for me). That's okay, though, because I can easily add a keyboard shortcut (`[Ctrl]` + `[Alt]` + `.`) to quickly sync on-demand:
|
||||
|
||||
```yaml
|
||||
shortcuts:
|
||||
- command: "{[Git: Sync]}"
|
||||
key: "Ctrl-Alt-."
|
||||
```
|
||||
|
||||
{{% notice note "Brace for it..." %}}
|
||||
Note that the command target for the shortcut is wrapped with a square bracket wrapped with a curly brace (`{[ ]}`). It won't work if you do a Go-template-style double-curly-braces (`{{ }}`).
|
||||
|
||||
*Ask me how I know (and how long it took me to find my mistake!).*
|
||||
{{% /notice %}}
|
||||
|
||||
I'll use `[Ctrl]` + `/` to get the command pallette again and run `System: Reload` to activate my change, and then simply pressing `[Ctrl]` + `[Alt]` + `.` will trigger a `git pull` + `git commit` + `git push` (as needed) sequence.
|
||||
|
||||
That takes care of getting my SilverBullet content into GitHub. Now let's see how it gets published.
|
||||
|
||||
### Setting up Quartz
|
||||
"Installing" Quartz is pretty straightforward thanks to the instructions on the [Quartz website](https://quartz.jzhao.xyz/). I just ran these commands on my laptop:
|
||||
|
||||
```shell
|
||||
git clone https://github.com/jackyzha0/quartz.git # [tl! .cmd:3]
|
||||
cd quartz
|
||||
npm i
|
||||
npx quartz create
|
||||
```
|
||||
|
||||
By default, Quartz expects my Obsidian content to be in the `content/` directory (and there's a placeholder file there for now). I'll replace that with my `spaaace` repo for testing but also add that path to the `.gitignore` file to ensure I don't accidentally commit my private notes:
|
||||
|
||||
```shell
|
||||
rm -rf content/ # [tl! .cmd:2]
|
||||
git clone git@github.com:jbowdre/spaaace.git content
|
||||
echo "content" >> .gitignore
|
||||
```
|
||||
|
||||
From there I can move on to configuring Quartz. The [documentation](https://quartz.jzhao.xyz/configuration) has helpful information on some of the configuration options so I'm just going to highlight the changes that are particularly important to this specific setup.
|
||||
|
||||
In the `plugins:` section of `quartz.config.ts`, I enable the [`ExplicitPublish` filter plugin](https://quartz.jzhao.xyz/plugins/ExplicitPublish) to tell Quartz to only render pages with `publish: true` in the frontmatter:
|
||||
|
||||
```ts
|
||||
plugins: {
|
||||
filters: [
|
||||
Plugin.RemoveDrafts(),
|
||||
Plugin.ExplicitPublish(), // [tl! ++]
|
||||
],
|
||||
[...]
|
||||
}
|
||||
```
|
||||
|
||||
That will allow me very granular control over which posts are published (and which remain private), but the [Private Pages](https://quartz.jzhao.xyz/features/private-pages) Quartz documentation page warns that the `ExplicitPublish` plugin only filters out Markdown files. All other files (images, PDFs, plain TXTs) will still be processed and served publicly. I don't intend to include screenshots or other media with these short code-heavy notes so I scroll back up in the `quartz.config.ts` file and add a little regex to the `ignorePatterns` section:
|
||||
|
||||
```ts
|
||||
configuration: {
|
||||
ignorePatterns: [
|
||||
"private",
|
||||
"templates",
|
||||
"**/!(*.md)" // [tl! ++]
|
||||
],
|
||||
[...]
|
||||
}
|
||||
```
|
||||
|
||||
That will avoid processing any non-Markdown files.
|
||||
|
||||
The rest of the Quartz setup follows the documentation, including the steps to [connect my (public) GitHub repository](https://quartz.jzhao.xyz/setting-up-your-GitHub-repository).
|
||||
|
||||
Before publishing, I can check my work by generating and serving the Quartz content locally:
|
||||
|
||||
```shell
|
||||
npx quartz build --serve # [tl! .cmd ** .nocopy:1,13]
|
||||
|
||||
Quartz v4.4.0
|
||||
|
||||
Cleaned output directory `public` in 29ms
|
||||
Found 198 input files from `content` in 24ms
|
||||
Parsed 198 Markdown files in 4s
|
||||
Filtered out 198 files in 398μs
|
||||
⠋ Emitting output files
|
||||
Warning: you seem to be missing an `index.md` home page file at the root of your `content` folder. This may cause errors when deploying. # [tl! ** ~~]
|
||||
Emitted 8 files to `public` in 44ms
|
||||
Done processing 0 files in 103ms
|
||||
Started a Quartz server listening at http://localhost:8080
|
||||
hint: exit with ctrl+c
|
||||
|
||||
```
|
||||
|
||||
Oops! Remember how the `ExplicitPublish` plugin will only process notes with `publish: true` set in the frontmatter? Since I just imported the notes from my (public) Obsidian vault into SilverBullet none of them have that attribute set yet. (But hey, this is a great way to test that my filters work!)
|
||||
|
||||
Let me run through the notes I want to be public and update them accordingly...
|
||||
|
||||
|
||||
```markdown
|
||||
---
|
||||
title: Trigger remote workflow with GitHub Actions
|
||||
tags: [github]
|
||||
publish: true # [tl! ++]
|
||||
---
|
||||
...
|
||||
```
|
||||
|
||||
And then I'll try again:
|
||||
|
||||
```shell
|
||||
npx quartz build --serve # [tl! .cmd ** .nocopy:1,11]
|
||||
|
||||
Quartz v4.4.0
|
||||
|
||||
Cleaned output directory `public` in 6ms
|
||||
Found 198 input files from `content` in 32ms
|
||||
Parsed 198 Markdown files in 4s # [tl! **:2]
|
||||
Filtered out 123 files in 404μs
|
||||
Emitted 130 files to `public` in 497ms
|
||||
Done processing 198 files in 4s
|
||||
Started a Quartz server listening at http://localhost:8080 # [tl! **]
|
||||
hint: exit with ctrl+c
|
||||
```
|
||||
|
||||
That's more like it!
|
||||
|
||||
![A webpage served at http://localhost:8080/ titled "A note about my notes" includes a short post describing the use of Quartz and SilverBullet for organizing scripts and references. Categories like "ChromeOS" and "Linux" are listed in the left-hand navigation panel.](localhost.jpg)
|
||||
|
||||
But serving my notes from my laptop is only *so* useful. Let's keep going and see what it takes to publish them on the World Wide Web!
|
||||
|
||||
### Publish publicly
|
||||
I've previously written about my [GitHub Actions workflow for publishing my Gemini capsule](/gemini-capsule-gempost-github-actions/#publish-github-actions), and I'm going to reuse a lot of the same ideas here. I'll create a workflow that performs the steps needed to render the HTML to the `public/` directory, establishes a [Tailscale](https://tailscale.com/) tunnel to my server, and transfers the rendered content there. Those static files will then be served with [Caddy](https://caddyserver.com/), taking advantage of its automatic HTTPS abilities.
|
||||
|
||||
#### Server prep
|
||||
The setup on the server is pretty simple. I just create a directory to hold the files, and make sure it's owned by the `deploy` user:
|
||||
|
||||
```shell
|
||||
sudo mkdir /opt/notes # [tl! .cmd:1]
|
||||
sudo chown -R deploy:deploy /opt/notes
|
||||
```
|
||||
|
||||
I'll also go ahead and update my Caddyfile based on the [Quartz documentation](https://quartz.jzhao.xyz/hosting#using-caddy), but I won't reload Caddy just yet (I'll wait until I have some content to serve):
|
||||
|
||||
```Caddyfile
|
||||
notes.runtimeterror.dev {
|
||||
bind 192.0.2.1 # replace with server's public interface address
|
||||
root * /opt/notes/public
|
||||
try_files {path} {path}.html {path}/ =404
|
||||
file_server
|
||||
encode gzip
|
||||
|
||||
handle_errors {
|
||||
rewrite * /{err.status_code}.html
|
||||
file_server
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Tailscale prep
|
||||
The full details of how I configured Tailscale to support this deploy-from-github-actions use case are [available in another post](/gemini-capsule-gempost-github-actions/#tailscale-configuration) so I won't repeat the explanation here. But these are the items I added to my Tailscale ACL to create a set of tags (one for the GitHub runner, one for the server it will deploy to), allow SSH traffic from the runner to the server, and configure [Tailscale SSH](/tailscale-ssh-serve-funnel/#tailscale-ssh) to let the runner log in to the server as the `deploy` user:
|
||||
|
||||
```json
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:gh-bld": ["group:admins"], // github builder
|
||||
"tag:gh-srv": ["group:admins"], // server it can deploy to
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
// github runner can talk to the deployment target
|
||||
"action": "accept",
|
||||
"users": ["tag:gh-bld"],
|
||||
"ports": [
|
||||
"tag:gh-srv:22"
|
||||
],
|
||||
}
|
||||
],
|
||||
"ssh": [
|
||||
{
|
||||
// runner can SSH to the server as the 'deploy' user
|
||||
"action": "accept",
|
||||
"src": ["tag:gh-bld"],
|
||||
"dst": ["tag:gh-srv"],
|
||||
"users": ["deploy"],
|
||||
}
|
||||
],
|
||||
}
|
||||
```
|
||||
|
||||
#### Workin' on a workflow
|
||||
With the prep out of the way, I'm ready to start on my deployment workflow.
|
||||
|
||||
My `.github/workflows/deploy.yaml` starts simply with just setting some defaults, and it configures the workflow to run on pushes to the default branch (`v4`), [`repository_dispatch` events](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#repository_dispatch), and `workflow_dispatch` events (manual executions).
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true}
|
||||
name: Deploy Notes
|
||||
|
||||
# run on changes to default (v4) branch, repository_dispatch events, and manual executions
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- v4
|
||||
repository_dispatch:
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency: # prevent concurrent deploys doing strange things
|
||||
group: deploy
|
||||
cancel-in-progress: false
|
||||
|
||||
# Default to bash
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
```
|
||||
|
||||
The `deploy` job then starts with [checking out](https://github.com/actions/checkout/tree/v4/) the repo where the job is running [*and* the private repo](https://github.com/actions/checkout/tree/v4/?tab=readme-ov-file#checkout-multiple-repos-private) holding my SilverBullet space, which gets cloned to the `content/` directory.
|
||||
|
||||
To be able to fetch the private `jbowdre/spaaace` repo, I'll need to generate another PAT scoped to that repo. This one only needs read access (no write) to the contents of the repo. The PAT and the repo path get stored as repository secrets.
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers": true, "lineNumbersStart": 20}
|
||||
jobs:
|
||||
deploy:
|
||||
name: Build and deploy Quartz site
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Quartz
|
||||
uses: actions/checkout@v4
|
||||
- name: Checkout notes
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ secrets.SPAAACE_REPO }}
|
||||
token: ${{ secrets.SPAAACE_REPO_PAT }}
|
||||
path: content
|
||||
```
|
||||
|
||||
I can then move on to installing Node and building the Quartz site:
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true, "lineNumbersStart": 33}
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
- name: Build Quartz
|
||||
run: |
|
||||
npm ci
|
||||
npx quartz build
|
||||
```
|
||||
|
||||
I use the [Tailscale GitHub Action](https://tailscale.com/kb/1276/tailscale-github-action) to connect the ephemeral GitHub runner to my tailnet, and apply that ACL tag that grants it SSH access to my web server (and nothing else).
|
||||
|
||||
I've also stored that web server's SSH public key as a repository secret, and I make sure that gets added to the runner's `~/.ssh/known_hosts` file so that it can connect without being prompted to verify the host keys.
|
||||
|
||||
Finally, I use `rsync` to copy the `public/` directory (with all the rendered HTML content) to `/opt/notes/` on the server.
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true, "lineNumbersStart":41}
|
||||
- name: Connect to Tailscale
|
||||
uses: tailscale/github-action@v2
|
||||
with:
|
||||
oauth-client-id: ${{ secrets.TS_API_CLIENT_ID }}
|
||||
oauth-secret: ${{ secrets.TS_API_CLIENT_SECRET }}
|
||||
tags: ${{ secrets.TS_TAG }}
|
||||
- name: Configure SSH known hosts
|
||||
run: |
|
||||
mkdir -p ~/.ssh
|
||||
echo "${{ secrets.SSH_KNOWN_HOSTS }}" > ~/.ssh/known_hosts
|
||||
chmod 644 ~/.ssh/known_hosts
|
||||
- name: Deploy Quartz
|
||||
run: |
|
||||
rsync -avz --delete -e ssh public/ deploy@${{ secrets.QUARTZ_HOST }}:${{ secrets.QUARTZ_CONTENT_PATH }}
|
||||
```
|
||||
|
||||
After making sure that I've added all the required repository secrets, I can commit and push my code and it *should* trigger the deployment...
|
||||
|
||||
```shell
|
||||
git add . # [tl! .cmd:2]
|
||||
git commit -m "deployment test"
|
||||
git push
|
||||
```
|
||||
|
||||
![A GitHub "Actions" tab for the repository "notes" shows the "Deploy Notes" workflow. A successful "deployment test" is listed.](deploy_success.jpg)
|
||||
|
||||
And I can log back onto my server and confirm that the content is there:
|
||||
|
||||
```shell
|
||||
ls -l /opt/notes/public/ # [tl! .cmd .nocopy:1,17]
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 ChromeOS
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 CICD
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 Containers
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 Development
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 Linux
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 Saltstack
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 static
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 tags
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 VMware
|
||||
drwxr-xr-x - deploy 29 Sep 19:04 Windows
|
||||
.rw-r--r-- 3.9k deploy 29 Sep 19:04 404.html
|
||||
.rw-r--r-- 30k deploy 29 Sep 19:04 index.css
|
||||
.rw-r--r-- 25k deploy 29 Sep 19:04 index.html
|
||||
.rw-r--r-- 4.8k deploy 29 Sep 19:04 index.xml
|
||||
.rw-r--r-- 62k deploy 29 Sep 19:04 postscript.js
|
||||
.rw-r--r-- 903 deploy 29 Sep 19:04 prescript.js
|
||||
.rw-r--r-- 11k deploy 29 Sep 19:04 sitemap.xml
|
||||
```
|
||||
|
||||
Now that I've got some content I can reload Caddy:
|
||||
|
||||
```shell
|
||||
sudo caddy reload -c /etc/caddy/Caddyfile # [tl! .cmd .nocopy:1,2]
|
||||
2024/09/29 19:11:17.705 INFO using config from file {"file": "/etc/caddy/Caddyfile"}
|
||||
2024/09/29 19:11:17.715 INFO adapted config to JSON {"adapter": "caddyfile"}
|
||||
```
|
||||
|
||||
And check to see if the site is up:
|
||||
|
||||
![A webpage served at https://notes.runtimeterror.dev titled "A note about my notes" includes a short post describing the use of Quartz and SilverBullet for organizing scripts and references. Categories like "ChromeOS" and "Linux" are listed in the left-hand navigation panel.](online.jpg)
|
||||
|
||||
Nice, my notes are online!
|
||||
|
||||
### Trigger workflow
|
||||
The last piece of this puzzle is to trigger the deployment workflow whenever my SilverBullet notes get synced to that private repo. Fortunately I [have a note](https://notes.runtimeterror.dev/CICD/Trigger-remote-workflow-with-GitHub-Actions) that describes how to do that.
|
||||
|
||||
I'll set up yet *another* GitHub PAT, this one scoped to the `jbowdre/notes` public repo with permissions to write to the repository contents. Then I just need a workflow in the private `jbowdre/spaaace` repo to make a `POST` to `https://api.github.com/repos/jbowdre/notes/dispatches` whenever a Markdown file is created/updated.
|
||||
|
||||
Here's `.github/workflows/trigger.yaml`:
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers":true}
|
||||
name: Trigger Quartz Build
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "**.md"
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
name: Trigger
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Remote trigger
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.NOTES_REPO_PAT }}" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
https://api.github.com/repos/${{ secrets.NOTES_REPO }}/dispatches \
|
||||
-d '{"event_type": "remote-trigger"}'
|
||||
```
|
||||
|
||||
Once I commit and push this, any future changes to a Markdown file will tell the GitHub API to kick off the remote workflow in the public repo.
|
||||
|
||||
![A successful GitHub workflow run triggered by repository dispatch.](trigger_success.jpg)
|
||||
|
||||
### Conclusion
|
||||
And that's it! I can now write and publish short notes from *anywhere* thanks to the SilverBullet web app, Quartz, and some GitHub Actions shenanigans. This was a little more work to set up on the front end but my new publishing workflow couldn't be simpler: just write a note and hit `[Ctrl]` + `[Alt]` + `.` to sync the change to the private GitHub repo and kick off the deployment.
|
||||
|
||||
*Now I don't have an excuse to keep sitting on this backlog of quick notes I've been meaning to share...*
|
BIN
content/posts/publish-silverbullet-notes-quartz/localhost.jpg
Normal file
After Width: | Height: | Size: 54 KiB |
BIN
content/posts/publish-silverbullet-notes-quartz/online.jpg
Normal file
After Width: | Height: | Size: 54 KiB |
After Width: | Height: | Size: 9.1 KiB |
|
@ -11,7 +11,7 @@ tags:
|
|||
- vmware
|
||||
title: Run scripts in guest OS with vRA ABX Actions
|
||||
---
|
||||
Thus far in my [vRealize Automation project](/categories/vmware), I've primarily been handing the payload over to vRealize Orchestrator to do the heavy lifting on the back end. This approach works really well for complex multi-part workflows (like when [generating unique hostnames](/vra8-custom-provisioning-part-two#the-vro-workflow)), but it may be overkill for more linear tasks (such as just running some simple commands inside of a deployed guest OS). In this post, I'll explore how I use [vRA Action Based eXtensibility (ABX)](https://blogs.vmware.com/management/2020/09/vra-abx-flow.html) to do just that.
|
||||
Thus far in my [vRealize Automation project](/categories/vmware), I've primarily been handing the payload over to vRealize Orchestrator to do the heavy lifting on the back end. This approach works really well for complex multi-part workflows (like when [generating unique hostnames](/vra8-custom-provisioning-part-two/#the-vro-workflow)), but it may be overkill for more linear tasks (such as just running some simple commands inside of a deployed guest OS). In this post, I'll explore how I use [vRA Action Based eXtensibility (ABX)](https://blogs.vmware.com/management/2020/09/vra-abx-flow.html) to do just that.
|
||||
|
||||
### The Goal
|
||||
My ABX action is going to use PowerCLI to perform a few steps inside a deployed guest OS (Windows-only for this demonstration):
|
||||
|
@ -69,9 +69,9 @@ resources:
|
|||
In the Resources section of the cloud template, I'm going to add a few properties that will tell the ABX script how to connect to the appropriate vCenter and then the VM.
|
||||
- `vCenter`: The vCenter server where the VM will be deployed, and thus the server which PowerCLI will authenticate against. In this case, I've only got one vCenter, but a larger environment might have multiples. Defining this in the cloud template makes it easy to select automagically if needed. (For instance, if I had a `bow-vcsa` and a `dre-vcsa` for my different sites, I could do something like `vCenter: '${input.site}-vcsa.lab.bowdre.net'` here.)
|
||||
- `vCenterUser`: The username with rights to the VM in vCenter. Again, this doesn't have to be a static assignment.
|
||||
- `templateUser`: This is the account that will be used by `Invoke-VmScript` to log in to the guest OS. My template will use the default `Administrator` account for non-domain systems, but the `lab\vra` service account on domain-joined systems (using the `adJoin` input I [set up earlier](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template)).
|
||||
- `templateUser`: This is the account that will be used by `Invoke-VmScript` to log in to the guest OS. My template will use the default `Administrator` account for non-domain systems, but the `lab\vra` service account on domain-joined systems (using the `adJoin` input I [set up earlier](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/#cloud-template)).
|
||||
|
||||
I'll also include the `adminsList` input from earlier so that can get passed to ABX as well. And I'm going to add in an `adJoin` property (mapped to the [existing `input.adJoin`](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template)) so that I'll have that to work with later.
|
||||
I'll also include the `adminsList` input from earlier so that can get passed to ABX as well. And I'm going to add in an `adJoin` property (mapped to the [existing `input.adJoin`](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/#cloud-template)) so that I'll have that to work with later.
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers": true}
|
||||
|
@ -479,7 +479,7 @@ Before I can test the new action, I'll need to first add an extensibility subscr
|
|||
I'll be using this to call my new `configureGuest` action - so I'll name the subscription `Configure Guest`. I tie it to the `Compute Post Provision` event, and bind my action:
|
||||
![Creating the new subscription](20210903_new_subscription_1.png)
|
||||
|
||||
I do have another subscription on that event already, [`VM Post-Provisioning`](/adding-vm-notes-and-custom-attributes-with-vra8#extensibility-subscription) which is used to modify the VM object with notes and custom attributes. I'd like to make sure that my work inside the guest happens after that other subscription is completed, so I'll enable blocking and give it a priority of `2`:
|
||||
I do have another subscription on that event already, [`VM Post-Provisioning`](/adding-vm-notes-and-custom-attributes-with-vra8/#extensibility-subscription) which is used to modify the VM object with notes and custom attributes. I'd like to make sure that my work inside the guest happens after that other subscription is completed, so I'll enable blocking and give it a priority of `2`:
|
||||
![Adding blocking to Configure Guest](20210903_new_subscription_2.png)
|
||||
|
||||
After hitting the **Save** button, I go back to that other `VM Post-Provisioning` subscription, set it to enable blocking, and give it a priority of `1`:
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: "Tailscale Serve in a Docker Compose Sidecar"
|
||||
date: 2023-12-30
|
||||
lastmod: 2024-02-07
|
||||
lastmod: "2024-10-21T01:37:12Z"
|
||||
description: "Using Docker Compose to deploy containerized applications and make them available via Tailscale Serve and Tailscale Funnel"
|
||||
featured: false
|
||||
toc: true
|
||||
|
@ -43,7 +43,7 @@ miniflux.runtimeterror.dev {
|
|||
|
||||
*and so on...* You get the idea. This approach works well for services I want/need to be public, but it does require me to manage those DNS records and keep track of which app is on which port. That can be kind of tedious.
|
||||
|
||||
And I don't really need all of these services to be public. Not because they're particularly sensitive, but I just don't really have a reason to share my personal [Miniflux](https://github.com/miniflux/v2) or [CyberChef](https://github.com/gchq/CyberChef) instances with the world at large. Those would be great candidates to proxy with [Tailscale Serve](/tailscale-ssh-serve-funnel#tailscale-serve) so they'd only be available on my tailnet. Of course, with that setup I'd then have to differentiate the services based on external port numbers since they'd all be served with the same hostname. That's not ideal either.
|
||||
And I don't really need all of these services to be public. Not because they're particularly sensitive, but I just don't really have a reason to share my personal [Miniflux](https://github.com/miniflux/v2) or [CyberChef](https://github.com/gchq/CyberChef) instances with the world at large. Those would be great candidates to proxy with [Tailscale Serve](/tailscale-ssh-serve-funnel/#tailscale-serve) so they'd only be available on my tailnet. Of course, with that setup I'd then have to differentiate the services based on external port numbers since they'd all be served with the same hostname. That's not ideal either.
|
||||
|
||||
```shell
|
||||
sudo tailscale serve --bg --https 8443 8180 # [tl! .cmd]
|
||||
|
@ -86,6 +86,31 @@ Tailscale [just published a blog post](https://tailscale.com/blog/docker-tailsca
|
|||
|
||||
Replace the ports and protocols and hostnames and such, and you'll be good to go.
|
||||
|
||||
**Update 2024-10-20**: I recently learned that you can use the `${TS_CERT_DOMAIN}` placeholder to avoid having to hardcode a hostname into the `serve-config.json`. That makes the config even easier to reuse:
|
||||
|
||||
```json
|
||||
// torchlight! {"lineNumbers": true}
|
||||
{ // [tl! collapse:start]
|
||||
"TCP": {
|
||||
"443": {
|
||||
"HTTPS": true
|
||||
}
|
||||
},// [tl! collapse:end]
|
||||
"Web": {
|
||||
"${TS_CERT_DOMAIN}:443": { // [tl! collapse:start]
|
||||
"Handlers": {
|
||||
"/": {
|
||||
"Proxy": "http://127.0.0.1:8000"
|
||||
}
|
||||
}
|
||||
}
|
||||
}//, uncomment to enable funnel [tl! collapse:end]
|
||||
// "AllowFunnel": {
|
||||
// "${TS_CERT_DOMAIN}:443": true
|
||||
// }
|
||||
}
|
||||
```
|
||||
|
||||
A compose config using this setup might look something like this:
|
||||
|
||||
```yaml
|
||||
|
|
|
@ -8,6 +8,7 @@ toc: true
|
|||
categories: Backstage
|
||||
tags:
|
||||
- bunny
|
||||
- cicd
|
||||
- cloudflare
|
||||
- hugo
|
||||
- meta
|
||||
|
|
|
@ -12,7 +12,7 @@ title: vRA8 Automatic Deployment Naming - Another Take
|
|||
toc: false
|
||||
---
|
||||
|
||||
A [few days ago](/vra8-custom-provisioning-part-four#automatic-deployment-naming), I shared how I combined a Service Broker Custom Form with a vRO action to automatically generate a unique and descriptive deployment name based on user inputs. That approach works *fine* but while testing some other components I realized that calling that action each time a user makes a selection isn't necessarily ideal. After a bit of experimentation, I settled on what I believe to be a better solution.
|
||||
A [few days ago](/vra8-custom-provisioning-part-four/#automatic-deployment-naming), I shared how I combined a Service Broker Custom Form with a vRO action to automatically generate a unique and descriptive deployment name based on user inputs. That approach works *fine* but while testing some other components I realized that calling that action each time a user makes a selection isn't necessarily ideal. After a bit of experimentation, I settled on what I believe to be a better solution.
|
||||
|
||||
Instead of setting the "Deployment Name" field to use an External Source (vRO), I'm going to configure it to use a Computed Value. This is a bit less flexible, but all the magic happens right there in the form without having to make an expensive vRO call.
|
||||
![Computed Value option](Ivv0ia8oX.png)
|
||||
|
|
|
@ -85,7 +85,7 @@ The last step before testing is to click that *Enable* button to activate the cu
|
|||
Cool! So it's dynamically generating the deployment name based on selections made on the form. Now that it works, I can go back to the custom form and set the "Deployment Name" field to be invisible just like the "Project" one.
|
||||
|
||||
### Per-site network selection
|
||||
So far, vRA has been automatically placing VMs on networks based solely on [which networks are tagged as available](/vra8-custom-provisioning-part-one#using-tags-for-resource-placement) for the selected site. I'd like to give my users a bit more control over which network their VMs get attached to, particularly as some networks may be set aside for different functions or have different firewall rules applied.
|
||||
So far, vRA has been automatically placing VMs on networks based solely on [which networks are tagged as available](/vra8-custom-provisioning-part-one/#using-tags-for-resource-placement) for the selected site. I'd like to give my users a bit more control over which network their VMs get attached to, particularly as some networks may be set aside for different functions or have different firewall rules applied.
|
||||
|
||||
As a quick recap, I've got five networks available for vRA, split across my two sites using tags:
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ Looking back, that's kind of a lot. I can see why I've been working on this for
|
|||
In production, I'll want to be able to deploy to different computer clusters spanning multiple vCenters. That's a bit difficult to do on a single physical server, but I still wanted to be able to simulate that sort of dynamic resource selection. So for development and testing in my lab, I'll be using two sites - `BOW` and `DRE`. I ditched the complicated "just because I can" vSAN I'd built previously and instead spun up two single-host nested clusters, one for each of my sites:
|
||||
![vCenter showing the BOW and DRE clusters](KUCwEgEhN.png)
|
||||
|
||||
Those hosts have one virtual NIC each on a standard switch connected to my home network, and a second NIC each connected to the ["isolated" internal lab network](vmware-home-lab-on-intel-nuc-9#networking) with all the VLANs for the guests to run on:
|
||||
Those hosts have one virtual NIC each on a standard switch connected to my home network, and a second NIC each connected to the ["isolated" internal lab network](/vmware-home-lab-on-intel-nuc-9/#networking) with all the VLANs for the guests to run on:
|
||||
![dvSwitch showing attached hosts and dvPortGroups](y8vZEnWqR.png)
|
||||
|
||||
### vRA setup
|
||||
|
|
|
@ -17,7 +17,7 @@ Picking up after [Part Two](/vra8-custom-provisioning-part-two), I now have a pr
|
|||
|
||||
### Active Directory
|
||||
#### Adding an AD endpoint
|
||||
Remember how I [used the built-in vSphere plugin](/vra8-custom-provisioning-part-two#interlude-connecting-vro-to-vcenter) to let vRO query my vCenter(s) for VMs with a specific name? And how that required first configuring the vCenter endpoint(s) in vRO? I'm going to take a very similar approach here.
|
||||
Remember how I [used the built-in vSphere plugin](/vra8-custom-provisioning-part-two/#interlude-connecting-vro-to-vcenter) to let vRO query my vCenter(s) for VMs with a specific name? And how that required first configuring the vCenter endpoint(s) in vRO? I'm going to take a very similar approach here.
|
||||
|
||||
So as before, I'll first need to run the preinstalled "Add an Active Directory server" workflow:
|
||||
![Add an Active Directory server workflow](uUDJXtWKz.png)
|
||||
|
|
|
@ -1,53 +1,4 @@
|
|||
+++
|
||||
reply = false
|
||||
toc = false
|
||||
usePageBundles = false
|
||||
showDate = false
|
||||
showShare = false
|
||||
showReadTime = false
|
||||
timeless = true
|
||||
title = "SimpleX Chat"
|
||||
+++
|
||||
> You can [contact me on SimpleX Chat](https://l.runtimeterror.dev/simplex-chat-invite) by clicking that link or scanning the QR code below.
|
||||
|
||||
![QR code](/images/simplex-invite.png)
|
||||
|
||||
[SimpleX Chat](https://simplex.chat/) is a secure messaging solution with a strong emphasis on user privacy. It's (naturally) end-to-end encrypted, doesn't require (or collect) *any* information about you in order to sign up, doesn't use any persistent user identifiers (not even a randomly-generated one), is fully decentralized, and is *not* affiliated with any cryptocurrency project/scam.
|
||||
|
||||
Incoming messages are routed through a pool of servers so that your conversations don't all follow the same path - and no server knows anything about conversations that aren't routed through it. Servers only hold your messages long enough to ensure they get to you, and those messages exist only in the encrypted database on your device once they've been delivered. (Fortunately, SimpleX makes it easy to back up that database and restore it on a new device so you don't lose any messages or contacts.)
|
||||
|
||||
The app is also packed with other features like disappearing messages, encrypted file transfers, encrypted voice messages, encrypted audio and video calls, decentralized private groups, and a cool incognito mode which connects new conversations to a randomly-generated profile instead of your primary one. There's even a [CLI client](https://github.com/simplex-chat/simplex-chat/blob/stable/docs/CLI.md)!
|
||||
|
||||
## Servers
|
||||
You can easily host your own [simplexmq server](https://github.com/simplex-chat/simplexmq) for handling your inbound message queue, and I've done just that; in fact, I've deployed three! And, as one of my closest internet friends, *you're welcome to use them as well.*
|
||||
|
||||
Just add these in the SimpleX app at **Settings > Network & servers > SMP servers > + Add server...**. Enable the option to use them for new connections, and they'll be added to the pool used for incoming messages in new conversations. If you want to use them immediately for existing conversations, go into each conversation's options menu and use the **Switch receiving address** option. You can also *disable* the option to use the default servers for new conversations if you only want messages to be routed through specific servers, but that does increase the likelihood of concurrent conversations being routed the same way. More servers, more path options, less metadata in any one place.
|
||||
|
||||
---
|
||||
![QR code](/images/smp-vpota-to.png)
|
||||
|
||||
`smp://kYx5LmVD9FMM8hJN4BQqL4WmeUNZn8ipXsX2UkBoiHE=@smp.vpota.to`
|
||||
|
||||
[![Uptime](https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fjbowdre%2Fupptime%2Fmaster%2Fapi%2Fsmp-vpota-to-5223%2Fuptime.json)](https://status.runtimeterror.dev/history/smp-vpota-to-5223)
|
||||
|
||||
[netdata](https://l.runtimeterror.dev/smp_status)
|
||||
|
||||
type: redirect
|
||||
target: https://l.runtimeterror.dev/simplex-chat-invite
|
||||
---
|
||||
|
||||
![QR code](/images/smp1-vpota-to.png)
|
||||
|
||||
`smp://TbUrGydawdVKID0Lvix14UkaN-WarFgqXx4kaEG8Trw=@smp1.vpota.to`
|
||||
|
||||
[![Uptime](https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fjbowdre%2Fupptime%2Fmaster%2Fapi%2Fsmp1-vpota-to-5223%2Fuptime.json)](https://status.runtimeterror.dev/history/smp1-vpota-to-5223)
|
||||
|
||||
[netdata](https://l.runtimeterror.dev/smp1_status)
|
||||
|
||||
---
|
||||
|
||||
![QR code](/images/smp2-vpota-to.png)
|
||||
|
||||
`smp://tNfQisxTQ9MhKpFDTbx9RnjgWigtxF1a26jroy5-rR4=@smp2.vpota.to`
|
||||
|
||||
[![Uptime](https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fjbowdre%2Fupptime%2Fmaster%2Fapi%2Fsmp2-vpota-to-5223%2Fuptime.json)](https://status.runtimeterror.dev/history/smp2-vpota-to-5223)
|
||||
|
||||
[netdata](https://l.runtimeterror.dev/smp2_status)
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# Quick script to serve gemini locally
|
||||
hugo --environment local -D
|
||||
agate --content public --hostname localhost
|
|
@ -1,12 +0,0 @@
|
|||
# [runtimeterror $]
|
||||
=> /about Adventures in self-hosting and other technological frustrations.
|
||||
{{ $pages := .Pages -}}
|
||||
{{ $pages = where site.RegularPages "Type" "in" site.Params.mainSections -}}
|
||||
|
||||
### Posts
|
||||
{{ range $pages }}
|
||||
=> {{ .RelPermalink }} {{ .Date.Format "2006-01-02" }} {{ .Title }}
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
=> https://runtimeterror.dev This site on the big web
|
|
@ -1,10 +0,0 @@
|
|||
=> / 💻 [runtimeterror $]
|
||||
# {{ .Title }}
|
||||
|
||||
{{- range .Pages }}
|
||||
=> {{ .RelPermalink }} {{ .Date.Format "2006-01-02" }} {{ .Title }}
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
=> / Home
|
||||
=> https://runtimeterror.dev{{ replace (replace .RelPermalink "/gemini" "" 1) "index.gmi" "" }} This page on the big web
|
|
@ -1,59 +0,0 @@
|
|||
{{- $scratch := newScratch -}}{{- $scratch.Set "ref" 1 -}}
|
||||
=> / 💻 [runtimeterror $]
|
||||
{{ if .Params.Date }}
|
||||
{{- $postDate := .Date.Format "2006-01-02" }}
|
||||
{{- $updateDate := .Lastmod.Format "2006-01-02" }}
|
||||
{{- $postDate }}{{ if ne $postDate $updateDate }} ~ {{ $updateDate }}{{ end }}
|
||||
{{- end }}
|
||||
# {{ .Title }}
|
||||
{{/* The bulk of this regex magic was inspired by https://brainbaking.com/post/2021/04/using-hugo-to-launch-a-gemini-capsule/ */}}
|
||||
{{ range $content := split .RawContent "\n\n" }}
|
||||
{{- $blockRef := $scratch.Get "ref" -}}
|
||||
{{- $content := $content | replaceRE `#{4,} ` "### " -}}{{/* reduce headings to a max of 3 levels */}}
|
||||
{{- $content := $content | replaceRE `(?m:^- (.+?)$)` "\n* $1" -}}{{/* convert unordered lists */}}
|
||||
{{- $content := $content | replaceRE `(?m:^(?:\d+). (.+?)$)` "* $1" -}}{{/* convert ordered lists */}}
|
||||
{{- $content := $content | replaceRE `\n?\[\^(.+?)\]:\s*.*` "" -}}{{/* remove footnote definitions */}}
|
||||
{{- $content := $content | replaceRE `\[\^(.+?)\]` "" -}}{{/* remove footnote anchors */}}
|
||||
{{- $content := $content | replaceRE `((?m:^(?:\|.*\|)+\n?)+)` "```\n$1\n```\n" -}}{{/* render markdown tables as plaintext ascii */}}
|
||||
{{- $content := $content | replaceRE "(?m:^`([^`]*)`$)" "```\n$1\n```\n" -}}{{/* convert single-line inline code to blocks */}}
|
||||
{{- $content := $content | replaceRE `\{\{%\snotice.*%\}\}` "<-- note -->" -}}{{/* convert hugo notices */}}
|
||||
{{- $content := $content | replaceRE `\{\{%\s/notice.*%\}\}` "<-- /note -->" -}}
|
||||
{{- $content := $content | replaceRE `(?:(?:<!--)|(?:#)|(?:\/\/))\s*torchlight!.*\n` "" -}}{{/* remove torchlight markup */}}
|
||||
{{- $content := $content | replaceRE `(?:(?:<!--)|(?:#)|(?:\/\/))*\s*\[tl!.*\].*` "" -}}
|
||||
{{- $content := $content | replaceRE `(?m:^\[!\[(.*)\]\(.*\)\]\((.*)\)$)` "=> $2 $1" -}}{{/* remove images from uptime links */}}
|
||||
{{- $content := $content | replaceRE `(?m:^\s*(?:(?:\*|\-)\s+)?\[(.*)\]\((.*)\)$)` "=> $2 $1" -}}{{/* convert links already on own line */}}
|
||||
{{- $content := $content | replaceRE `(?m:^!\[(.*)\]\((.+?)\)$)` "=> $2 Image: $1" -}}{{/* convert embedded images */}}
|
||||
{{- $links := findRE `\[.+?\]\(.+?\)` $content -}}
|
||||
{{- $scratch.Set "content" $content -}}
|
||||
{{- range $links -}}
|
||||
{{- $ref := $scratch.Get "ref" -}}
|
||||
{{- $contentInLoop := $scratch.Get "content" -}}
|
||||
{{- $url := (printf "%s #%d" . $ref) -}}
|
||||
{{- $contentInLoop := replace $contentInLoop . $url -}}
|
||||
{{- $scratch.Set "content" $contentInLoop -}}
|
||||
{{- $scratch.Set "ref" (add $ref 1) -}}
|
||||
{{- end -}}
|
||||
{{- $content := $scratch.Get "content" | replaceRE `\[(.+?)\]\((.+?)\) #(\d+)` "$1 [$3]" }}
|
||||
{{- $content | safeHTML }}
|
||||
{{- range $links -}}
|
||||
{{- $ref := $scratch.Get "ref" -}}
|
||||
{{- $url := (printf "%s #%d" . $blockRef) }}
|
||||
=> {{ $url | replaceRE `\[(.+?)\]\((.+?)\) #(\d+)` "$2 [$3] $1" }}
|
||||
{{- $blockRef = add $blockRef 1 -}}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
---
|
||||
{{ $subject := printf "Re: %s" .Title -}}
|
||||
{{ $subject := urlquery $subject | replaceRE `\+` "%20" }}
|
||||
|
||||
=> mailto:wheel.east.brief@clkdmail.com?subject={{ $subject }} 📧 Reply by email
|
||||
{{ $related := first 3 (where (where .Site.RegularPages.ByDate.Reverse ".Params.tags" "intersect" .Params.tags) "Permalink" "!=" .Permalink) }}
|
||||
{{ if $related }}
|
||||
## Related articles
|
||||
{{ range $related }}
|
||||
=> {{ replace .RelPermalink "/gemini" "" 1}} {{ .Title }}{{ end }}{{ end }}
|
||||
---
|
||||
|
||||
=> / Home
|
||||
=> https://runtimeterror.dev{{ replace (replace .RelPermalink "/gemini" "" 1) "index.gmi" "" }} This page on the big web
|
Before Width: | Height: | Size: 54 KiB |
Before Width: | Height: | Size: 41 KiB |
Before Width: | Height: | Size: 42 KiB |
Before Width: | Height: | Size: 42 KiB |