Merge branch 'main' into drafts
Commit dc2101837f: 31 changed files with 407 additions and 230 deletions
.envrc (new file)
@@ -0,0 +1,2 @@
+#!/usr/bin/env direnv
+use flake .
@@ -1,15 +1,16 @@
-name: Deploy to Neocities
+name: Deploy to Production

 # only run on changes to main
 on:
   schedule:
     - cron: 0 13 * * *
+  workflow_dispatch:
   push:
     branches:
       - main

 concurrency: # prevent concurrent deploys doing strange things
-  group: deploy-to-neocities
+  group: deploy-to-prod
   cancel-in-progress: true

 # Default to bash
@@ -40,9 +41,25 @@ jobs:
         run: |
           npm i @torchlight-api/torchlight-cli
           npx torchlight
-      - name: Deploy to Neocities
+      - name: Deploy HTML to Neocities
         uses: bcomnes/deploy-to-neocities@v1
         with:
           api_token: ${{ secrets.NEOCITIES_API_TOKEN }}
           cleanup: true
           dist_dir: public
+      - name: Connect to Tailscale
+        uses: tailscale/github-action@v2
+        with:
+          oauth-client-id: ${{ secrets.TS_API_CLIENT_ID }}
+          oauth-secret: ${{ secrets.TS_API_CLIENT_SECRET }}
+          tags: ${{ secrets.TS_TAG }}
+      - name: Install SSH key
+        uses: shimataro/ssh-key-action@v2
+        with:
+          key: ${{ secrets.SSH_KEY }}
+          name: id_rsa
+          known_hosts: ${{ secrets.SSH_KNOWN_HOSTS }}
+      - name: Deploy GMI to Agate
+        run: |
+          rsync -avz --delete --exclude='*.html' --exclude='*.css' --exclude='*.js' -e ssh public/ deploy@${{ secrets.GMI_HOST }}:${{ secrets.GMI_CONTENT_PATH }}
.gitignore (vendored)
@@ -4,5 +4,6 @@
 /package.json
 /public/
 /resources/
-/.env*
+/.env
+/.direnv/
+/.certificates/
@@ -1 +1 @@
-[![Neocities Deployment Status](https://github.com/jbowdre/runtimeterror/actions/workflows/deploy-to-neocities.yml/badge.svg)](https://github.com/jbowdre/runtimeterror/actions/workflows/deploy-to-neocities.yml)
+[![Deployment Status](https://github.com/jbowdre/runtimeterror/actions/workflows/deploy-to-prod.yml/badge.svg)](https://github.com/jbowdre/runtimeterror/actions/workflows/deploy-to-prod.yml)
@@ -7,20 +7,33 @@ languageCode = "en"
 DefaultContentLanguage = "en"
 enableInlineShortcodes = true

+# define gemini media type
+[mediaTypes]
+[mediaTypes.'text/gemini']
+suffixes = ["gmi"]
+
 # Automatically add content sections to main menu
 # sectionPagesMenu = "main"

 [outputs]
-home = ['html', 'rss']
+home = ['html', 'rss', 'gemini']
 section = ['html']
-taxonomy = ['html',]
-term = ['html', 'rss']
+taxonomy = ['html']
+term = ['html', 'rss', 'gemini']
+page = ['html', 'rss', 'gemini']

 # rename rss output from index.xml to feed.xml
 [outputFormats]
 [outputFormats.rss]
 mediatype = "application/rss"
 baseName = "feed"
+# gemini output
+[outputFormats.gemini]
+mediatype = "text/gemini"
+isPlainText = true
+isHTML = false
+protocol = "gemini://"
+permalinkable = true

 [permalinks]
 posts = ":filename"
@@ -143,9 +143,9 @@ title = "omg.lol"
 url = "https://jbowdre.lol"

 [[socialLinks]]
-icon = "fa-solid fa-sticky-note"
-title = "Scribbles 'n Bits"
-url = "https://scribbles.jbowdre.lol"
+icon = "fa-solid fa-satellite"
+title = "Gemlog"
+url = "https://capsule.jbowdre.lol/gemlog/"

 [[socialLinks]]
 icon = "fa-solid fa-circle-user"
@@ -7,6 +7,7 @@ kudos = false
 +++

 We're not sure what you were looking for but it's not here.

 ![Animated GIF from the movie "The Naked Gun". A man in the foreground proclaims "Please disperse. Nothing to see here." while a building explodes in the background.](/images/nothing-to-see-here.gif)

 Maybe head back [home](/)?
@@ -15,28 +15,27 @@ Once I grew up[^2], I found a career in system administration, and I leveraged m

 I'm now part of a small platform engineering team within that same large corporation, focused on leveraging DevOps thinking and tools to help our internal customers modernize how they operate IT, build code, and ship products, while designing solutions to help them accomplish those goals. It's a great blend of my virtual infrastructure operations background, hobbyist development experience, and hunger for solving problems, and I really enjoy applying these skills to solve interesting challenges at scale.

-On my off time, I tinker with new [projects](/categories/self-hosting) in my little homelab (and share some of those adventures here). I also help out on Google's product support forums as a [Product Expert](https://productexperts.withgoogle.com/what-it-is), where I support Pixel phones, earbuds, and watches, as well as Chromebooks (primarily with Linux-related queries). Helping users troubleshoot their issues scratches my problem-solving itch, and it keeps me connected with some really great like-minded tech enthusiasts.
+On my off time, I tinker with new [projects](/categories/self-hosting) in my little homelab (and share some of those adventures here). I love experimenting with new (to me) technologies, and I've found that actually _using_ something is typically the best way to learn it.

 On weekends, I race my daily-driven 2014 Subaru BRZ in local [autocross events](https://l.runtimeterror.dev/my-autox-vids) or wrench on my 1974 Volkswagen Karmann Ghia.

 And in the free time I have left, I game on my Steam Deck.

-See what I've been up to on:
+### See what I've been up to on:
 - [GitHub](https://github.com/jbowdre)
-- [Scribbles 'n Bits](https://scribbes.jbowdre.lol)
+- [Gemlog](https://capsule.jbowdre.lol/gemlog/)
 - [status.lol](https://status.jbowdre.lol)
 - [social.lol](https://social.lol/@jbowdre)
 - [CounterSocial](https://counter.social/@john_b)
 - [/now](https://now.jbowdre.lol)

-Connect with me via:
+### Connect with me via:
 - [SimpleX Chat](/simplex/)
-- [Session](https://p.runtimeterror.dev/session-id)
+- [Signal](https://signal.me/#eu/lyHZbMnlM16O0w48j3rshYBofO0K-iXOt9LGwln7TS-fNKEHCrxH3La325q8IjRU)
 - [Matrix](https://matrix.to/#/@jbowdre:omg.lol)
-- [XMPP](xmpp://john@chat.vpota.to)
+- [XMPP](https://conversations.im/i/jbowdre@omg.lol?omemo-sid-1374125881=a620f3c57733601a6646f6f13a71c86fc9be8dd4126fd158ef3e0a26beb0b434)
 - [Electronic Mail](mailto:jbowdre@omg.lol)
 - [PGP: 613F B70C 4FA7 A077](https://l.runtimeterror.dev/pgp)


 [^1]: Congrats? And also, *thank you.*
 [^2]: A bit. I'm still in the "fake it until you make" it phase of adulthood.
@@ -134,6 +134,7 @@ homeassistant:
 ```
+
 I'm using the [Home Assistant Operating System virtual appliance](https://www.home-assistant.io/installation/alternative#install-home-assistant-operating-system), so `/media` is already symlinked to `/root/media` inside the Home Assistant installation directory. So I'll just log into that shell and create the `snaps` subdirectory:

 ```shell
 mkdir -p /media/snaps # [tl! .cmd_root]
 ```
@@ -149,6 +150,7 @@ Now that I've captured the snap, I need to figure out how to attach it to the no
 I can't use the handy `!secret` expansion inside of the shell command, though, so I'll need a workaround to avoid sticking sensitive details directly in my `configuration.yaml`. I can use a dummy sensor to hold the value, and then use the `{{ states('sensor.$sensor_name') }}` template to retrieve it.

 So here we go:

 ```yaml
 # configuration.yaml [tl! focus:start]

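# The lines below are NOT part of the diff: they are a rough sketch of the
# dummy-sensor workaround described above. The sensor name, secret name, ntfy
# URL, and the `snap` variable are illustrative assumptions, not taken from the post.
template:
  - sensor:
      - name: "ntfy_topic"
        state: !secret ntfy_topic

shell_command:
  ntfy_put: >-
    curl --silent --upload-file {{ snap }}
    --header "Title: Motion detected"
    https://ntfy.example.com/{{ states('sensor.ntfy_topic') }}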
@@ -180,6 +182,7 @@ shell_command: # [tl! focus:9 highlight:6,1]
 ```
+
 Now I just need to replace the service call in the automation with the new `shell_command.ntfy_put` one:

 ```yaml
 # torchlight! {"lineNumbers": true}
 # exterior_motion.yaml # [tl! focus]
@@ -230,6 +233,7 @@ Well that guy seems sus - but hey, it worked!
 Of course, I'll also continue to get notified about that creeper in the backyard about every 15-20 seconds or so. That's not quite what I want. The _easy_ way to prevent an automation from firing constantly would be to [insert a `delay`](https://www.home-assistant.io/docs/scripts/#wait-for-time-to-pass-delay) action, but that would be a global delay rather than per-camera. I don't necessarily need to know every time the weirdo in the backyard moves, but I would like to know if he moves around to the side yard or driveway. So I needed something more flexible than an automation-wide delay.

 Instead, I'll create a 5-minute [`timer`](https://www.home-assistant.io/integrations/timer/) for each camera by simply adding this to my `configuration.yaml`:

 ```yaml
 # configuration.yaml
 timer:
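  # The entries below are NOT part of the diff: they sketch a plausible
  # continuation of this timer block. The per-camera timer names are
  # illustrative assumptions; the real entity names aren't shown here.
  motion_alert_backyard:
    duration: "00:05:00"
  motion_alert_driveway:
    duration: "00:05:00"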
@@ -310,6 +314,7 @@ That pretty much takes care of my needs for exterior motion alerts, and should k

 ### Managing interior alerts
 I've got a few interior cameras which I'd like to monitor too, so I'll start by just copying the exterior automation and updating the entity IDs:

 ```yaml
 # torchlight! {"lineNumbers": true}
 # interior_motion.yaml
@@ -361,6 +366,7 @@ But I don't typically want to get alerted by these cameras if my wife or I are h
 ![calendar](schedule.png)

 So then I'll just add another condition so that the automation will only fire during those calendar events:

 ```yaml
 # torchlight! {"lineNumbers": true}
 # interior_motion.yaml [tl! focus]
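# The lines below are NOT part of the diff: they are a rough sketch of what such
# a calendar-based condition could look like. The calendar entity name is an
# illustrative assumption, not taken from the post.
condition:
  - condition: state
    entity_id: calendar.interior_camera_alerts
    state: "on"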
@@ -517,6 +523,7 @@ icon: mdi:alarm-snooze
 I can then add that script to the camera dashboard in Home Assistant or pin it to the home controls on my Android phone for easy access.

 I'll also create another script for manually toggling interior alerts for when we're home at an odd time:

 ```yaml
 # torchlight! {"lineNumbers": true}
 # toggle_interior_alerts.yaml
@@ -41,12 +41,12 @@ Okay, enough background; let's get this thing going.
 #### Instance Deployment
 I started by logging into my Google Cloud account at https://console.cloud.google.com, and proceeded to create a new project (named `wireguard`) to keep my WireGuard-related resources together. I then navigated to **Compute Engine** and [created a new instance](https://console.cloud.google.com/compute/instancesAdd) inside that project. The basic setup is:

 | Attribute | Value |
-| --- | --- |
+|-----------------|------------------|
 | Name | `wireguard` |
-| Region | `us-east1` (or whichever [free-tier-eligible region](https://cloud.google.com/free/docs/gcp-free-tier/#compute) is closest) |
+| Region | `us-east1` |
 | Machine Type | `e2-micro` |
 | Boot Disk Size | 10 GB |
 | Boot Disk Image | Ubuntu 20.04 LTS |

 ![Instance creation](20211027_instance_creation.png)
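For readers who prefer the CLI, roughly the same instance could be created with `gcloud`; this is only a sketch, and the zone, image flags, and project ID below are assumptions rather than details from the post:

```shell
# Approximate gcloud equivalent of the console settings in the table above
# (zone, image family/project, and project ID are assumed)
gcloud compute instances create wireguard \
  --project=wireguard \
  --zone=us-east1-b \
  --machine-type=e2-micro \
  --image-family=ubuntu-2004-lts \
  --image-project=ubuntu-os-cloud \
  --boot-disk-size=10GB
```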
@@ -325,25 +325,25 @@ _Note: the version of the WireGuard app currently available on the Play Store (v

 Once it's installed, I open the app and click the "Plus" button to create a new tunnel, and select the _Create from scratch_ option. I click the circle-arrows icon at the right edge of the _Private key_ field, and that automatically generates this peer's private and public key pair. Simply clicking on the _Public key_ field will automatically copy the generated key to my clipboard, which will be useful for sharing it with the server. Otherwise I fill out the **Interface** section similarly to what I've done already:

 | Parameter | Value |
-| --- | --- |
+|-------------|--------------------|
 | Name | `wireguard-gcp` |
 | Private key | `{CB_PRIVATE_KEY}` |
 | Public key | `{CB_PUBLIC_KEY}` |
 | Addresses | `10.200.200.3/24` |
 | Listen port | |
 | DNS servers | `10.200.200.2` |
 | MTU | |

 I then click the **Add Peer** button to tell this client about the peer it will be connecting to - the GCP-hosted instance:

 | Parameter | Value |
-| --- | --- |
+|----------------------|-------------------------|
 | Public key | `{GCP_PUBLIC_KEY}` |
 | Pre-shared key | |
 | Persistent keepalive | |
 | Endpoint | `{GCP_PUBLIC_IP}:51820` |
 | Allowed IPs | `0.0.0.0/0` |

 I _shouldn't_ need the keepalive for the "Road Warrior" peers connecting to the GCP peer, but I can always set that later if I run into stability issues.
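For reference, the same client settings expressed in ordinary WireGuard (wg-quick) config syntax would look roughly like this sketch; the placeholders simply mirror the tables above:

```ini
# Sketch of the equivalent wg-quick configuration (placeholders as in the tables above)
[Interface]
PrivateKey = {CB_PRIVATE_KEY}
Address = 10.200.200.3/24
DNS = 10.200.200.2

[Peer]
PublicKey = {GCP_PUBLIC_KEY}
Endpoint = {GCP_PUBLIC_IP}:51820
AllowedIPs = 0.0.0.0/0
```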
@@ -259,13 +259,13 @@ I'll call it `dnsConfig` and put it in my `CustomProvisioning` folder.

 And then I create the following variables:

 | Variable | Value | Type |
-| --- | --- | --- |
+|--------------------|--------------------------|--------------|
 | `sshHost` | `win02.lab.bowdre.net` | string |
 | `sshUser` | `vra` | string |
 | `sshPass` | `*****` | secureString |
 | `dnsServer` | `[win01.lab.bowdre.net]` | Array/string |
 | `supportedDomains` | `[lab.bowdre.net]` | Array/string |

 `sshHost` is my new `win02` server that I'm going to connect to via SSH, and `sshUser` and `sshPass` should explain themselves. The `dnsServer` array will tell the script which DNS servers to try to create the record on; this will just be a single server in my lab, but I'm going to construct the script to support multiple servers in case one isn't reachable. And `supported domains` will be used to restrict where I'll be creating records; again, that's just a single domain in my lab, but I'm building this solution to account for the possibility where a VM might need to be deployed on a domain where I can't create a static record in this way so I want it to fail elegantly.
@@ -1,7 +1,7 @@
 ---
 title: "Displaying Data from a Tempest Weather Station on a Static Site"
 date: "2024-02-11T20:48:49Z"
-# lastmod: 2024-02-10
+lastmod: "2024-02-23T22:00:22Z"
 description: "Using a GitHub Actions workflow to retrieve data from an authenticated API, posting results to a publicly-accessible pastebin, and displaying them on a static web site."
 featured: false
 thumbnail: "finished-product.png"
@@ -674,8 +674,8 @@ const CLASS_MAP_WX = {
 'clear-night': 'fa-solid fa-moon',
 'cloudy': 'fa-solid fa-cloud',
 'foggy': 'fa-solid fa-cloud-showers-smog',
-'partly-cloudy-day': 'fa-solid fa-clouds-sun',
-'partly-cloudy-night': 'fa-solid fa-clouds-moon',
+'partly-cloudy-day': 'fa-solid fa-cloud-sun',
+'partly-cloudy-night': 'fa-solid fa-cloud-moon',
 'possibly-rainy-day': 'fa-solid fa-cloud-sun-rain',
 'possibly-rainy-night': 'fa-solid fa-cloud-moon-rain',
 'possibly-sleet-day': 'fa-solid fa-cloud-meatball',
@@ -13,23 +13,25 @@ tags:
 - meta
 - selfhosting
 ---
-I've lately seen some folks on [social.lol](https://social.lol) posting about their various strategies for automatically generating [Open Graph images](https://ogp.me/) for their [Eleventy](https://11ty.dev) sites. So this weekend I started exploring ways to do that for my [Hugo](https://gohugo.io) site.
+I've lately seen some folks on [social.lol](https://social.lol) posting about their various strategies for automatically generating [Open Graph images](https://ogp.me/) for their [Eleventy](https://11ty.dev) sites. So this weekend I started exploring how I could do that for my [Hugo](https://gohugo.io) site[^site].

-During my search, I came across a few different approaches using external services or additional scripts to run at build time, but I was hoping for a way to do this with Hugo's built-in tooling. I eventually came across a tremendously helpful post from Aaro titled [Generating OpenGraph images with Hugo](https://aarol.dev/posts/hugo-og-image/). This solution was exactly what I was after, as it uses Hugo's [image functions](https://gohugo.io/functions/images/filter/) to dynamically create a share image for each page.
+[^site]: You're looking at it.
+
+During my search, I came across a few different approaches using external services or additional scripts to run at build time, but I was hoping for a way to do this with Hugo's built-in tooling. I eventually came across a tremendously helpful post from [Aaro](https://aarol.dev/about/) titled [Generating OpenGraph images with Hugo](https://aarol.dev/posts/hugo-og-image/). This solution was exactly what I was after, as it uses Hugo's [image functions](https://gohugo.io/functions/images/filter/) to dynamically create a share image for each page.

 I ended up borrowing heavily from Aaro's approach while adding a few small variations for my OpenGraph images.
 - When sharing the home page, the image includes the site description.
 - When sharing a post, the image includes the post title.
-- ... but if the post has a thumbnail[^thumbnail] listed in the front matter, that gets overlaid in the corner.
+- ... and if the post has a thumbnail[^thumbnail] listed in the front matter, that gets overlaid in the corner.

 [^thumbnail]: My current theme doesn't make use of the thumbnails, but a previous theme did so I've got a bunch of posts with thumbnails still assigned. And now I've got a use for them again!

 Here's how I did it.

 ### New resources
-Based on Aaro's suggestions, I used [GIMP](https://www.gimp.org/) to create a 1200x600 image for the base. I'm not a graphic designer[^web] so I kept it simple while trying to match the theme, font, and colors used on the site.
+Based on Aaro's suggestions, I used [GIMP](https://www.gimp.org/) to create a 1200x600 image for the base. I'm not a graphic designer[^web] so I kept it simple while trying to match the site's theme.

-I had to install the Fira Mono font [Fira Mono `.ttf`](https://github.com/mozilla/Fira/blob/master/ttf/FiraMono-Regular.ttf) to my `~/.fonts/` folder so I could use it in GIMP.
+I had to install the Fira Mono font [Fira Mono `.ttf`](https://github.com/mozilla/Fira/blob/master/ttf/FiraMono-Regular.ttf) to my `~/.fonts/` folder so I could use it in GIMP, and I wound up with a decent recreation of the little "logo" at the top of the page.

 ![Red background with a command prompt displaying "[runtimeterror.dev] $" in white and red font.](og_base.png)

@@ -81,8 +83,10 @@ which is in turn loaded by `layouts/_defaults/baseof.html`:
 </head>
 ```

+So now the customized OpenGraph content will be loaded for each page.
+
 ### Aaro's OG image generation
-[Aaro's code](https://aarol.dev/posts/hugo-og-image/) provided the base functionality for what I need:
+[Aaro's code](https://aarol.dev/posts/hugo-og-image/) provided the base functionality for what I needed:

 ```jinja-html
 {{/* Generate opengraph image */}}
@@ -121,16 +125,16 @@ which is in turn loaded by `layouts/_defaults/baseof.html`:

 The [`resources.Get`](https://gohugo.io/functions/resources/get/) bits import the image and font resources to make them available to the [`images.Text`](https://gohugo.io/functions/images/text/) functions, which add the site and page title texts to the image using the designated color, size, placement, and font.

-The `resources.Copy` line moves the generated OG image into the post bundle directory and gives it a clean `og.png` name rather than the very-long randomly-generated name it would have by default.
+The `resources.Copy` line moves the generated OG image alongside the post itself and gives it a clean `og.png` name rather than the very-long randomly-generated name it would have by default.

-And then the `<meta ... />` lines insert the generated image into the page's `<head>` block so it can be rendered when the link is shared on sites which support OpenGraph.
+And then the `<meta />` lines insert the generated image into the page's `<head>` block so it can be rendered when the link is shared on sites which support OpenGraph.

-This is a great starting point for what I wanted to accomplish, but made some changes in my `opengraph.html` partial to tailor it to my needs.
+This is a great starting point for what I wanted to accomplish, but I made some changes to my `opengraph.html` partial to tailor it to my needs.

 ### My tweaks
 As I mentioned earlier, I wanted to have three slightly-different recipes for baking my OG images: one for the homepage, one for standard posts, and one for posts with an associated thumbnail. They all use the same basic code, though, so I wanted to be sure that my setup didn't repeat itself too much.

-My code starts with fetching my resources up front, and initializing an empty `$text` variable to hold the description or title:
+My code starts with fetching my resources up front, and initializing an empty `$text` variable to hold either the site description *or* post title:

 ```jinja-html
 {{ $img := resources.Get "og_base.png" }}
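{{/* The lines below are NOT part of the diff: a hypothetical continuation of the
     fetch-and-initialize step described above; the font asset path is an assumption. */}}
{{ $font := resources.Get "FiraMono-Regular.ttf" }}
{{ $text := "" }}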
@@ -165,11 +169,13 @@ If the page has a `thumbnail` parameter defined in the front matter, Hugo will
 The [`resources.Get` function](https://gohugo.io/functions/resources/get/) (little r) I used earlier works on *global* resources, like the image and font stored in the site's `assets/` directory. On the other hand, the [`Resources.Get` method](https://gohugo.io/methods/page/resources/) (big R) is used for loading *page* resources, like the file indicated by the page's `thumbnail` parameter.
 {{% /notice %}}

-And since I'm calling this method from inside a `with` branch I have to put a `$` in front of the method. Otherwise, the leading `.` would refer directly to the `thumbnail` parameter (which isn't a page and so doesn't have the method available[^scope]).
+Since I'm calling this method from inside a `with` block I use a `$` in front of the method name to get the [parent context](https://gohugo.io/functions/go-template/with/#understanding-context). Otherwise, the leading `.` would refer directly to the `thumbnail` parameter (which isn't a page and so doesn't have the method available[^scope]).

 [^scope]: Hugo scoping is kind of wild.

-Anyhoo, after the thumbnail is loaded, I use the [`Fit` image processing](https://gohugo.io/content-management/image-processing/#fit) to scale down the thumbnail and then call the [`images.Overlay` function](https://gohugo.io/functions/images/overlay/) to *overlay* it near the top right corner of the `og_base.png` image.
+Anyhoo, after the thumbnail is loaded, I use the [`Fit` image processing method](https://gohugo.io/content-management/image-processing/#fit) to scale down the thumbnail. It is then passed to the [`images.Overlay` function](https://gohugo.io/functions/images/overlay/) to *overlay* it near the top right corner of the `og_base.png` image[^placement].

+[^placement]: The overlay is placed using absolute X and Y coordinates. There's probably a way to tell it "offset the top-right corner of the overlay 20x20 from the top right of the base image" but I ran out of caffeine to figure that out at this time. Let me know if you know a trick!
+
 ```jinja-html
 {{ with $thumbnail }}
@@ -61,24 +61,23 @@ All that is to say that (as usual) I'll be embarking upon this project in Hard M
 ### Bill of Materials
 Let's start with the gear (hardware and software) I needed to make this work:

 | Hardware | Purpose |
-| --- | --- |
+|--------------------------------------------------------|-----------------------------------------------------------------------------|
 | [PINE64 Quartz64 Model-A 8GB Single Board Computer](https://pine64.com/product/quartz64-model-a-8gb-single-board-computer/) | kind of the whole point |
-| [ROCKPro64 12V 5A US Power Supply](https://pine64.com/product/rockpro64-12v-5a-us-power-supply/) | provies power for the the SBC |
+| [ROCKPro64 12V 5A US Power Supply](https://pine64.com/product/rockpro64-12v-5a-us-power-supply/) | provides power for the the SBC |
 | [Serial Console “Woodpecker” Edition](https://pine64.com/product/serial-console-woodpecker-edition/) | allows for serial console access |
 | [Google USB-C Adapter](https://www.amazon.com/dp/B071G6NLHJ/) | connects the console adapter to my Chromebook |
 | [Sandisk 64GB Micro SD Memory Card](https://www.amazon.com/dp/B00M55C1I2) | only holds the firmware; a much smaller size would be fine |
 | [Monoprice USB-C MicroSD Reader](https://www.amazon.com/dp/B00YQM8352/) | to write firmware to the SD card from my Chromebook |
 | [Samsung MUF-256AB/AM FIT Plus 256GB USB 3.1 Drive](https://www.amazon.com/dp/B07D7Q41PM) | ESXi boot device and local VMFS datastore |
-| ~~[Cable Matters 3 Port USB 3.0 Hub with Ethernet](https://www.amazon.com/gp/product/B01J6583NK)~~ | ~~for network connectivity and to host the above USB drive~~[^v1.10] |
 | [3D-printed open enclosure for QUARTZ64](https://www.thingiverse.com/thing:5308499) | protect the board a little bit while allowing for plenty of passive airflow |

 | Downloads | Purpose |
-| --- | --- |
+|----------------------------------------------------------|-------------------------------------------------------|
 | [ESXi ARM Edition](https://customerconnect.vmware.com/downloads/get-download?downloadGroup=ESXI-ARM) (v1.10) | hypervisor |
 | [Tianocore EDK II firmware for Quartz64](https://github.com/jaredmcneill/quartz64_uefi/releases) (2022-07-20) | firmare image |
 | [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) | easy way to write filesystem images to external media |
 | [Beagle Term](https://chrome.google.com/webstore/detail/beagle-term/gkdofhllgfohlddimiiildbgoggdpoea) | for accessing the Quartz64 serial console |

 ### Preparation
 #### Firmware media
@@ -110,10 +109,10 @@ Then it's time to write the image onto the USB drive:
 I'll need to use the Quartz64 serial console interface and ["Woodpecker" edition console USB adapter](https://pine64.com/product/serial-console-woodpecker-edition/) to interact with the board until I get ESXi installed and can connect to it with the web interface or SSH. The adapter comes with a short breakout cable, and I connect it thusly:

 | Quartz64 GPIO pin | Console adapter pin | Wire color |
-| --- | --- | --- |
+|-------------------|---------------------|------------|
 | 6 | `GND` | Brown |
 | 8 | `RXD` | Red |
 | 10 | `TXD` | Orange |

 I leave the yellow wire dangling free on both ends since I don't need a `+V` connection for the console to work.
 ![Console connection](console_connection.jpg)
@@ -122,14 +121,14 @@ To verify that I've got things working, I go ahead and pop the micro SD card con

 I'll need to use these settings for the connection (which are the defaults selected by Beagle Term):

 | Setting | Value |
-| -- | --- |
+|--------------|----------------|
 | Port | `/dev/ttyUSB0` |
 | Bitrate | `115200` |
 | Data Bit | `8 bit` |
 | Parity | `none` |
 | Stop Bit | `1` |
 | Flow Control | `none` |

 ![Beagle Term settings](beagle_term_settings.png)

@@ -78,11 +78,11 @@ I'm very pleased with how this quick little project turned out. Managing my shor

 And now I can hand out handy-dandy short links!

-| Link | Description|
-| --- | --- |
+| Link | Description |
+|---------------------------------|-----------------------------------------------------------|
 | [go.bowdre.net/coso](https://l.runtimeterror.dev/coso) | Follow me on CounterSocial |
 | [go.bowdre.net/conedoge](https://l.runtimeterror.dev/conedoge) | 2014 Subaru BRZ autocross videos |
 | [go.bowdre.net/cooltechshit](https://l.runtimeterror.dev/cooltechshit) | A collection of cool tech shit (references and resources) |
 | [go.bowdre.net/stuffiuse](https://l.runtimeterror.dev/stuffiuse) | Things that I use (and think you should use too) |
 | [go.bowdre.net/shorterer](https://l.runtimeterror.dev/shorterer) | This post! |

@@ -36,13 +36,13 @@ In this post, I'll describe what I did to get Gitea up and running on a tiny ARM
 ### Create the server
 I'll be deploying this on a cloud server with these specs:

 | | |
-| --- | --- |
+|------------------|-----------------------|
 | Shape | `VM.Standard.A1.Flex` |
 | Image | Ubuntu 22.04 |
 | CPU Count | 1 |
 | Memory (GB) | 6 |
 | Boot Volume (GB) | 50 |

 I've described the [process of creating a new instance on OCI in a past post](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#instance-creation) so I won't reiterate that here. The only gotcha this time is switching the shape to `VM.Standard.A1.Flex`; the [OCI free tier](https://docs.oracle.com/en-us/iaas/Content/FreeTier/freetier_topic-Always_Free_Resources.htm) allows two AMD Compute VMs (which I've already used up) as well as *up to four* ARM Ampere A1 instances[^free_ampere].

@@ -259,23 +259,23 @@ The format of PostgreSQL data changes with new releases, and that means that the
 {{% /notice %}}

 Let's go through the extra configs in a bit more detail:
 | Variable setting | Purpose |
-|:--- |:--- |
-|`USER_UID=1003` | User ID of the `git` user on the container host |
-|`USER_GID=1003` | GroupID of the `git` user on the container host |
-|`GITEA____APP_NAME=Gitea` | Sets the title of the site. I shortened it from `Gitea: Git with a cup of tea` because that seems unnecessarily long. |
-|`GITEA__log__MODE=file` | Enable logging |
-|`GITEA__openid__ENABLE_OPENID_SIGNIN=false` | Disable signin through OpenID |
-|`GITEA__other__SHOW_FOOTER_VERSION=false` | Anyone who hits the web interface doesn't need to know the version |
-|`GITEA__repository__DEFAULT_PRIVATE=private` | All repos will default to private unless I explicitly override that |
-|`GITEA__repository__DISABLE_HTTP_GIT=true` | Require that all Git operations occur over SSH |
-|`GITEA__server__DOMAIN=git.bowdre.net` | Domain name of the server |
-|`GITEA__server__SSH_DOMAIN=git.tadpole-jazz.ts.net` | Leverage Tailscale's [MagicDNS](https://tailscale.com/kb/1081/magicdns/) to tell clients how to SSH to the Tailscale internal IP |
-|`GITEA__server__ROOT_URL=https://git.bowdre.net/` | Public-facing URL |
-|`GITEA__server__LANDING_PAGE=explore` | Defaults to showing the "Explore" page (listing any public repos) instead of the "Home" page (which just tells about the Gitea project) |
-|`GITEA__service__DISABLE_REGISTRATION=true` | New users will not be able to self-register for access; they will have to be manually added by the Administrator account that will be created during the initial setup |
-|`GITEA__service_0X2E_explore__DISABLE_USERS_PAGE=true` | Don't allow browsing of user accounts |
-|`GITEA__ui__DEFAULT_THEME=arc-green` | Default to the darker theme |
+|:-------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `USER_UID=1003` | User ID of the `git` user on the container host |
+| `USER_GID=1003` | GroupID of the `git` user on the container host |
+| `GITEA____APP_NAME=Gitea` | Sets the title of the site. I shortened it from `Gitea: Git with a cup of tea` because that seems unnecessarily long. |
+| `GITEA__log__MODE=file` | Enable logging |
+| `GITEA__openid__ENABLE_OPENID_SIGNIN=false` | Disable signin through OpenID |
+| `GITEA__other__SHOW_FOOTER_VERSION=false` | Anyone who hits the web interface doesn't need to know the version |
+| `GITEA__repository__DEFAULT_PRIVATE=private` | All repos will default to private unless I explicitly override that |
+| `GITEA__repository__DISABLE_HTTP_GIT=true` | Require that all Git operations occur over SSH |
+| `GITEA__server__DOMAIN=git.bowdre.net` | Domain name of the server |
+| `GITEA__server__SSH_DOMAIN=git.tadpole-jazz.ts.net` | Leverage Tailscale's [MagicDNS](https://tailscale.com/kb/1081/magicdns/) to tell clients how to SSH to the Tailscale internal IP |
+| `GITEA__server__ROOT_URL=https://git.bowdre.net/` | Public-facing URL |
+| `GITEA__server__LANDING_PAGE=explore` | Defaults to showing the "Explore" page (listing any public repos) instead of the "Home" page (which just tells about the Gitea project) |
+| `GITEA__service__DISABLE_REGISTRATION=true` | New users will not be able to self-register for access; they will have to be manually added by the Administrator account that will be created during the initial setup |
+| `GITEA__service_0X2E_explore__DISABLE_USERS_PAGE=true` | Don't allow browsing of user accounts |
+| `GITEA__ui__DEFAULT_THEME=arc-green` | Default to the darker theme |

 Beyond the environment variables, I also defined a few additional options to allow the SSH passthrough to function. Mounting the `git` user's SSH config directory into the container will ensure that user keys defined in Gitea will also be reflected outside of the container, and setting the container to listen on local port `2222` will allow it to receive the forwarded SSH connections:

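As a rough sketch (not from the diff), those last two options might look something like this in the compose file; the image tag and host paths here are assumptions:

```yaml
# Hypothetical fragment of a docker-compose.yml for Gitea (image and paths assumed)
services:
  server:
    image: gitea/gitea:latest
    environment:
      - USER_UID=1003
      - USER_GID=1003
      # ...plus the GITEA__* settings from the table above...
    volumes:
      - /home/git/.ssh/:/data/git/.ssh   # expose Gitea-managed SSH keys to the host's git user
    ports:
      - "127.0.0.1:2222:22"              # container SSH listens locally to receive forwarded connections
```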
@@ -16,10 +16,10 @@ Connecting a deployed Windows VM to an Active Directory domain is pretty easy; j

 Fortunately, vRA 8 supports adding an Active Directory integration to handle staging computer objects in a designated OU. And vRA 8.3 even [introduced the ability](https://blogs.vmware.com/management/2021/02/whats-new-with-vrealize-automation-8-3-technical-overview.html#:~:text=New%20Active%20Directory%20Cloud%20Template%20Properties) to let blueprints override the relative DN path. That will be helpful in my case since I'll want the servers to be placed in different OUs depending on which site they get deployed to:

 | **Site** | **OU** |
-| --- | --- |
+|----------|--------------------------------------------|
 | `BOW` | `lab.bowdre.net/LAB/BOW/Computers/Servers` |
 | `DRE` | `lab.bowre.net/LAB/DRE/Computers/Servers` |


 I didn't find a lot of documentation on how make this work, though, so here's how I've implemented it in my lab (now running vRA 8.4.2).
@@ -42,26 +42,26 @@ The [cluster deployment steps](/tanzu-community-edition-k8s-homelab/#management-
 ![Identity Management section](identity_management_1.png)

 **LDAPS Identity Management Source**
 | Field | Value | Notes |
-| --- | --- | ---- |
+|----------------|---------------------------------------------------------------|---------------------------------------------|
 | LDAPS Endpoint | `win01.lab.bowdre.net:636` | LDAPS interface of my AD DC |
 | BIND DN | `CN=LDAP Bind,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net` | DN of an account with LDAP read permissions |
 | BIND Password | `*******` | Password for that account |

 **User Search Attributes**
 | Field | Value | Notes |
-| --- | --- | --- |
+|----------|----------------------------------|----------------------------------------------------------------------------------|
 | Base DN | `OU=LAB,DC=lab,DC=bowdre,DC=net` | DN for the top-level OU containing my users |
 | Filter | `objectClass=(person)` | |
 | Username | `sAMAccountName` | I want to auth as `john` rather than `john@lab.bowdre.net` (`userPrincipalName`) |

 **Group Search Attributes**
 | Field | Value | Notes |
-| --- | --- | --- |
+|-----------------|-----------------------------------|---------------------------------------------------------------|
 | Base DN | `OU=LAB,DC=lab,DC=bowdre,DC=net` | DN for OU containing my users |
 | Filter | `(objectClass=group)` | |
 | Name Attribute | `cn` | Common Name |
 | User Attribute | `DN` | Distinguished Name (capitalization matters!) |
 | Group Attribute | `member:1.2.840.113556.1.4.1941:` | Used to enumerate which groups a user is a member of[^member] |

 And I'll copy the contents of the base64-encoded CA certificate I downloaded earlier and paste them into the Root CA Certificate field.
@@ -47,15 +47,15 @@ A few days ago I migrated my original Snikket instance from Google Cloud (GCP) t

### Infrastructure setup

You can refer to my notes from last time for details on how I [created the Ubuntu 20.04 VM](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#instance-creation) and [configured the firewall rules](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#firewall-configuration) both at the cloud infrastructure level as well as within the host using `iptables`. Snikket does need a few additional [firewall ports](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/firewall.md) beyond what was needed for my Matrix setup:

| Port(s)           | Transport | Purpose                                                                                                    |
|-------------------|-----------|------------------------------------------------------------------------------------------------------------|
| `80, 443`         | TCP       | Web interface and group file sharing                                                                        |
| `3478-3479`       | TCP/UDP   | Audio/Video data proxy negotiation and discovery ([STUN/TURN](https://www.twilio.com/docs/stun-turn/faq))   |
| `5349-5350`       | TCP/UDP   | Audio/Video data proxy negotiation and discovery (STUN/TURN over TLS)                                       |
| `5000`            | TCP       | File transfer proxy                                                                                         |
| `5222`            | TCP       | Connections from clients                                                                                    |
| `5269`            | TCP       | Connections from other servers                                                                              |
| `60000-60100`[^4] | UDP       | Audio/Video data proxy (TURN data)                                                                          |

As a gentle reminder, Oracle's `iptables` configuration inserts a `REJECT all` rule at the bottom of each chain, so I needed to make sure that each of my `ALLOW` rules got inserted above that point. I used `iptables -L INPUT --line-numbers` to identify which line held the `REJECT` rule, and then used `iptables -I INPUT [LINE_NUMBER] -m state --state NEW -p [PROTOCOL] --dport [PORT] -j ACCEPT` to insert the new rules above that point.

```shell
@@ -165,10 +165,10 @@ sudo vi snikket.conf # [tl! .cmd]

A basic config only needs two parameters:

| Parameter             | Description                                                   |
|-----------------------|---------------------------------------------------------------|
| `SNIKKET_DOMAIN`      | The fully-qualified domain name that clients will connect to |
| `SNIKKET_ADMIN_EMAIL` | An admin contact for the server                               |
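
For illustration, a minimal `snikket.conf` with placeholder values would look something like this:

```shell
# snikket.conf - minimal example (domain and email are placeholders)
SNIKKET_DOMAIN=chat.example.com
SNIKKET_ADMIN_EMAIL=admin@example.com
```
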
That's it.

@@ -133,15 +133,15 @@ Now if I just enter `go/vcenter` I will go to the vSphere UI, while if I enter s

Some of my other golinks:

| Shortlink  | Destination URL                                                                      | Description                                                                                   |
|------------|---------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|
| `code`     | `https://github.com/search?type=code&q=user:jbowdre{{with .Path}}+{{.}}{{end}}`        | searches my code on Github                                                                       |
| `ipam`     | `https://ipam.lab.bowdre.net/{{with .Path}}tools/search/{{.}}{{end}}`                   | searches my lab phpIPAM instance                                                                 |
| `pdb`      | `https://www.protondb.com/{{with .Path}}search?q={{.}}{{end}}`                          | searches protondb                                                                                |
| `tailnet`  | `https://login.tailscale.com/admin/machines?q={{.Path}}`                                | searches my Tailscale admin panel for a machine name                                             |
| `sho`      | `https://www.shodan.io/{{with .Path}}search?query={{.}}{{end}}`                         | searches Shodan for interesting internet-connected systems                                       |
| `randpass` | `https://www.random.org/passwords/?num=1\u0026len=24\u0026format=plain\u0026rnd=new`    | generates a random 24-character string suitable for use as a password (`curl`-friendly)          |
| `wx`       | `https://wttr.in/{{ .Path }}`                                                           | local weather report based on geolocation or weather for a designated city (`curl`-friendly)     |
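
Since a couple of those destinations are `curl`-friendly, the shortlinks work just as well from a terminal as from a browser. A quick illustration, assuming the `go` short name resolves on your tailnet and that you tell `curl` to follow the redirect:

```shell
# generate a throwaway password and grab a weather report via golinks
curl -sL http://go/randpass
curl -sL http://go/wx/Nashville
```
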
#### Back up and restore

You can browse to `go/.export` to see a JSON-formatted listing of all configured shortcuts - or, if you're clever, you could do something like `curl http://go/.export -o links.json` to download a copy.

@@ -213,15 +213,15 @@ TS_SERVE_PORT=8080
TS_FUNNEL=1
```

| Variable Name              | Example                                  | Description                                                                                                                  |
|----------------------------|------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `TS_AUTHKEY`               | `tskey-auth-somestring-somelongerstring` | used for unattended auth of the new node, get one [here](https://login.tailscale.com/admin/settings/keys)                       |
| `TS_HOSTNAME`              | `tsdemo`                                 | optional Tailscale hostname for the new node[^hostname]                                                                         |
| `TS_STATE_DIR`             | `/var/lib/tailscale/`                    | required directory for storing Tailscale state, this should be mounted to the container for persistence                         |
| `TS_TAILSCALED_EXTRA_ARGS` | `--verbose=1`[^verbose]                  | optional additional [flags](https://tailscale.com/kb/1278/tailscaled#flags-to-tailscaled) for `tailscaled`                      |
| `TS_EXTRA_ARGS`            | `--ssh`[^ssh]                            | optional additional [flags](https://tailscale.com/kb/1241/tailscale-up) for `tailscale up`                                      |
| `TS_SERVE_PORT`            | `8080`                                   | optional application port to expose with [Tailscale Serve](https://tailscale.com/kb/1312/serve)                                 |
| `TS_FUNNEL`                | `1`                                      | if set (to anything), will proxy `TS_SERVE_PORT` **publicly** with [Tailscale Funnel](https://tailscale.com/kb/1223/funnel)     |

[^hostname]: This hostname will determine the fully-qualified domain name where the resource will be served: `https://[hostname].[tailnet-name].ts.net`. So you'll want to make sure it's a good one for what you're trying to do.
[^verbose]: Passing the `--verbose` flag to `tailscaled` increases the logging verbosity, which can be helpful if you need to troubleshoot.
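
Just to make the shape of things concrete, here's a rough sketch of how an environment file with those variables might be handed to a sidecar container at runtime. The image name is a placeholder, and whether `TS_SERVE_PORT`/`TS_FUNNEL` actually get honored depends on the entrypoint wiring described here, so treat this as illustrative rather than copy-paste ready:

```shell
# pass the variables from the (assumed) .env file into a hypothetical sidecar image,
# persisting Tailscale state so the node keeps its identity across restarts
docker run -d --name tailscale-sidecar \
  --env-file ./.env \
  -v /var/lib/tailscale:/var/lib/tailscale \
  tailscale-sidecar-image:latest
```
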
@@ -45,11 +45,11 @@ The Kubernetes node VMs will need to be attached to a network with a DHCP server

I'll also need to set aside a few static IPs for this project. These will need to be routable and within the same subnet as the DHCP range, but excluded from that DHCP range.

| IP Address                    | Purpose                               |
|-------------------------------|---------------------------------------|
| `192.168.1.60`                | Control plane for Management cluster  |
| `192.168.1.61`                | Control plane for Workload cluster    |
| `192.168.1.64 - 192.168.1.80` | IP range for Workload load balancer   |

### Prerequisites

@@ -67,22 +67,22 @@ I've now got a fully-functioning VMware lab, complete with a physical hypervisor

#### Overview

My home network uses the generic `192.168.1.0/24` address space, with the internet router providing DHCP addresses in the range `.100-.250`. I'm using the range `192.168.1.2-.99` for statically-configured IPs, particularly those within my lab environment. Here are the addresses being used by the lab so far:

| IP Address     | Hostname  | Purpose            |
|----------------|-----------|--------------------|
| `192.168.1.1`  |           | Gateway            |
| `192.168.1.5`  | `win01`   | AD DC, DNS         |
| `192.168.1.11` | `nuchost` | Physical ESXi host |
| `192.168.1.12` | `vcsa`    | vCenter Server     |

Of course, not everything that I'm going to deploy in the lab will need to be accessible from outside the lab environment. This goes for obvious things like the vMotion and vSAN networks of the nested ESXi hosts, but it will also be useful to have internal networks that can be used by VMs provisioned by vRA. So I'll be creating these networks:

| VLAN ID | Network          | Purpose    |
|---------|------------------|------------|
| 1610    | `172.16.10.0/24` | Management |
| 1620    | `172.16.20.0/24` | Servers-1  |
| 1630    | `172.16.30.0/24` | Servers-2  |
| 1698    | `172.16.98.0/24` | vSAN       |
| 1699    | `172.16.99.0/24` | vMotion    |

#### vSwitch1

I'll start by adding a second vSwitch to the physical host. It doesn't need a physical adapter assigned since this switch will be for internal traffic. I create two port groups: one tagged for the VLAN 1610 Management traffic, which will be useful for attaching VMs on the physical host to the internal network; and the second will use VLAN 4095 to pass all VLAN traffic to the nested ESXi hosts. And again, this vSwitch needs to have its security policy set to allow Promiscuous Mode and Forged Transmits. I also set the vSwitch to support an MTU of 9000 so I can use Jumbo Frames on the vMotion and vSAN networks.
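
For reference, the same vSwitch and port groups could be sketched out from the host's shell with `esxcli`. The Management port group name here is illustrative (not necessarily what I used), while `Isolated` matches the trunk port group referenced later:

```shell
# create the internal-only vSwitch and enable jumbo frames
esxcli network vswitch standard add --vswitch-name=vSwitch1
esxcli network vswitch standard set --vswitch-name=vSwitch1 --mtu=9000
# loosen the security policy so nested ESXi traffic flows
esxcli network vswitch standard policy security set --vswitch-name=vSwitch1 \
  --allow-promiscuous=true --allow-forged-transmits=true
# port group for attaching physical-host VMs to the internal Management VLAN
esxcli network vswitch standard portgroup add --vswitch-name=vSwitch1 --portgroup-name=d1610-Management
esxcli network vswitch standard portgroup set --portgroup-name=d1610-Management --vlan-id=1610
# trunk port group passing all VLANs through to the nested hosts
esxcli network vswitch standard portgroup add --vswitch-name=vSwitch1 --portgroup-name=Isolated
esxcli network vswitch standard portgroup set --portgroup-name=Isolated --vlan-id=4095
```
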
@@ -182,11 +182,11 @@ Satisfied with my work, I ran the `commit` and `save` commands. BOOM, this serve

### Nested vSAN Cluster

Alright, it's time to start building up the nested environment. To start, I grabbed the latest [Nested ESXi Virtual Appliance .ova](https://williamlam.com/nested-virtualization/nested-esxi-virtual-appliance), courtesy of William Lam. I went ahead and created DNS records for the hosts I'd be deploying, and I mapped out what IPs would be used on each VLAN:

| Hostname                | 1610-Management | 1698-vSAN      | 1699-vMotion   |
|-------------------------|-----------------|----------------|----------------|
| `esxi01.lab.bowdre.net` | `172.16.10.21`  | `172.16.98.21` | `172.16.99.21` |
| `esxi02.lab.bowdre.net` | `172.16.10.22`  | `172.16.98.22` | `172.16.99.22` |
| `esxi03.lab.bowdre.net` | `172.16.10.23`  | `172.16.98.23` | `172.16.99.23` |

Deploying the virtual appliances is just like any other "Deploy OVF Template" action. I placed the VMs on the `physical-cluster` compute resource, and selected to thin provision the VMDKs on the local datastore. I chose the "Isolated" VM network which uses VLAN 4095 to make all the internal VLANs available on a single portgroup.
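
Since that "Deploy OVF Template" dance has to be repeated for each of the three hosts, it could also be scripted with `ovftool`. A rough sketch only; the appliance filename, datacenter path, and credentials are placeholders:

```shell
# deploy one nested ESXi appliance with thin-provisioned disks onto the physical cluster
ovftool --acceptAllEulas --diskMode=thin \
  --name=esxi01 --datastore=local-datastore --network="Isolated" \
  ./Nested_ESXi_Appliance.ova \
  'vi://administrator%40vsphere.local@vcsa.lab.bowdre.net/Lab/host/physical-cluster/'
```
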
@@ -246,11 +246,11 @@ The [vRealize Easy Installer](https://docs.vmware.com/en/vRealize-Automation/8.2

Anyhoo, each of these VMs will need to be resolvable in DNS, so I started by creating some A records:

| FQDN                 | IP             |
|----------------------|----------------|
| `lcm.lab.bowdre.net` | `192.168.1.40` |
| `idm.lab.bowdre.net` | `192.168.1.41` |
| `vra.lab.bowdre.net` | `192.168.1.42` |
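
A quick check from any machine pointed at the lab DNS server confirms the new records resolve before kicking off the install:

```shell
# verify the three A records created above
for h in lcm idm vra; do nslookup "${h}.lab.bowdre.net"; done
```
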
I then attached the installer ISO to my Windows VM and ran through the installation from there.

![vRealize Easy Installer](42n3aMim5.png)

@@ -89,33 +89,33 @@ So far, vRA has been automatically placing VMs on networks based solely on [whi

As a quick recap, I've got five networks available for vRA, split across my two sites using tags:

| Name            | Subnet         | Site | Tags      |
|-----------------|----------------|------|-----------|
| d1620-Servers-1 | 172.16.20.0/24 | BOW  | `net:bow` |
| d1630-Servers-2 | 172.16.30.0/24 | BOW  | `net:bow` |
| d1640-Servers-3 | 172.16.40.0/24 | BOW  | `net:bow` |
| d1650-Servers-4 | 172.16.50.0/24 | DRE  | `net:dre` |
| d1660-Servers-5 | 172.16.60.0/24 | DRE  | `net:dre` |

I'm going to add additional tags to these networks to further define their purpose.

| Name            | Purpose    | Tags                   |
|-----------------|------------|------------------------|
| d1620-Servers-1 | Management | `net:bow`, `net:mgmt`  |
| d1630-Servers-2 | Front-end  | `net:bow`, `net:front` |
| d1640-Servers-3 | Back-end   | `net:bow`, `net:back`  |
| d1650-Servers-4 | Front-end  | `net:dre`, `net:front` |
| d1660-Servers-5 | Back-end   | `net:dre`, `net:back`  |

I *could* just use those tags to let users pick the appropriate network, but I've found that a lot of times users don't know why they're picking a certain network; they just know the IP range they need to use. So I'll take it a step further and add a giant tag to include the Site, Purpose, and Subnet, and this is what will ultimately be presented to the users:

| Name            | Tags                                                |
|-----------------|-----------------------------------------------------|
| d1620-Servers-1 | `net:bow`, `net:mgmt`, `net:bow-mgmt-172.16.20.0`   |
| d1630-Servers-2 | `net:bow`, `net:front`, `net:bow-front-172.16.30.0` |
| d1640-Servers-3 | `net:bow`, `net:back`, `net:bow-back-172.16.40.0`   |
| d1650-Servers-4 | `net:dre`, `net:front`, `net:dre-front-172.16.50.0` |
| d1660-Servers-5 | `net:dre`, `net:back`, `net:dre-back-172.16.60.0`   |

![Tagged networks](J_RG9JNPz.png)

@@ -8,7 +8,7 @@ showReadTime = false
timeless = true
title = "SimpleX Chat"
+++

> You can [contact me on SimpleX Chat](https://l.runtimeterror.dev/simplex-chat-invite) by clicking that link or scanning the QR code below.

![QR code](/images/simplex-invite.png)

@@ -28,9 +28,9 @@ Just add these in the SimpleX app at **Settings > Network & servers > SMP server

`smp://kYx5LmVD9FMM8hJN4BQqL4WmeUNZn8ipXsX2UkBoiHE=@smp.vpota.to`

[![Uptime](https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fjbowdre%2Fupptime%2Fmaster%2Fapi%2Fsmp-vpota-to-5223%2Fuptime.json)](https://status.runtimeterror.dev/history/smp-vpota-to-5223)

[netdata](https://l.runtimeterror.dev/smp_status)

---

@@ -38,9 +38,9 @@ Just add these in the SimpleX app at **Settings > Network & servers > SMP server

`smp://TbUrGydawdVKID0Lvix14UkaN-WarFgqXx4kaEG8Trw=@smp1.vpota.to`

[![Uptime](https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fjbowdre%2Fupptime%2Fmaster%2Fapi%2Fsmp1-vpota-to-5223%2Fuptime.json)](https://status.runtimeterror.dev/history/smp1-vpota-to-5223)

[netdata](https://l.runtimeterror.dev/smp1_status)

---

@@ -48,6 +48,6 @@ Just add these in the SimpleX app at **Settings > Network & servers > SMP server

`smp://tNfQisxTQ9MhKpFDTbx9RnjgWigtxF1a26jroy5-rR4=@smp2.vpota.to`

[![Uptime](https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fjbowdre%2Fupptime%2Fmaster%2Fapi%2Fsmp2-vpota-to-5223%2Fuptime.json)](https://status.runtimeterror.dev/history/smp2-vpota-to-5223)

[netdata](https://l.runtimeterror.dev/smp2_status)

27
flake.lock
Normal file

@@ -0,0 +1,27 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1708807242,
        "narHash": "sha256-sRTRkhMD4delO/hPxxi+XwLqPn8BuUq6nnj4JqLwOu0=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "73de017ef2d18a04ac4bfd0c02650007ccb31c2a",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

22
flake.nix
Normal file

@@ -0,0 +1,22 @@
{
  description = "runtimeterror build environment";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
  };

  outputs = { self, nixpkgs }:
    let
      pkgs = import nixpkgs { system = "x86_64-linux"; };
    in
    {
      devShells.x86_64-linux.default = pkgs.mkShell {
        packages = with pkgs; [
          agate
          go
          hugo
          nodePackages.npm
        ];
      };
    };
}
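
With the flake in place, entering the pinned build environment is just a matter of running `nix develop` from the repo root (assuming a Nix install with flakes enabled):

```shell
# drop into a shell with the tools defined in flake.nix on the PATH
nix develop
which hugo agate go npm
```
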
4
gem-build.sh
Executable file

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Quick script to serve gemini locally
hugo --environment local -D
agate --content public --hostname localhost

12
layouts/_default/index.gmi
Normal file

@@ -0,0 +1,12 @@
# [runtimeterror $]
=> /about Adventures in self-hosting and other technological frustrations.
{{ $pages := .Pages -}}
{{ $pages = where site.RegularPages "Type" "in" site.Params.mainSections -}}

### Posts
{{ range $pages }}
=> {{ .RelPermalink }} {{ .Date.Format "2006-01-02" }} {{ .Title }}
{{- end }}

---
=> https://runtimeterror.dev This site on the big web

10
layouts/_default/list.gmi
Normal file

@@ -0,0 +1,10 @@
=> / 💻 [runtimeterror $]
# {{ .Title }}

{{- range .Pages }}
=> {{ .RelPermalink }} {{ .Date.Format "2006-01-02" }} {{ .Title }}
{{- end }}

---
=> / Home
=> https://runtimeterror.dev{{ replace (replace .RelPermalink "/gemini" "" 1) "index.gmi" "" }} This page on the big web

57
layouts/_default/single.gmi
Normal file

@@ -0,0 +1,57 @@
{{- $scratch := newScratch -}}{{- $scratch.Set "ref" 1 -}}
=> / 💻 [runtimeterror $]
{{ if .Params.Date }}
{{- $postDate := .Date.Format "2006-01-02" }}
{{- $updateDate := .Lastmod.Format "2006-01-02" }}
{{- $postDate }}{{ if ne $postDate $updateDate }} ~ {{ $updateDate }}{{ end }}
{{- end }}
# {{ .Title }}
{{/* The bulk of this regex magic was inspired by https://brainbaking.com/post/2021/04/using-hugo-to-launch-a-gemini-capsule/ */}}
{{ range $content := split .RawContent "\n\n" }}
{{- $blockRef := $scratch.Get "ref" -}}
{{- $content := $content | replaceRE `#{4,} ` "### " -}}{{/* reduce headings to a max of 3 levels */}}
{{- $content := $content | replaceRE `(?m:^- (.+?)$)` "\n* $1" -}}{{/* convert unordered lists */}}
{{- $content := $content | replaceRE `(?m:^(?:\d+). (.+?)$)` "* $1" -}}{{/* convert ordered lists */}}
{{- $content := $content | replaceRE `\n?\[\^(.+?)\]:\s*.*` "" -}}{{/* remove footnote definitions */}}
{{- $content := $content | replaceRE `\[\^(.+?)\]` "" -}}{{/* remove footnote anchors */}}
{{- $content := $content | replaceRE `((?m:^(?:\|.*\|)+\n?)+)` "```\n$1\n```\n" -}}{{/* render markdown tables as plaintext ascii */}}
{{- $content := $content | replaceRE "(?m:^`([^`]*)`$)" "```\n$1\n```\n" -}}{{/* convert single-line inline code to blocks */}}
{{- $content := $content | replaceRE `\{\{%\snotice.*%\}\}` "<-- note -->" -}}{{/* convert hugo notices */}}
{{- $content := $content | replaceRE `\{\{%\s/notice.*%\}\}` "<-- /note -->" -}}
{{- $content := $content | replaceRE `((\/\/)|#)\s*torchlight!.*\n` "" -}}{{/* remove torchlight markup */}}
{{- $content := $content | replaceRE `(?:(?:<!--)|(?:#)|(?:\/\/))*\s*\[tl!.*\].*` "" -}}
{{- $content := $content | replaceRE `(?m:^\[!\[(.*)\]\(.*\)\]\((.*)\)$)` "=> $2 $1" -}}{{/* remove images from uptime links */}}
{{- $content := $content | replaceRE `(?m:^\s*(?:(?:\*|\-)\s+)?\[(.*)\]\((.*)\)$)` "=> $2 $1" -}}{{/* convert links already on own line */}}
{{- $content := $content | replaceRE `(?m:^!\[(.*)\]\((.+?)\)$)` "=> $2 Image: $1" -}}{{/* convert embedded images */}}
{{- $links := findRE `\[.+?\]\(.+?\)` $content -}}
{{- $scratch.Set "content" $content -}}
{{- range $links -}}
{{- $ref := $scratch.Get "ref" -}}
{{- $contentInLoop := $scratch.Get "content" -}}
{{- $url := (printf "%s #%d" . $ref) -}}
{{- $contentInLoop := replace $contentInLoop . $url -}}
{{- $scratch.Set "content" $contentInLoop -}}
{{- $scratch.Set "ref" (add $ref 1) -}}
{{- end -}}
{{- $content := $scratch.Get "content" | replaceRE `\[(.+?)\]\((.+?)\) #(\d+)` "$1 [$3]" }}
{{- $content | safeHTML }}
{{- range $links -}}
{{- $ref := $scratch.Get "ref" -}}
{{- $url := (printf "%s #%d" . $blockRef) }}
=> {{ $url | replaceRE `\[(.+?)\]\((.+?)\) #(\d+)` "$2 [$3] $1" }}
{{- $blockRef = add $blockRef 1 -}}
{{ end }}
{{ end }}

---
{{ $subject := printf "Re: %s" .Title -}}
=> mailto:blog@runtimeterror.dev?subject={{ urlquery $subject | replaceRE `\+` "%20" }} 📧 Reply via email
{{ $related := first 3 (where (where .Site.RegularPages.ByDate.Reverse ".Params.tags" "intersect" .Params.tags) "Permalink" "!=" .Permalink) }}
{{ if $related }}
## Related articles
{{ range $related }}
=> {{ replace .RelPermalink "/gemini" "" 1}} {{ .Title }}{{ end }}{{ end }}
---

=> / Home
=> https://runtimeterror.dev{{ replace (replace .RelPermalink "/gemini" "" 1) "index.gmi" "" }} This page on the big web