Compare commits
51 commits
849114f3ea
...
bef8bb928a
bef8bb928a
d25a0d9bce
dcc98e9dcd
0bb6d35bd5
662ffec95b
502a1cba44
d00045e3a7
85d0e3070e
62dd04f7e8
f9a7c93897
0a2795e74d
3536f61fc9
f42fb4d223
e7763e0932
dbd744b16e
be65101f0f
20b3ce4635
9703b65ac6
45da3eeee0
521c6ac15d
8c0ce91997
20478c4136
a11cabb5d6
e2aadc408c
db81899195
b0d03d86a6
095ff60fed
7a0f61709f
d034917e46
c252af3e86
65ad77ca98
66ced7a369
be65cac0ee
5e3cf1dc8c
f3fbcad44d
9b8835a8e8
d1bd0fd61e
e6062166b9
2fe2d81979
12f48593cd
be3a6a86a9
517d908716
11ae484114
273c535e6d
0d0832cab6
51e99dddcc
745bf1e0ab
6ee422259b
2499f34bfa
eb3ed30398
172a5eff28
@@ -7,7 +7,7 @@ description: "This is a new post about..."
 featured: false
 toc: true
 comment: true
-series: Tips # Projects, Scripts
+series: Tips # Projects, Code
 tags:
 - 3dprinting
 - activedirectory
@@ -2,7 +2,7 @@ baseURL = "https://runtimeterror.dev"
 theme = "risotto"
 title = "runtimeterror"
 copyright = "© 2018-2023 [runtimeterror](https://runtimeterror.dev)"
-paginate = 3
+paginate = 10
 languageCode = "en"
 DefaultContentLanguage = "en"
 enableInlineShortcodes = true
@@ -11,7 +11,16 @@ enableInlineShortcodes = true
 # sectionPagesMenu = "main"
 
 [outputs]
-home = ["HTML", "RSS", "JSON"]
+home = ['html', 'rss', 'json']
+section = ['html']
+taxonomy = ['html',]
+term = ['html', 'rss']
+
+# rename rss output from index.xml to feed.xml
+[outputFormats]
+[outputFormats.rss]
+mediatype = "application/rss"
+baseName = "feed"
 
 [permalinks]
 posts = ":filename"
@@ -41,6 +50,9 @@ enableInlineShortcodes = true
 [services.instagram]
 disableInlineCSS = true
 
+[services.rss]
+limit = 20
+
 [services.twitter]
 disableInlineCSS = true
 
@@ -17,9 +17,9 @@
 weight = 1
 
 [[main]]
-identifier = "scripts"
-name = "scripts"
-url = "/series/scripts/"
+identifier = "code"
+name = "code"
+url = "/series/code/"
 weight = 1
 
 [[main]]
@@ -2,10 +2,9 @@ noindex = false
 usePageBundles = true
 description = "while (true) { bugs++; }"
 mainSections = ["posts"]
-fallBackOgImage = "images/broken-computer.svg"
+fallBackOgImage = "images/broken-computer.png"
 numberOfFeaturedPosts = 5
 numberOfRelatedPosts = 5
-author = "jbowdre"
 
 indexTitle = ".-. ..- -. - .. -- . - . .-. .-. --- .-."
 
@@ -15,9 +14,16 @@ utterancesRepo = "jbowdre/site-comments"
 utterancesIssueTerm = "og:title"
 utterancesTheme = "gruvbox-dark"
 
+analytics = true
+
 [theme]
 palette = "runtimeterror"
 
+[author]
+name = "John Bowdre"
+email = "jbowdre@omg.lol"
+username = "jbowdre"
+
 # Sidebar: about/bio
 [about]
 title = "runtimeterror"
@@ -125,15 +131,30 @@ icon = "fa-solid fa-circle-user"
 title = "CounterSocial"
 url = "https://counter.social/@john_b"
 
+[[socialLinks]]
+icon = "fa fa-mastodon"
+title = "Mastodon"
+url = "https://social.lol/@jbowdre"
+
+[[socialLinks]]
+icon = "fa-solid fa-heart"
+title = "omg.lol"
+url = "https://jbowdre.omg.lol"
+
 [[socialLinks]]
 icon = "fa-solid fa-comments"
 title = "SimpleX Chat"
-url = "https://runtimeterror.dev/simplex"
+url = "/simplex"
 
+[[socialLinks]]
+icon = "fa fa-matrix-org"
+title = "Matrix"
+url = "https://matrix.to/#/@jbowdre:omg.lol"
+
 [[socialLinks]]
 icon = "fa-solid fa-envelope"
 title = "Email"
-url = "mailto:ops@runtimeterror.dev"
+url = "mailto:jbowdre@omg.lol"
 
 [[powerLinks]]
 title = "hugo"
@@ -154,3 +175,19 @@ url = "https://torchlight.dev"
 [[powerLinks]]
 title = "cabin"
 url = "https://withcabin.com/privacy/runtimeterror.dev"
+
+[[verifyLinks]]
+title = "omg.lol"
+url = "https://proven.lol/cd10d3"
+
+[[verifyLinks]]
+title = "CounterSocial"
+url = "https://counter.social/@john_b"
+
+[[verifyLinks]]
+title = "Mastodon"
+url = "https://social.lol/@jbowdre"
+
+[[verifyLinks]]
+title = "GitHub"
+url = "https://github.com/jbowdre"
@@ -1 +1,2 @@
 comments = false
+analytics = false
@@ -1 +1,2 @@
 comments = false
+analytics = false
@@ -24,9 +24,14 @@ And in the free time I have left, I game on my Steam Deck.
 See what I've been up to on:
 - [GitHub](https://github.com/jbowdre)
 - [CounterSocial](https://counter.social/@john_b)
 - [status.lol](https://status.lol/jbowdre)
 - [/now](https://jbowdre.omg.lol/now)
 
-Securely chat with me via:
+Connect with me via:
 - [SimpleX Chat](/simplex/)
 - [Matrix](https://matrix.to/#/@jbowdre:omg.lol)
+- [Electronic Mail](mailto:jbowdre@omg.lol)
+- [PGP: 613F B70C 4FA7 A077](https://home.omg.lol/keychain/jbowdre/pgp)
+
+
+[^1]: Congrats? And also, *thank you.*
@@ -1,5 +0,0 @@
----
-date: 2019-05-28
-type: section
-layout: "archives"
----
@@ -1,5 +1,4 @@
 +++
 aliases = ["all_posts", "articles"]
 title = "Index of Posts"
-tags = ["index"]
 +++
@@ -1,5 +1,5 @@
 ---
-series: Scripts
+series: Code
 date: "2021-04-29T08:34:30Z"
 usePageBundles: true
 thumbnail: 20210723-script.png
Three new image files (22 KiB, 617 KiB, 347 KiB)
@@ -0,0 +1,539 @@
---
title: "Automating Security Camera Notifications With Home Assistant and Ntfy"
date: 2023-11-25
lastmod: 2023-11-27
description: "Using the power of Home Assistant automations and Ntfy push notifications to level-up security camera motion detections."
featured: true
alias: automating-security-camera-notifications-with-home-assistant-and-ntfy
toc: true
comment: true
thumbnail: thumbnail.png
series: Projects
tags:
- api
- automation
- homeassistant
---
A couple of months ago, I [wrote about](/easy-push-notifications-with-ntfy) how I was using a self-hosted instance of [ntfy](https://ntfy.sh) to help streamline notification pushes from a variety of sources. I closed that post with a quick look at how I had [integrated ntfy into my Home Assistant setup](/easy-push-notifications-with-ntfy/#home-assistant) for some basic notifications.

I've now used that immense power to enhance the notifications I get from the [Reolink security cameras](https://reolink.com/us/product/rlk16-820d8-a/) scattered around my house. I selected Reolink cameras specifically because I knew they were supported by Home Assistant, and for the on-device animal/person/vehicle detection which allows a bit of extra control over which types of motion events trigger a notification or other action. I've been very happy with this choice, but I have found that the Reolink app itself can be a bit clunky:

- The app lets you send notifications on a schedule (I only want notifications from the indoor cameras during work hours when no one is home), but doesn't make it easy to override that schedule (like when it's a holiday and we're all at home anyway).
- Push notifications don't include an image capture, so when I receive a notification about a person in my backyard I have to open the app, select the correct camera, select the Playback option, and scrub back and forth until I see whatever the camera saw.

I figured I could combine the excellent [Reolink integration for Home Assistant](https://www.home-assistant.io/integrations/reolink) with Home Assistant's powerful Automation platform and ntfy to get more informative notifications and more flexible alert schedules. Here's the route I took.

### Alert on motion detection
{{% notice note "Ntfy Integration" %}}
Since manually configuring ntfy in Home Assistant via the [RESTful Notifications integration](/easy-push-notifications-with-ntfy/#notify-configuration), I found that an [ntfy-specific integration](https://github.com/ivanmihov/homeassistant-ntfy.sh) was available through the [Home Assistant Community Store](https://hacs.xyz/) addon. That integration is a bit more flexible, so I've switched my configuration to use it instead:
```yaml
# configuration.yaml
notify:
  - name: ntfy
    platform: rest # [tl! --:8 collapse:8]
    method: POST_JSON
    headers:
      Authorization: !secret ntfy_token
    data:
      topic: home_assistant
    title_param_name: title
    message_param_name: message
    resource: !secret ntfy_url
    platform: ntfy # [tl! ++:3]
    url: !secret ntfy_url
    token: !secret ntfy_token
    topic: home_assistant
```
{{% /notice %}}
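Either way, it's worth confirming the ntfy side works before any automations get involved. A minimal manual publish looks something like this (the server URL and token here are placeholders, not my real values):
```shell
curl \ # [tl! .cmd]
  --header 'Authorization: Bearer tk_EXAMPLE_TOKEN' \
  --header 'Title: Hello from curl' \
  --data 'If you can read this, the topic works.' \
  https://ntfy.example.com/home_assistant
```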
The Reolink integration exposes a number of entities for each camera. For triggering a notification on motion detection, I'll be interested in the [binary sensor](https://www.home-assistant.io/integrations/binary_sensor/) entities named like `binary_sensor.$location_$type` (such as `binary_sensor.backyard_person` and `binary_sensor.driveway_vehicle`), the state of which will transition from `off` to `on` when the selected motion type is detected.

So I'll begin by crafting a simple automation which will push out a notification whenever any of the listed cameras detect a person or vehicle[^vehicle]:
```yaml
# torchlight! {"lineNumbers": true}
# exterior_motion.yaml
alias: Exterior Motion Alerts
description: ""
trigger:
  - platform: state
    entity_id:
      - binary_sensor.backyard_person
      - binary_sensor.driveway_person
      - binary_sensor.driveway_vehicle
      - binary_sensor.east_side_front_person
      - binary_sensor.east_side_rear_person
      - binary_sensor.west_side_person
    from: "off"
    to: "on"
condition: []
action:
  - service: notify.ntfy
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
```
[^vehicle]: Hopefully I only need to worry about vehicles in the driveway. _Please don't drive through my backyard, thanks._

{{% notice tip "Templating" %}}
That last line takes advantage of Jinja templating and [trigger variables](https://www.home-assistant.io/docs/automation/templating/#state) so that the resulting notification displays the friendly name of whichever `binary_sensor` triggered the automation run. This way, I'll see something like "Backyard Person" instead of the entity ID listed earlier.
{{% /notice %}}
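That rendering can be previewed without tripping a sensor by pasting the expression into Developer Tools > Template; a quick illustrative check:
```jinja
{{ state_attr('binary_sensor.backyard_person', 'friendly_name') }}
{# should render something like "Backyard Person" #}
```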
I'll step outside and see if it works...

![backyard person](backyard_person.png)
### Capture a snapshot
Each Reolink camera also exposes a `camera.$location_sub` entity which represents the video stream from the connected camera. I can add another action to the notification so that it will grab a snapshot, but I'll also need a way to match the `camera` entity to the correct `binary_sensor` entity. I can do that by adding a variable set to the bottom of the automation:
```yaml
# torchlight! {"lineNumbers": true}
# exterior_motion.yaml [tl! focus]
alias: Exterior Motion Alerts
description: ""
trigger: # [tl! collapse:start]
  - platform: state
    entity_id:
      - binary_sensor.backyard_person
      - binary_sensor.driveway_person
      - binary_sensor.driveway_vehicle
      - binary_sensor.east_side_front_person
      - binary_sensor.east_side_rear_person
      - binary_sensor.west_side_person
    from: "off"
    to: "on" # [tl! collapse:end]
condition: []
action:
  - service: camera.snapshot # [tl! ++:start focus:start]
    target:
      entity_id: "{{ cameras[trigger.to_state.entity_id] }}"
    data:
      filename: /media/snaps/motion.jpg # [tl! ++:end focus:end]
  - service: notify.ntfy
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
variables: # [tl! ++:start focus:start]
  cameras:
    binary_sensor.backyard_person: camera.backyard_sub
    binary_sensor.driveway_person: camera.driveway_sub
    binary_sensor.driveway_vehicle: camera.driveway_sub
    binary_sensor.east_side_front_person: camera.east_side_front_sub
    binary_sensor.east_side_rear_person: camera.east_side_rear_sub
    binary_sensor.west_side_person: camera.west_side_sub # [tl! ++:end focus:end]
```
That `"{{ cameras[trigger.to_state.entity_id] }}"` template will look up the ID of the triggering `binary_sensor` and return the matching `camera` entity, and the [`camera.snapshot` service](https://www.home-assistant.io/integrations/camera/#service-snapshot) will then save a snapshot to the designated location (`/media/snaps/motion.jpg`).

Before this will actually work, though, I need to reconfigure Home Assistant to allow access to the storage location, and I should also go ahead and pre-create the folder so there aren't any access issues.

```yaml
# configuration.yaml
homeassistant:
  allowlist_external_dirs:
    - "/media/snaps/"
```

I'm using the [Home Assistant Operating System virtual appliance](https://www.home-assistant.io/installation/alternative#install-home-assistant-operating-system), so `/media` is already symlinked to `/root/media` inside the Home Assistant installation directory. So I'll just log into that shell and create the `snaps` subdirectory:
```shell
mkdir -p /media/snaps # [tl! .cmd_root]
```
Rather than walking outside each time I want to test this, I'll just use the Home Assistant Developer Tools to manually toggle the state of the `binary_sensor.backyard_person` entity to `on`, and I should then be able to see the snapshot in the Media interface:

![backyard snap](backyard_snap.png)

Woo, look at me making progress!

### Attach the snapshot
Now that I've captured the snap, I need to figure out how to attach it to the notification. Ntfy [supports inline image attachments](https://docs.ntfy.sh/publish/#attach-local-file), which is handy, but it expects those to be delivered via an HTTP `PUT` action. Neither my original HTTP `POST` approach nor the ntfy integration supports this currently, so I had to use the [`shell_command` integration](https://www.home-assistant.io/integrations/shell_command/) to make the call directly.
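For reference, the raw ntfy attachment call looks roughly like this outside of Home Assistant (the server URL is a placeholder):
```shell
curl \ # [tl! .cmd]
  --upload-file motion.jpg \
  --header 'Title: Motion detected!' \
  https://ntfy.example.com/home_assistant
```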
I can't use the handy `!secret` expansion inside of the shell command, though, so I'll need a workaround to avoid sticking sensitive details directly in my `configuration.yaml`. I can use a dummy sensor to hold the value, and then use the `{{ states('sensor.$sensor_name') }}` template to retrieve it.

So here we go:
```yaml
# configuration.yaml [tl! focus:start]

# dummy sensor to make ntfy secrets available to template engine
template:
  - sensor:
      - name: ntfy_token
        state: !secret ntfy_token # [tl! highlight]
      - name: ntfy_url
        state: !secret ntfy_url # [tl! highlight focus:end]

notify:
  - name: ntfy
    platform: ntfy
    url: !secret ntfy_url
    token: !secret ntfy_token
    topic: home_assistant
# [tl! highlight:10,1]
shell_command: # [tl! focus:9 highlight:6,1]
  ntfy_put: >
    curl
    --header 'Title: {{ title }}'
    --header 'Priority: {{ priority }}'
    --header 'Filename: {{ filename }}'
    --header 'Authorization: Bearer {{ states('sensor.ntfy_token') }}'
    --upload-file '{{ file }}'
    --header 'Message: {{ message }}'
    --url '{{ states('sensor.ntfy_url') }}/home_assistant'
```
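Before touching the automation, a quick manual run from Developer Tools > Services confirms the plumbing (these values are just a test payload):
```yaml
service: shell_command.ntfy_put
data:
  title: Test notification
  message: Testing the ntfy PUT pipeline
  file: /media/snaps/motion.jpg
```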
Now I just need to replace the service call in the automation with the new `shell_command.ntfy_put` one:
```yaml
# torchlight! {"lineNumbers": true}
# exterior_motion.yaml # [tl! focus]
alias: Exterior Motion Alerts
description: ""
trigger: # [tl! collapse:start]
  - platform: state
    entity_id:
      - binary_sensor.backyard_person
      - binary_sensor.driveway_person
      - binary_sensor.driveway_vehicle
      - binary_sensor.east_side_front_person
      - binary_sensor.east_side_rear_person
      - binary_sensor.west_side_person
    from: "off"
    to: "on" # [tl! collapse:end]
condition: []
action:
  - service: camera.snapshot
    target:
      entity_id: "{{ cameras[trigger.to_state.entity_id] }}"
    data:
      filename: /media/snaps/motion.jpg
  - service: notify.ntfy # [tl! --:start focus:start]
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}" # [tl! --:end]
  - service: shell_command.ntfy_put # [tl! ++:start reindex(-4)]
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
      file: /media/snaps/motion.jpg # [tl! ++:end focus:end]
variables: # [tl! collapse:start]
  cameras:
    binary_sensor.backyard_person: camera.backyard_sub
    binary_sensor.driveway_person: camera.driveway_sub
    binary_sensor.driveway_vehicle: camera.driveway_sub
    binary_sensor.east_side_front_person: camera.east_side_front_sub
    binary_sensor.east_side_rear_person: camera.east_side_rear_sub
    binary_sensor.west_side_person: camera.west_side_sub # [tl! collapse:end]
```
Now when I wander outside...

![backyard_person_attached](backyard_person_attached.png)

Well, that guy seems sus - but hey, it worked!
### Backoff rate limit
Of course, I'll also continue to get notified about that creeper in the backyard every 15-20 seconds. That's not quite what I want. The _easy_ way to prevent an automation from firing constantly would be to [insert a `delay`](https://www.home-assistant.io/docs/scripts/#wait-for-time-to-pass-delay) action, but that would be a global delay rather than a per-camera one. I don't necessarily need to know every time the weirdo in the backyard moves, but I would like to know if he moves around to the side yard or driveway. So I needed something more flexible than an automation-wide delay.

Instead, I'll create a 5-minute [`timer`](https://www.home-assistant.io/integrations/timer/) for each camera by simply adding this to my `configuration.yaml`:
```yaml
# configuration.yaml
timer:
  backyard_person:
    duration: "00:05:00"
  driveway_person:
    duration: "00:05:00"
  driveway_vehicle:
    duration: "00:05:00"
  east_front_person:
    duration: "00:05:00"
  east_rear_person:
    duration: "00:05:00"
  west_person:
    duration: "00:05:00"
```
Back in the automation, I'll add a new `timers` variable set which will help to map the `binary_sensor` to the corresponding `timer` object. I can then append an action to start the timer, and a condition so that the automation will only fire if the timer for a given camera is not currently running. I'll also set the automation's `mode` to `single` (so that it will only run once at a time), and set the `max_exceeded` value to `silent` (so that multiple triggers won't raise any errors).
```yaml
# torchlight! {"lineNumbers": true}
# exterior_motion.yaml # [tl! focus]
alias: Exterior Motion Alerts
description: ""
trigger: # [tl! collapse:start]
  - platform: state
    entity_id:
      - binary_sensor.backyard_person
      - binary_sensor.driveway_person
      - binary_sensor.driveway_vehicle
      - binary_sensor.east_side_front_person
      - binary_sensor.east_side_rear_person
      - binary_sensor.west_side_person
    from: "off"
    to: "on" # [tl! collapse:end]
condition: [] # [tl! focus:3 --]
condition: # [tl! ++:2 reindex(-1)]
  - condition: template
    value_template: "{{ is_state(timers[trigger.to_state.entity_id], 'idle') }}"
action:
  - service: camera.snapshot
    target:
      entity_id: "{{ cameras[trigger.to_state.entity_id] }}"
    data:
      filename: /media/snaps/motion.jpg
  - service: notify.ntfy
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
  - service: shell_command.ntfy_put
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
      file: /media/snaps/motion.jpg
  - service: timer.start # [tl! focus:2 ++:2]
    target:
      entity_id: "{{ timers[trigger.to_state.entity_id] }}"
mode: single # [tl! focus:1 ++:1]
max_exceeded: silent
variables:
  cameras: # [tl! collapse:start]
    binary_sensor.backyard_person: camera.backyard_sub
    binary_sensor.driveway_person: camera.driveway_sub
    binary_sensor.driveway_vehicle: camera.driveway_sub
    binary_sensor.east_side_front_person: camera.east_side_front_sub
    binary_sensor.east_side_rear_person: camera.east_side_rear_sub
    binary_sensor.west_side_person: camera.west_side_sub # [tl! collapse:end]
  timers: # [tl! ++:start focus:start]
    binary_sensor.backyard_person: timer.backyard_person
    binary_sensor.driveway_person: timer.driveway_person
    binary_sensor.driveway_vehicle: timer.driveway_vehicle
    binary_sensor.east_side_front_person: timer.east_front_person
    binary_sensor.east_side_rear_person: timer.east_rear_person
    binary_sensor.west_side_person: timer.west_person # [tl! ++:end focus:end]
```
That pretty much takes care of my needs for exterior motion alerts, and should keep me informed if someone is poking around my house (or, more frequently, making a delivery).

### Managing interior alerts
I've got a few interior cameras which I'd like to monitor too, so I'll start by just copying the exterior automation and updating the entity IDs:
```yaml
# torchlight! {"lineNumbers": true}
# interior_motion.yaml
alias: Interior Motion Alerts
description: ""
trigger:
  - platform: state
    entity_id:
      - binary_sensor.kitchen_back_door_person
      - binary_sensor.garage_person
      - binary_sensor.garage_vehicle
      - binary_sensor.study_entryway_person
    from: "off"
    to: "on"
condition:
  - condition: template
    value_template: "{{ is_state(timers[trigger.to_state.entity_id], 'idle') }}"
action:
  - service: camera.snapshot
    target:
      entity_id: "{{ cameras[trigger.to_state.entity_id] }}"
    data:
      filename: /media/snaps/motion.jpg
  - service: shell_command.ntfy_put
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
      file: /media/snaps/motion.jpg
  - service: timer.start
    target:
      entity_id: "{{ timers[trigger.to_state.entity_id] }}"
max_exceeded: silent
mode: single
variables:
  cameras:
    binary_sensor.kitchen_back_door_person: camera.kitchen_back_door_sub
    binary_sensor.study_entryway_person: camera.study_entryway_sub
    binary_sensor.garage_person: camera.garage_sub
    binary_sensor.garage_vehicle: camera.garage_sub
  timers:
    binary_sensor.kitchen_back_door_person: timer.kitchen_person
    binary_sensor.study_entryway_person: timer.study_person
    binary_sensor.garage_person: timer.garage_person
    binary_sensor.garage_vehicle: timer.garage_vehicle
```
But I don't typically want to get alerted by these cameras if my wife or I are home and awake. So I'll use the [local calendar integration](https://www.home-assistant.io/integrations/local_calendar) to create a schedule for when the interior cameras should be active. Once that integration is enabled and the entity `calendar.interior_camera_schedule` created, I can navigate to the Calendar section of my Home Assistant interface to create the recurring calendar events (with the summary "On"). I'll basically be enabling notifications while we're sleeping and while we're at work, but disabling notifications while we're expected to be at home.

![calendar](schedule.png)

So then I'll just add another condition so that the automation will only fire during those calendar events:
```yaml
# torchlight! {"lineNumbers": true}
# interior_motion.yaml [tl! focus]
alias: Interior Motion Alerts
description: ""
trigger: # [tl! collapse:start]
  - platform: state
    entity_id:
      - binary_sensor.kitchen_back_door_person
      - binary_sensor.garage_person
      - binary_sensor.garage_vehicle
      - binary_sensor.study_entryway_person
    from: "off"
    to: "on" # [tl! collapse:end]
condition:
  - condition: template
    value_template: "{{ is_state(timers[trigger.to_state.entity_id], 'idle') }}"
  - condition: state # [tl! focus:2 ++:2]
    entity_id: calendar.interior_camera_schedule
    state: "on"
action: # [tl! collapse:start]
  - service: camera.snapshot
    target:
      entity_id: "{{ cameras[trigger.to_state.entity_id] }}"
    data:
      filename: /media/snaps/motion.jpg
  - service: shell_command.ntfy_put
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
      file: /media/snaps/motion.jpg
  - service: timer.start
    target:
      entity_id: "{{ timers[trigger.to_state.entity_id] }}" # [tl! collapse:end]
max_exceeded: silent
mode: single
variables: # [tl! collapse:start]
  cameras:
    binary_sensor.kitchen_back_door_person: camera.kitchen_back_door_sub
    binary_sensor.study_entryway_person: camera.study_entryway_sub
    binary_sensor.garage_person: camera.garage_sub
    binary_sensor.garage_vehicle: camera.garage_sub
  timers:
    binary_sensor.kitchen_back_door_person: timer.kitchen_person
    binary_sensor.study_entryway_person: timer.study_person
    binary_sensor.garage_person: timer.garage_person
    binary_sensor.garage_vehicle: timer.garage_vehicle # [tl! collapse:end]
```
I'd also like to ensure that the interior motion alerts are activated whenever our [Abode](https://goabode.com/) security system is armed, regardless of what time that may be. That makes the condition a little bit trickier: alerts should be pushed if the timer isn't running AND the schedule is active OR the security system is armed (in either "Home" or "Away" mode). So here's what that will look like:
```yaml
# torchlight! {"lineNumbers": true}
# interior_motion.yaml [tl! focus]
alias: Interior Motion Alerts
description: ""
trigger: # [tl! collapse:start]
  - platform: state
    entity_id:
      - binary_sensor.kitchen_back_door_person
      - binary_sensor.garage_person
      - binary_sensor.garage_vehicle
      - binary_sensor.study_entryway_person
    from: "off"
    to: "on" # [tl! collapse:end]
condition: # [tl! focus:start]
  - condition: and # [tl! ++:1]
    conditions: # [tl! collapse:5]
      - condition: template # [tl! --:4]
        value_template: "{{ is_state(timers[trigger.to_state.entity_id], 'idle') }}"
      - condition: state
        entity_id: calendar.interior_camera_schedule
        state: "on"
      - condition: template # [tl! ++:start reindex(-5)]
        value_template: "{{ is_state(timers[trigger.to_state.entity_id], 'idle') }}"
      - condition: or
        conditions:
          - condition: state
            entity_id: calendar.interior_camera_schedule
            state: "on"
          - condition: state
            state: armed_away
            entity_id: alarm_control_panel.abode_alarm
          - condition: state
            state: armed_home
            entity_id: alarm_control_panel.abode_alarm # [tl! ++:end focus:end]
action: # [tl! collapse:start]
  - service: camera.snapshot
    target:
      entity_id: "{{ cameras[trigger.to_state.entity_id] }}"
    data:
      filename: /media/snaps/motion.jpg
  - service: shell_command.ntfy_put
    data:
      title: Motion detected!
      message: "{{ trigger.to_state.attributes.friendly_name }}"
      file: /media/snaps/motion.jpg
  - service: timer.start
    target:
      entity_id: "{{ timers[trigger.to_state.entity_id] }}" # [tl! collapse:end]
max_exceeded: silent
mode: single
variables: # [tl! collapse:start]
  cameras:
    binary_sensor.kitchen_back_door_person: camera.kitchen_back_door_sub
    binary_sensor.study_entryway_person: camera.study_entryway_sub
    binary_sensor.garage_person: camera.garage_sub
    binary_sensor.garage_vehicle: camera.garage_sub
  timers:
    binary_sensor.kitchen_back_door_person: timer.kitchen_person
    binary_sensor.study_entryway_person: timer.study_person
    binary_sensor.garage_person: timer.garage_person
    binary_sensor.garage_vehicle: timer.garage_vehicle # [tl! collapse:end]
```
### Snooze or disable alerts
We've got a lawn service that comes pretty regularly to take care of things, and I don't want to get constant alerts while they're doing things in the yard. Or maybe we stay up a little late one night and don't want to get pinged with interior alerts during that time. So I created a script to snooze all motion alerts for 30 minutes, simply by temporarily disabling the automations I just created:
```yaml
# torchlight! {"lineNumbers": true}
# snooze_motion_alerts.yaml
alias: Snooze Motion Alerts
sequence:
  - service: automation.turn_off
    data:
      stop_actions: true
    target:
      entity_id:
        - automation.exterior_motion_alerts
        - automation.interior_motion_alerts
  - service: notify.ntfy
    data:
      title: Motion Snooze
      message: Camera motion alerts are disabled for 30 minutes.
  - delay:
      hours: 0
      minutes: 30
      seconds: 0
      milliseconds: 0
  - service: automation.turn_on
    data: {}
    target:
      entity_id:
        - automation.interior_motion_alerts
        - automation.exterior_motion_alerts
  - service: notify.ntfy
    data:
      title: Motion Resume
      message: Camera motion alerts are resumed.
mode: single
icon: mdi:alarm-snooze
```
I can then add that script to the camera dashboard in Home Assistant or pin it to the home controls on my Android phone for easy access.

I'll also create another script for manually toggling interior alerts for when we're home at an odd time:
```yaml
# torchlight! {"lineNumbers": true}
# toggle_interior_alerts.yaml
alias: Toggle Indoor Camera Alerts
sequence:
  - service: automation.toggle
    data: {}
    target:
      entity_id: automation.interior_motion_alerts
  - service: notify.ntfy
    data:
      title: "Interior Camera Alerts"
      message: "Alerts are {{ states('automation.interior_motion_alerts') }}"
mode: single
icon: mdi:cctv
```
### That's a wrap
This was a fun little project which had me digging a bit deeper into Home Assistant than I had previously ventured, and I'm really happy with how things turned out. I definitely learned a ton in the process. I might explore [adding action buttons to the notifications](https://community.home-assistant.io/t/ntfy-sh-with-actions/578603) to directly snooze alerts that way, but that will have to wait a bit as I'm out of tinkering time for now.

Two new image files (127 KiB, 289 KiB)
@@ -14,7 +14,7 @@ usePageBundles: true
 thumbnail: "code.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Scripts
+series: Code
 tags:
 - vmware
 - powercli
@@ -12,7 +12,7 @@ featured: true
 
 There are a number of fantastic Windows applications for creating bootable USB drives from ISO images - but those don't work on a Chromebook. Fortunately there's an easily-available tool which will do the trick: Google's own [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) app.
 
-Normally that tool is used to create bootable media to [reinstall Chrome OS on a broken Chromebook](https://support.google.com/chromebook/answer/1080595) (hence the name) but it also has the capability to write other arbitrary images as well. So if you find yourself needing to create a USB drive for installing ESXi on a computer in your [home lab](https://twitter.com/johndotbowdre/status/1341767090945077248) (more on that soon!) here's what you'll need to do:
+Normally that tool is used to create bootable media to [reinstall Chrome OS on a broken Chromebook](https://support.google.com/chromebook/answer/1080595) (hence the name) but it also has the capability to write other arbitrary images as well. So if you find yourself needing to create a USB drive for installing ESXi on a computer in your home lab (more on that soon!) here's what you'll need to do:
 
 1. Install the [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm).
 2. Download the ISO you intend to use.
@@ -14,7 +14,7 @@ usePageBundles: true
 # thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Tips # Projects, Scripts, vRA8, K8s on vSphere
+series: Tips # Projects, Code, vRA8, K8s on vSphere
 tags:
 - linux
 - shell
BIN content/posts/ditching-vsphere-for-proxmox/efi_disk.png (new file, 18 KiB)
content/posts/ditching-vsphere-for-proxmox/index.md (new file, 170 lines)
@@ -0,0 +1,170 @@
---
title: "I Ditched vSphere for Proxmox VE"
date: 2023-11-24
description: "I moved my homelab from VMware vSphere to Proxmox VE, and my only regret is that I didn't make this change sooner."
featured: false
toc: true
comment: true
series: Tips # Projects, Code
tags:
- homelab
- linux
- tailscale
- proxmox
- vmware
---
Way back in 2021, I [documented](/vmware-home-lab-on-intel-nuc-9) how I had built a VMware-focused home lab on an Intel NUC 9 host. The setup was fairly complicated specifically so I could build and test content for what was then known as vRealize Automation. My priorities [have since shifted](/virtuallypotato-runtimeterror)[^future], though, and I no longer have need for vRA at my house. vSphere + vCenter carries a hefty amount of overhead, so I thought it might be time to switch my homelab over to something a bit simpler in the form of [Proxmox VE](https://www.proxmox.com/en/proxmox-virtual-environment/overview).

[^future]: And, if I'm being entirely honest, I'm a little uncertain of what the future may hold for VMware now that the [Broadcom acquisition has closed](https://www.theregister.com/2023/11/23/broadcom_vmware_reorg/).

I only really have the one physical host[^esxi-arm] so I knew that I wouldn't be able to do any sort of live migration. This move would require a full "production" outage as I converted the host, but I needed to ensure I could export some of my critical workloads off of vSphere and then import them into the new Proxmox VE environment. I run [Home Assistant](https://www.home-assistant.io/) in a VM and it would be _Double Plus Bad_ if I broke my smart home's brain in the process.

[^esxi-arm]: Sure, I've still got [ESXi-ARM running on a Quartz64 single-board computer](/esxi-arm-on-quartz64) but that doesn't *really* count.

It took most of a Sunday afternoon, but I eventually fumbled my way through the hypervisor transformation and got things up and running again in time for my evening HA automations to fire at sunset[^deadline]. I'm not going to detail the entire process here, but I do want to highlight a few of the lessons I learned along the way. A lot of this information comes from Proxmox's [Migration of servers to Proxmox VE](https://pve.proxmox.com/wiki/Migration_of_servers_to_Proxmox_VE#VMware) doc[^hindsight].

[^deadline]: There's nothing like starting a project with a strict deadline!
[^hindsight]: I realized in hindsight that I _probably_ could have used the [Server self-migration](https://pve.proxmox.com/wiki/Migration_of_servers_to_Proxmox_VE#Server_self-migration) steps to import VMs straight off of the VMFS datastore but (1) I wasn't sure if/how that would work with a single datastore stretched across multiple disks and (2) I didn't think of it until after I had already imported from OVF.
### The Plan
I've found that my most successful projects begin with a plan[^plan]:

1. Capture details like IPs and network configurations to try and maintain some consistency.
2. Export my needed VMs as OVFs, and save these to external storage.
3. Install Proxmox VE.
4. Transfer the OVFs to the new Proxmox host, and then import them.
5. Perform any required post-import cleanup to make things happy.

[^plan]: The plan usually doesn't survive for very long once the project starts, but establishing a plan is an important part of the project ritual.
#### Exporting VMs to OVF
My initial plan was to just right-click the VMs in the vSphere UI and export them from there, but I'd forgotten how unreliable that process can be. A few smaller VMs exported fine, but most would fail after a while.

I switched to using [VMware's `ovftool`](https://developer.vmware.com/web/tool/ovf/) to do the exports and that worked better, though I did have to fumble through until I found the right syntax:
```shell
~/ovftool/ovftool --noSSLVerify \ # [tl! .cmd]
  "vi://${vsphere_username}@vsphere.local:${vsphere_password}@${vsphere_host}/${vsphere_datacenter}/vm/${vsphere_folder}/${vsphere_vm_name}" \
  "${vsphere_vm_name}.ova"
```
{{% notice tip %}}
Along the way I also learned that I could search for a VM name with `?dns=${vsphere_vm_name}` and not have to work out the complete path to it:
```shell
~/ovftool/ovftool --noSSLVerify \ # [tl! .cmd]
  "vi://${vsphere_username}@vsphere.local:${vsphere_password}@${vsphere_host}/?dns=${vsphere_vm_name}" \
  "${vsphere_vm_name}.ova"
```
{{% /notice %}}
#### Installing Proxmox VE
I had been running ESXi off of a small USB drive inserted in the NUC's internal USB port, and that typically worked fine since ESXi runs entirely in-memory after it's booted. Proxmox VE doesn't do the ramdisk thing, and I found after an initial test install that the performance when running from USB wasn't great. I dug around in my desk drawers and miscellaneous parts bins and found the 512GB M.2 2230 NVMe drive that originally came in my Steam Deck (before I upgraded it to a [2TB drive from Framework](https://frame.work/products/western-digital-sn740-nvme-m-2-2230)). I installed that (and a [2230->2242 adapter](https://www.amazon.com/dp/B0BLJNGGVD)) in the NUC's third (and final) M.2 slot and used _that_ as Proxmox VE's system drive.

Performance was a lot better that way!

After Proxmox VE booted, I went ahead and wiped the two 1TB NVMe drives (which had been my VMFS datastore) to get them ready for use. Without the heavyweight vCenter and vRA VMs, I wouldn't be needing as much storage so I elected to add these drives to a RAID1 ZFS pool (named `zeefs`) to get a bit of extra resiliency.

![zeefs](zeefs.png)
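I let the web UI do the work here, but under the hood it boils down to something like this sketch (the device names and `ashift` value are assumptions, and Proxmox's UI also registers the new pool as storage for you):
```shell
zpool create -o ashift=12 zeefs mirror /dev/nvme0n1 /dev/nvme1n1 # [tl! .cmd_root]
```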
#### Importing VMs
Once Proxmox VE was up and (marginally) configured, I could start the process of restoring my workloads, beginning with the all-important Home Assistant VM.

I used `scp` to transfer it to the Proxmox VE host:
```shell
scp hassos.ova root@${proxmox_host}:/tmp/ # [tl! .cmd]
```
On the host, I needed to first extract the OVA archive so I could get at the OVF and VMDK files inside:
```shell
cd /tmp # [tl! .cmd_root:2]
tar xf hassos.ova
```

I could then use the [`qm` command](https://pve.proxmox.com/pve-docs/qm.1.html) to import the OVF. The syntax is:
```shell
qm importovf ${vm_id} ${ovf_filename} ${vm_storage}
```

I'll assign this VM ID number `100` and will deploy it on the `zeefs` storage:
```shell
qm importovf 100 hassos.ovf zeefs # [tl! .cmd_root]
```
#### Booting imported VMs (or trying to)

Once the import completed, I went to the Proxmox VE UI to see how things looked. The imported VM was missing a network interface, so I added a new `virtio` one. Everything else looked okay so I hit the friendly Start button and popped to the Console view to keep an eye on things.
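(That NIC could just as easily be added from the shell; a rough equivalent, assuming VM ID `100` and the default `vmbr0` bridge:)
```shell
qm set 100 --net0 virtio,bridge=vmbr0 # [tl! .cmd_root]
```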
Unfortunately, the boot hung with a message about not being able to find the OS. In this case, that's because the imported VM defaulted to a traditional BIOS firmware while the installed guest OS is configured for use with UEFI firmware. That's easy enough to change:

![uefi](uefi.png)

Though it does warn that I'll need to manually add an EFI disk for storing the configuration, so I'll do that as well:

![efi_disk](efi_disk.png)
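(The CLI equivalent of those two UI changes would look roughly like this, again assuming VM ID `100` and the `zeefs` pool:)
```shell
qm set 100 --bios ovmf --efidisk0 zeefs:1,efitype=4m # [tl! .cmd_root]
```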
That allowed me to boot the VM successfully, and my Home Assistant system came online in its new home without much more fuss. I could then move forward with importing the remaining workloads. Some needed to have their network settings reconfigured, while some preserved them (I guess it depends on how the guest was addressing the NIC).

In hindsight, I should have also recorded information about the firmware configuration of my VMs. Some used BIOS, some used UEFI, and I didn't really know which way a particular VM was going to lean until I tried booting it with the default `SeaBIOS`.
### Tailscale
I'm a big fan of [Tailscale](https://tailscale.com/), and have been using it to make [secure networking simple](/secure-networking-made-simple-with-tailscale) for a little while now. Naturally, I wanted to see how I could leverage it with this new setup.

#### On the host
While ESXi is a locked-down hypervisor which runs off a ramdisk (making installing arbitrary software kind of tricky/messy/dangerous), Proxmox VE is a customized Debian install with a bunch of VM-management tools built in. This means (among other things) that I can [easily install Tailscale](https://tailscale.com/kb/1133/proxmox/) and use that for securely accessing my Proxmox VE server remotely.

Installing Tailscale on a Proxmox VE host is basically the same as on any other Debian-based Linux OS:
```shell
curl -fsSL https://tailscale.com/install.sh | sh # [tl! .cmd_root]
```

I can then use `tailscale up` to start the process of logging in to my Tailscale account, and I throw in the `--ssh` flag to configure [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh/):
```shell
tailscale up --ssh # [tl! .cmd_root]
```

Once I'm logged in, I'll also use [Tailscale Serve](https://tailscale.com/kb/1312/serve/) as a reverse-proxy for Proxmox VE's web management interface:
```shell
tailscale serve --bg https+insecure://localhost:8006 # [tl! .cmd_root]
```

That takes a few minutes for the MagicDNS record and automatically-provisioned TLS certificate to go live, but I can then access my environment by going to `https://prox1.my-tailnet.ts.net/`, and my browser won't throw any warnings about untrusted certs. Very cool.
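(And if I ever need to double-check the active proxy mapping, `tailscale serve status` prints it out:)
```shell
tailscale serve status # [tl! .cmd_root]
```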
#### In LXC containers
One of the other slick features of Proxmox VE is the ability to run [lightweight LXC system containers](https://pve.proxmox.com/wiki/Linux_Container) right on the hypervisor. These are unprivileged containers by default, though, so attempting to bring Tailscale online fails:
```shell
tailscale up # [tl! .cmd_root focus:3]
failed to connect to local tailscaled; it doesn't appear to be running (sudo systemctl start tailscaled ?) # [tl! .nocopy]
systemctl start tailscaled # [tl! .cmd_root:1]
systemctl status tailscaled
x tailscaled.service - Tailscale node agent # [tl! .nocopy:2]
     Loaded: loaded (/lib/systemd/system/tailscaled.service; enabled; vendor preset: enabled)
     Active: failed (Result: exit-code) since Fri 2023-11-24 20:36:25 UTC; 463ms ago # [tl! focus highlight]
```
Fortunately, [Tailscale has a doc](https://tailscale.com/kb/1130/lxc-unprivileged/) for working with unprivileged LXC containers. I just need to edit the CT's config file on the Proxmox host:
```shell
vim /etc/pve/lxc/${ID}.conf # [tl! .cmd_root]
```

And add these two lines at the bottom:
```ini
# torchlight! {"lineNumbers": true}
arch: amd64
cores: 2
features: nesting=1
hostname: gitlab
memory: 2048
nameserver: 192.168.1.1
net0: name=eth0,bridge=vmbr0,firewall=1,hwaddr=B6:DE:F0:8A:5C:5C,ip=dhcp,type=veth
ostype: debian
rootfs: zeefs:subvol-104-disk-0,size=60G
swap: 512
unprivileged: 1
lxc.cgroup2.devices.allow: c 10:200 rwm # [tl! focus:1 ++:1]
lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
```

After stopping and restarting the container, Tailscale works just brilliantly!
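(Based on the `subvol-104-disk-0` rootfs above, this CT's ID is `104`, so that restart from the Proxmox host is just:)
```shell
pct stop 104 && pct start 104 # [tl! .cmd_root]
```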
### Closing thoughts
There were a few hiccups along the way, but I'm overall very happy with both the decision to ditch ESXi as well as how the migration to Proxmox VE went. I've been focused on VMware for so long, and hadn't really kept up with the other options. It's past time for me to broaden my virtualization horizons a bit.

Proxmox VE makes a lot more sense for a homelab setup, and I'm looking forward to learning more about it as I build more stuff on it.

BIN content/posts/ditching-vsphere-for-proxmox/uefi.png (new file, 19 KiB)
BIN content/posts/ditching-vsphere-for-proxmox/zeefs.png (new file, 52 KiB)
@@ -1,7 +1,7 @@
 ---
 title: "Easy Push Notifications With ntfy.sh"
 date: 2023-09-17
-lastmod: 2023-10-21
+lastmod: 2023-12-22
 description: "Deploying and configuring a self-hosted pub-sub notification handler, getting another server to send a notification when it boots, and integrating the notification handler into Home Assistant."
 featured: false
 toc: true
@@ -66,10 +66,10 @@ services:
       - ./lib/ntf:/var/lib/ntfy
     ports:
       - 2586:80
-    healthcheck: # optional, remember to adapt the host and port to your environment
+    healthcheck: # this should be the port inside the container, not the host port
       test: [
         "CMD-SHELL",
-        "wget -q --tries=1 http://localhost:8080/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"
+        "wget -q --tries=1 http://localhost:80/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"
       ]
       interval: 60s
       timeout: 10s
@@ -10,7 +10,7 @@ tags:
 title: Free serverless URL shortener on Google Cloud Run
 ---
 ### Intro
-I've been [using short.io with a custom domain](https://twitter.com/johndotbowdre/status/1370125198196887556) to keep track of and share messy links for a few months now. That approach has worked very well, but it's also seriously overkill for my needs. I don't need (nor want) tracking metrics to know anything about when those links get clicked, and short.io doesn't provide an easy way to turn that off. I was casually looking for a lighter self-hosted alternative today when I stumbled upon a *serverless* alternative: **[sheets-url-shortener](https://github.com/ahmetb/sheets-url-shortener)**. This uses [Google Cloud Run](https://cloud.google.com/run/) to run an ultralight application container which receives an incoming web request, looks for the path in a Google Sheet, and redirects the client to the appropriate URL. It supports connecting with a custom domain, and should run happily within the [Cloud Run Free Tier limits](https://cloud.google.com/run/pricing).
+I've been using [short.io](https://short.io) with a custom domain to keep track of and share messy links for a few months now. That approach has worked very well, but it's also seriously overkill for my needs. I don't need (nor want) tracking metrics to know anything about when those links get clicked, and short.io doesn't provide an easy way to turn that off. I was casually looking for a lighter self-hosted alternative today when I stumbled upon a *serverless* alternative: **[sheets-url-shortener](https://github.com/ahmetb/sheets-url-shortener)**. This uses [Google Cloud Run](https://cloud.google.com/run/) to run an ultralight application container which receives an incoming web request, looks for the path in a Google Sheet, and redirects the client to the appropriate URL. It supports connecting with a custom domain, and should run happily within the [Cloud Run Free Tier limits](https://cloud.google.com/run/pricing).
 
 The GitHub instructions were pretty straightforward but I did have to fumble through a few additional steps to get everything up and running. Here we go:
 
@@ -14,7 +14,7 @@ usePageBundles: true
 thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: vRA8 # Projects, Scripts, vRA8
+series: vRA8 # Projects, Code, vRA8
 tags:
 - vmware
 - vra
@@ -22,7 +22,7 @@ Fortunately, vRA 8 supports adding an Active Directory integration to handle sta
 | `DRE` | `lab.bowre.net/LAB/DRE/Computers/Servers` |
 
 
-I didn't find a lot of documentation on how to make this work, though, so here's how I've implemented it in my lab (now [running vRA 8.4.2](https://twitter.com/johndotbowdre/status/1416037317052178436)).
+I didn't find a lot of documentation on how to make this work, though, so here's how I've implemented it in my lab (now running vRA 8.4.2).
 
 ### Adding the AD integration
 First things first: connecting vRA to AD. I do this by opening the Cloud Assembly interface, navigating to **Infrastructure > Connections > Integrations**, and clicking the **Add Integration** button. I'm then prompted to choose the integration type so I select the **Active Directory** one, and then I fill in the required information: a name (`Lab AD` seems appropriate), my domain controller as the LDAP host (`ldap://win01.lab.bowdre.net:389`), credentials for an account with sufficient privileges to create and delete computer objects (`lab\vra`), and finally the base DN to be used for the LDAP connection (`DC=lab,DC=bowdre,DC=net`).
@@ -1,5 +1,5 @@
 ---
-series: Scripts
+series: Code
 date: "2020-09-16T08:34:30Z"
 thumbnail: LJOcy2oqc.png
 usePageBundles: true
@@ -14,7 +14,7 @@ usePageBundles: true
 thumbnail: "nessus_login.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Tips # Projects, Scripts, vRA8
+series: Tips # Projects, Code, vRA8
 tags:
 - vmware
 - kubernetes
@@ -14,7 +14,7 @@ usePageBundles: true
 thumbnail: "PowerCLI.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Scripts
+series: Code
 tags:
 - vmware
 - powercli
@@ -14,7 +14,7 @@ usePageBundles: true
 # thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Scripts
+series: Code
 tags:
 - powershell
 - windows
@@ -14,7 +14,7 @@ usePageBundles: true
 # thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Tips # Projects, Scripts, vRA8, K8s on vSphere
+series: Tips # Projects, Code, vRA8, K8s on vSphere
 tags:
 - vmware
 - powershell
@@ -286,8 +286,8 @@ main:
     url: /series/vra8
   - title: "Projects"
     url: /series/projects
-  - title: "Scripts"
-    url: /series/scripts
+  - title: "Code"
+    url: /series/code
   - title: "Tips & Tricks"
     url: /series/tips
   - title: "Tags"
@@ -14,7 +14,7 @@ featureImage: "basic-architecture.png" # Sets featured image on blog post.
 thumbnail: "basic-architecture.png" # Sets thumbnail image appearing inside card on homepage.
 # shareImage: "share.png" # Designate a separate image for social media sharing.
 codeLineNumbers: false # Override global value for showing of line numbers within code block.
-series: Tips # Projects, Scripts, vRA8
+series: Tips # Projects, Code, vRA8
 tags:
 - vmware
 - vsphere
content/posts/salt-state-netdata-tailscale/index.md (new file, 259 lines)
@@ -0,0 +1,259 @@
---
title: "Quick Salt State to Deploy Netdata"
date: 2023-12-21
lastmod: 2023-12-22
description: "A hasty Salt state to deploy netdata monitoring and publish it internally on my tailnet with Tailscale Serve"
featured: false
toc: true
comment: true
series: Code
tags:
- homelab
- iac
- linux
- salt
- tailscale
---
As a follow-up to my [recent explorations](/tailscale-ssh-serve-funnel) with using Tailscale Serve to make [netdata](https://github.com/netdata/netdata) monitoring readily available on my [tailnet](https://tailscale.com/kb/1136/tailnet), I wanted a quick way to reproduce that configuration across my handful of systems. These systems already have [Tailscale installed](https://tailscale.com/download/linux) and configured, and they're all [managed with Salt](https://docs.saltproject.io/en/getstarted/).

So here's a hasty Salt state that I used to make it happen.

It simply installs netdata using the [handy-dandy kickstart script](https://learn.netdata.cloud/docs/installing/one-line-installer-for-all-linux-systems), and then configures Tailscale to Serve the netdata instance (with a trusted cert!) inside my tailnet over `https://[hostname].[tailnet-name].ts.net:8443/netdata`.
```yaml
|
||||
# torchlight! {"lineNumbers": true}
|
||||
# -*- coding: utf-8 -*-
|
||||
# vim: ft=sls
|
||||
# Hasty Salt config to install Netdata and make it available within a tailnet
|
||||
# at https://[hostname].[tailnet-name].ts.net:8443/netdata
|
||||
|
||||
curl:
|
||||
pkg.installed
|
||||
|
||||
tailscale:
|
||||
pkg.installed:
|
||||
- version: latest
|
||||
|
||||
netdata-kickstart:
|
||||
cmd.run:
|
||||
- name: curl -Ss https://my-netdata.io/kickstart.sh | sh -s -- --dont-wait
|
||||
- require:
|
||||
- pkg: curl
|
||||
# don't run this block if netdata is already running
|
||||
- unless: pgrep netdata
|
||||
|
||||
tailscale-serve:
|
||||
cmd.run:
|
||||
- name: tailscale serve --bg --https 8443 --set-path /netdata 19999
|
||||
- require:
|
||||
- pkg: tailscale
|
||||
- cmd: netdata-kickstart
|
||||
# don't run this if netdata is already tailscale-served
|
||||
- unless: tailscale serve status | grep -q '/netdata proxy http://127.0.0.1:19999'
|
||||
```
|
||||
|
||||
It's not super elegant... but it got the job done, and that's all I needed it to do.
|
||||
|
||||
```shell
|
||||
sudo salt 'minion-name' state.apply netdata # [tl! .cmd focus]
|
||||
minion-name: # [tl! .nocopy:start collapse:start]
|
||||
----------
|
||||
ID: curl
|
||||
Function: pkg.installed
|
||||
Result: True
|
||||
Comment: All specified packages are already installed
|
||||
Started: 22:59:00.821329
|
||||
Duration: 28.639 ms
|
||||
Changes:
|
||||
----------
|
||||
ID: tailscale
|
||||
Function: pkg.installed
|
||||
Result: True
|
||||
Comment: All specified packages are already installed and are at the desired version
|
||||
Started: 22:59:00.850083
|
||||
Duration: 4589.765 ms
|
||||
Changes:
|
||||
----------
|
||||
ID: netdata-kickstart
|
||||
Function: cmd.run
|
||||
Name: curl -Ss https://my-netdata.io/kickstart.sh | sh -s -- --dont-wait
|
||||
Result: True
|
||||
Comment: Command "curl -Ss https://my-netdata.io/kickstart.sh | sh -s -- --dont-wait" run
|
||||
Started: 22:59:05.441217
|
||||
Duration: 10617.082 ms
|
||||
Changes:
|
||||
----------
|
||||
pid:
|
||||
169287
|
||||
retcode:
|
||||
0
|
||||
stderr:
|
||||
sh: 19: cd: can't cd to sh
|
||||
--- Using /tmp/netdata-kickstart-ZtqZcfWuqk as a temporary directory. ---
|
||||
--- Checking for existing installations of Netdata... ---
|
||||
--- No existing installations of netdata found, assuming this is a fresh install. ---
|
||||
--- Attempting to install using native packages... ---
|
||||
--- Repository configuration is already present, attempting to install netdata. ---
|
||||
[/tmp/netdata-kickstart-ZtqZcfWuqk]# env DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold install -y netdata
|
||||
OK
|
||||
|
||||
[/tmp/netdata-kickstart-ZtqZcfWuqk]# test -x //usr/libexec/netdata/netdata-updater.sh
|
||||
OK
|
||||
|
||||
[/tmp/netdata-kickstart-ZtqZcfWuqk]# grep -q \-\-enable-auto-updates //usr/libexec/netdata/netdata-updater.sh
|
||||
OK
|
||||
|
||||
[/tmp/netdata-kickstart-ZtqZcfWuqk]# //usr/libexec/netdata/netdata-updater.sh --enable-auto-updates
|
||||
Thu Dec 21 22:59:15 UTC 2023 : INFO: netdata-updater.sh: Auto-updating has been ENABLED through cron, updater script linked to /etc/cron.daily/netdata-updater
|
||||
|
||||
Thu Dec 21 22:59:15 UTC 2023 : INFO: netdata-updater.sh: If the update process fails and you have email notifications set up correctly for cron on this system, you should receive an email notification of the failure.
|
||||
Thu Dec 21 22:59:15 UTC 2023 : INFO: netdata-updater.sh: Successful updates will not send an email.
|
||||
OK
|
||||
|
||||
Successfully installed the Netdata Agent.
|
||||
|
||||
Official documentation can be found online at https://learn.netdata.cloud/docs/.
|
||||
|
||||
Looking to monitor all of your infrastructure with Netdata? Check out Netdata Cloud at https://app.netdata.cloud.
|
||||
|
||||
Join our community and connect with us on:
|
||||
- GitHub: https://github.com/netdata/netdata/discussions
|
||||
- Discord: https://discord.gg/5ygS846fR6
|
||||
- Our community forums: https://community.netdata.cloud/
|
||||
[/tmp/netdata-kickstart-ZtqZcfWuqk]# rm -rf /tmp/netdata-kickstart-ZtqZcfWuqk
|
||||
OK
|
||||
stdout:
|
||||
Reading package lists...
|
||||
Building dependency tree...
|
||||
Reading state information...
|
||||
The following packages were automatically installed and are no longer required:
|
||||
libnorm1 libpgm-5.2-0 libxmlb1 libzmq5 python3-contextvars python3-croniter
|
||||
python3-dateutil python3-gnupg python3-immutables python3-jmespath
|
||||
python3-msgpack python3-psutil python3-pycryptodome python3-tz python3-zmq
|
||||
Use 'apt autoremove' to remove them.
|
||||
The following additional packages will be installed:
|
||||
netdata-ebpf-code-legacy netdata-plugin-apps netdata-plugin-chartsd
|
||||
netdata-plugin-debugfs netdata-plugin-ebpf netdata-plugin-go
|
||||
netdata-plugin-logs-management netdata-plugin-nfacct netdata-plugin-perf
|
||||
netdata-plugin-pythond netdata-plugin-slabinfo
|
||||
netdata-plugin-systemd-journal
|
||||
Suggested packages:
|
||||
netdata-plugin-cups netdata-plugin-freeipmi apcupsd nut nvme-cli
|
||||
The following NEW packages will be installed:
|
||||
netdata netdata-ebpf-code-legacy netdata-plugin-apps netdata-plugin-chartsd
|
||||
netdata-plugin-debugfs netdata-plugin-ebpf netdata-plugin-go
|
||||
netdata-plugin-logs-management netdata-plugin-nfacct netdata-plugin-perf
|
||||
netdata-plugin-pythond netdata-plugin-slabinfo
|
||||
netdata-plugin-systemd-journal
|
||||
0 upgraded, 13 newly installed, 0 to remove and 11 not upgraded.
|
||||
Need to get 0 B/30.7 MB of archives.
|
||||
After this operation, 154 MB of additional disk space will be used.
|
||||
Selecting previously unselected package netdata-ebpf-code-legacy.
|
||||
(Reading database ...
|
||||
(Reading database ... 5%
|
||||
(Reading database ... 10%
|
||||
(Reading database ... 15%
|
||||
(Reading database ... 20%
|
||||
(Reading database ... 25%
|
||||
(Reading database ... 30%
|
||||
(Reading database ... 35%
|
||||
(Reading database ... 40%
|
||||
(Reading database ... 45%
|
||||
(Reading database ... 50%
|
||||
(Reading database ... 55%
|
||||
(Reading database ... 60%
|
||||
(Reading database ... 65%
|
||||
(Reading database ... 70%
|
||||
(Reading database ... 75%
|
||||
(Reading database ... 80%
|
||||
(Reading database ... 85%
|
||||
(Reading database ... 90%
|
||||
(Reading database ... 95%
|
||||
(Reading database ... 100%
|
||||
(Reading database ... 118906 files and directories currently installed.)
|
||||
Preparing to unpack .../00-netdata-ebpf-code-legacy_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-ebpf-code-legacy (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-ebpf.
|
||||
Preparing to unpack .../01-netdata-plugin-ebpf_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-ebpf (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-apps.
|
||||
Preparing to unpack .../02-netdata-plugin-apps_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-apps (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-pythond.
|
||||
Preparing to unpack .../03-netdata-plugin-pythond_1.44.0-77-nightly_all.deb ...
|
||||
Unpacking netdata-plugin-pythond (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-go.
|
||||
Preparing to unpack .../04-netdata-plugin-go_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-go (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-debugfs.
|
||||
Preparing to unpack .../05-netdata-plugin-debugfs_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-debugfs (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-nfacct.
|
||||
Preparing to unpack .../06-netdata-plugin-nfacct_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-nfacct (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-chartsd.
|
||||
Preparing to unpack .../07-netdata-plugin-chartsd_1.44.0-77-nightly_all.deb ...
|
||||
Unpacking netdata-plugin-chartsd (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-slabinfo.
|
||||
Preparing to unpack .../08-netdata-plugin-slabinfo_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-slabinfo (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-perf.
|
||||
Preparing to unpack .../09-netdata-plugin-perf_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-perf (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata.
|
||||
Preparing to unpack .../10-netdata_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-logs-management.
|
||||
Preparing to unpack .../11-netdata-plugin-logs-management_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-logs-management (1.44.0-77-nightly) ...
|
||||
Selecting previously unselected package netdata-plugin-systemd-journal.
|
||||
Preparing to unpack .../12-netdata-plugin-systemd-journal_1.44.0-77-nightly_amd64.deb ...
|
||||
Unpacking netdata-plugin-systemd-journal (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-nfacct (1.44.0-77-nightly) ...
|
||||
Setting up netdata (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-pythond (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-systemd-journal (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-debugfs (1.44.0-77-nightly) ...
|
||||
Setting up netdata-ebpf-code-legacy (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-perf (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-chartsd (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-ebpf (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-apps (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-logs-management (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-go (1.44.0-77-nightly) ...
|
||||
Setting up netdata-plugin-slabinfo (1.44.0-77-nightly) ...
|
||||
Processing triggers for systemd (245.4-4ubuntu3.22) ...
|
||||
----------
|
||||
ID: tailscale-serve
|
||||
Function: cmd.run
|
||||
Name: tailscale serve --bg --https 8443 --set-path /netdata 19999
|
||||
Result: True
|
||||
Comment: Command "tailscale serve --bg --https 8443 --set-path /netdata 19999" run
|
||||
Started: 22:59:16.060397
|
||||
Duration: 62.624 ms
|
||||
Changes: ' # [tl! collapse:end]
|
||||
----------
|
||||
pid:
|
||||
170328
|
||||
retcode:
|
||||
0
|
||||
stderr:
|
||||
stdout:
|
||||
Available within your tailnet: # [tl! focus:start]
|
||||
|
||||
https://minion-name.tailnet-name.ts.net:8443/netdata
|
||||
|-- proxy http://127.0.0.1:19999
|
||||
|
||||
Serve started and running in the background.
|
||||
To disable the proxy, run: tailscale serve --https=8443 off
|
||||
|
||||
Summary for minion-name
|
||||
------------
|
||||
Succeeded: 4 (changed=2) # [tl! highlight]
|
||||
Failed: 0
|
||||
------------
|
||||
Total states run: 4
|
||||
Total run time: 15.298 s
|
||||
# [tl! .nocopy:end focus:end]
|
||||
```
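As a final (hypothetical) spot-check, the netdata API should now answer through the Serve path from any other device on the tailnet:

```shell
# hostname matches the placeholder from the output above
curl -s https://minion-name.tailnet-name.ts.net:8443/netdata/api/v1/info # [tl! .cmd]
```

Getting back a blob of JSON describing the agent would confirm that both the kickstart install and the Serve config did their jobs.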
|
|
@ -15,7 +15,7 @@ thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homep
|
|||
# shareImage: "/images/path/share.png" # Designate a separate image for social media sharing.
|
||||
codeLineNumbers: false # Override global value for showing of line numbers within code block.
|
||||
codeMaxLines: 30
|
||||
series: Scripts
|
||||
series: Code
|
||||
tags:
|
||||
- hugo
|
||||
- meta
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
series: Scripts
|
||||
series: Code
|
||||
date: "2021-07-19T16:03:30Z"
|
||||
usePageBundles: true
|
||||
tags:
|
||||
|
|
|
@ -15,7 +15,7 @@ title: Setting up Linux on a new Lenovo Chromebook Duet (bonus arm64 complicatio
|
|||
featured: false
|
||||
---
|
||||
|
||||
I've [written in the past](/3d-modeling-and-printing-on-chrome-os) about the Linux setup I've been using on my Pixel Slate. My Slate's keyboard stopped working over the weekend, though, and there don't seem to be any replacements (either Google or Brydge) to be found. And then I saw that [Walmart had the 64GB Lenovo Chromebook Duet temporarily marked down](https://twitter.com/johndotbowdre/status/1320733614426988544) to a mere $200 - just slightly more than the Slate's *keyboard* originally cost. So I jumped on that deal, and the little Chromeblet showed up today.
|
||||
I've [written in the past](/3d-modeling-and-printing-on-chrome-os) about the Linux setup I've been using on my Pixel Slate. My Slate's keyboard stopped working over the weekend, though, and there don't seem to be any replacements (either Google or Brydge) to be found. And then I saw that Walmart had the 64GB Lenovo Chromebook Duet temporarily marked down to a mere $200 - just slightly more than the Slate's *keyboard* originally cost. So I jumped on that deal, and the little Chromeblet showed up today.
|
||||
|
||||
![Aww, it's so cute!](kULHPeDuc.jpeg)
|
||||
|
||||
|
@ -102,7 +102,7 @@ Once you connect the phone to Linux, check the phone to approve the debugging co
|
|||
|
||||
### Microsoft PowerShell and VMware PowerCLI
|
||||
*[Updated 5/20/2021 with Microsoft's newer instructions]*
|
||||
I'm working on setting up a [VMware homelab on an Intel NUC 9](https://twitter.com/johndotbowdre/status/1317558182936563714) so being able to automate things with PowerCLI will be handy.
|
||||
I'm working on setting up a VMware homelab on an Intel NUC 9 so being able to automate things with PowerCLI will be handy.
|
||||
|
||||
PowerShell for ARM is still in an early stage so while [it is supported](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7.2#support-for-arm-processors) it must be installed manually. Microsoft has instructions for installing PowerShell from binary archives [here](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7.2#linux), and I grabbed the latest `-linux-arm64.tar.gz` release I could find [here](https://github.com/PowerShell/PowerShell/releases).
|
||||
```shell
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
---
|
||||
title: "Spotlight on Torchlight"
|
||||
date: 2023-11-09
|
||||
# lastmod: 2023-11-06
|
||||
lastmod: 2023-11-13
|
||||
description: "Syntax highlighting powered by the Torchlight.dev API makes it easier to dress up code blocks. Here's an overview of what I did to replace this blog's built-in Hugo highlighter (Chroma) with Torchlight."
|
||||
featured: false
|
||||
toc: true
|
||||
comment: true
|
||||
series: Projects # Projects, Scripts
|
||||
series: Projects # Projects, Code
|
||||
tags:
|
||||
- javascript
|
||||
- hugo
|
||||
|
@ -15,13 +15,13 @@ tags:
|
|||
|
||||
I've been futzing around a bit with how code blocks render on this blog. Hugo has a built-in, _really fast_, [syntax highlighter](https://gohugo.io/content-management/syntax-highlighting/) courtesy of [Chroma](https://github.com/alecthomas/chroma). Chroma is basically automatic and it renders very quickly[^fast] during the `hugo` build process, and it's a pretty solid "works everywhere out of the box" option.
|
||||
|
||||
That said, Chroma sometimes seems to struggle with tokenizing and highlighting certain languages, leaving me with boring monochromatic text blocks. Hugo's implementation allows for annotations like `{hl_lines="11-13"}` alongside the code fences to (for instance) highlight lines 11-13, but that can get kind of clumsy if you're not sure which lines need to be highlighted[^eleven] or are needing to highlight multiple disjoined lines. And sometimes I'd like to share a long code block for context while also collapsing it down to just the bits I'm going to write about. That's not something that can be done with the built-in highlighter (at least not without tacking on a bunch of extra JavaScript and CSS nonsense)[^nonsense].
|
||||
That said, the one-size-fits-all approach may not actually fit everyone *well*, and Chroma does leave me wanting a bit more. Chroma sometimes struggles with tokenizing and highlighting certain languages, leaving me with boring monochromatic text blocks. Hugo's implementation supports highlighting individual lines by inserting directives next to the code fence backticks (like `{hl_lines="11-13"}` to highlight lines 11-13), but that can be clumsy if you're not sure which lines need to be highlighted[^eleven], need to highlight multiple disjointed lines, or later insert additional lines which throw off the count. And sometimes I'd like to share a full file for context while also collapsing it down to just the bits I'm going to write about. That's not something that can be done with the built-in highlighter (at least not without tacking on a bunch of extra JavaScript and CSS nonsense[^nonsense]).
|
||||
|
||||
[^fast]: Did I mention that it's fast?
|
||||
[^eleven]: (or how to count to eleven)
|
||||
[^nonsense]: Spoiler: I'm going to tack on some JS and CSS nonsense later - we'll get to that.
|
||||
|
||||
But then I found a post from Sebastian de Deyne about [Better code highlighting in Hugo with Torchlight](https://sebastiandedeyne.com/better-code-highlighting-in-hugo-with-torchlight), and I thought that [Torchlight](https://torchlight.dev) sounded pretty promising.
|
||||
But then I found a post from Sebastian de Deyne about [Better code highlighting in Hugo with Torchlight](https://sebastiandedeyne.com/better-code-highlighting-in-hugo-with-torchlight). And I thought that [Torchlight](https://torchlight.dev) sounded pretty promising.
|
||||
|
||||
From Torchlight's [docs](https://torchlight.dev/docs),
|
||||
|
||||
|
@ -126,7 +126,7 @@ I started with registering for a free[^free] account at [torchlight.dev](https:/
|
|||
echo "TORCHLIGHT_TOKEN=torch_[...]" > ./.env # [tl! .cmd]
|
||||
```
|
||||
|
||||
[^free]: Torchlight is free for sites which don't generate revenue, though it does require a link back to `torchlight.dev`. I stuck the attribution link in the footer. More pricing info [here].
|
||||
[^free]: Torchlight is free for sites which don't generate revenue, though it does require a link back to `torchlight.dev`. I stuck the attribution link in the footer. More pricing info [here](https://torchlight.dev/#pricing).
|
||||
|
||||
#### Installation
|
||||
I then used `npm` to install Torchlight in the root of my Hugo repo:
|
||||
|
@ -157,8 +157,8 @@ node:internal/fs/utils:350
|
|||
throw err;
|
||||
^
|
||||
|
||||
Error: ENOENT: no such file or directory, open '/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/stubs/config.js' # [tl! focus collapse:start]
|
||||
at Object.openSync (node:fs:603:3) #
|
||||
Error: ENOENT: no such file or directory, open '/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/stubs/config.js' # [tl! focus]
|
||||
at Object.openSync (node:fs:603:3)
|
||||
at Object.readFileSync (node:fs:471:35)
|
||||
at write (/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/bin/torchlight.cjs.js:524:39)
|
||||
at init (/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/bin/torchlight.cjs.js:538:12)
|
||||
|
@ -167,7 +167,7 @@ Error: ENOENT: no such file or directory, open '/home/john/projects/runtimeterro
|
|||
at /home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1227:65
|
||||
at Command._chainOrCall (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1144:12)
|
||||
at Command._parseCommand (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1227:27)
|
||||
at Command._dispatchSubcommand (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1050:25) { # [tl! collapse:end]
|
||||
at Command._dispatchSubcommand (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1050:25) {
|
||||
errno: -2,
|
||||
syscall: 'open',
|
||||
code: 'ENOENT',
|
||||
|
@ -180,10 +180,10 @@ Node.js v18.17.1
|
|||
|
||||
Oh. Hmm.
|
||||
|
||||
There's an [open issue](https://github.com/torchlight-api/torchlight-cli/issues/4) which reveals that the stub config file is actually located under the `src/` directory instead of `dist/`.
|
||||
There's an [open issue](https://github.com/torchlight-api/torchlight-cli/issues/4) which reveals that the stub config file is actually located under the `src/` directory instead of `dist/`. And it turns out the `init` step isn't strictly necessary; it's just a helper to get you a working config to start with.
|
||||
|
||||
#### Configuration
|
||||
I'll just copy that to my repo root and then set to work modifying it to suit my needs:
|
||||
Now that I know where the stub config lives, I can simply copy it to my repo root. I'll then get to work modifying it to suit my needs:
|
||||
|
||||
```shell
|
||||
cp node_modules/@torchlight-api/torchlight-cli/src/stubs/config.js ./torchlight.config.js # [tl! .cmd]
|
||||
|
@ -226,11 +226,13 @@ module.exports = {
|
|||
|
||||
// If there are any diff indicators for a line, put them
|
||||
// in place of the line number to save horizontal space.
|
||||
diffIndicatorsInPlaceOfLineNumbers: true
|
||||
diffIndicatorsInPlaceOfLineNumbers: true // [tl! --]
|
||||
diffIndicatorsInPlaceOfLineNumbers: true, // [tl! ++ reindex(-1)]
|
||||
|
||||
// When lines are collapsed, this is the text that will
|
||||
// be shown to indicate that they can be expanded.
|
||||
// summaryCollapsedIndicator: '...',
|
||||
// summaryCollapsedIndicator: '...', [tl! --]
|
||||
summaryCollapsedIndicator: 'Click to expand...', // make the collapse a little more explicit [tl! ++ reindex(-1)]
|
||||
},
|
||||
|
||||
// Options for the highlight command.
|
||||
|
|
|
@ -6,7 +6,7 @@ description: "Quick notes on using `systemctl edit` to override a systemd servic
|
|||
featured: false
|
||||
toc: false
|
||||
comment: true
|
||||
series: Tips # Projects, Scripts
|
||||
series: Tips # Projects, Code
|
||||
tags:
|
||||
- crostini
|
||||
- linux
|
||||
|
|
|
@ -14,7 +14,7 @@ usePageBundles: true
|
|||
thumbnail: "golinks.png" # Sets thumbnail image appearing inside card on homepage.
|
||||
# shareImage: "share.png" # Designate a separate image for social media sharing.
|
||||
codeLineNumbers: false # Override global value for showing of line numbers within code block.
|
||||
series: Projects # Projects, Scripts, vRA8, K8s on vSphere
|
||||
series: Projects # Projects, Code, vRA8, K8s on vSphere
|
||||
tags:
|
||||
- docker
|
||||
- vpn
|
||||
|
|
|
@ -14,7 +14,7 @@ usePageBundles: true
|
|||
thumbnail: "Tailscale-AppIcon.png" # Sets thumbnail image appearing inside card on homepage.
|
||||
# shareImage: "share.png" # Designate a separate image for social media sharing.
|
||||
codeLineNumbers: false # Override global value for showing of line numbers within code block.
|
||||
series: Tips # Projects, Scripts, vRA8, K8s on vSphere
|
||||
series: Tips # Projects, Code, vRA8, K8s on vSphere
|
||||
tags:
|
||||
- vmware
|
||||
- linux
|
||||
|
|
After Width: | Height: | Size: 119 KiB |
After Width: | Height: | Size: 40 KiB |
After Width: | Height: | Size: 160 KiB |
343
content/posts/tailscale-serve-docker-compose-sidecar/index.md
Normal file
|
@ -0,0 +1,343 @@
|
|||
---
|
||||
title: "Tailscale Serve in a Docker Compose Sidecar"
|
||||
date: 2023-12-30
|
||||
# lastmod: 2023-12-28
|
||||
description: "Using Docker Compose to deploy containerized applications and make them available via Tailscale Serve and Tailscale Funnel"
|
||||
featured: false
|
||||
toc: true
|
||||
comment: true
|
||||
series: Projects
|
||||
tags:
|
||||
- containers
|
||||
- docker
|
||||
- selfhosting
|
||||
- tailscale
|
||||
---
|
||||
Hi, and welcome back to what has become my [Tailscale blog](/tags/tailscale/).
|
||||
|
||||
I have a few servers that I use for running multiple container workloads. My approach in the past had been to use [Caddy webserver](https://caddyserver.com/) on the host to proxy the various containers. With this setup, each app would have its own DNS record, and Caddy would be configured to route traffic to the appropriate internal port based on that. For instance:
|
||||
|
||||
```text
|
||||
# torchlight! {"lineNumbers": true}
|
||||
cyberchef.runtimeterror.dev {
|
||||
reverse_proxy localhost:8000
|
||||
}
|
||||
|
||||
ntfy.runtimeterror.dev, http://ntfy.runtimeterror.dev {
|
||||
reverse_proxy localhost:8080
|
||||
@httpget {
|
||||
protocol http
|
||||
method GET
|
||||
path_regexp ^/([-_a-z0-9]{0,64}$|docs/|static/)
|
||||
}
|
||||
redir @httpget https://{host}{uri}
|
||||
}
|
||||
|
||||
uptime.runtimeterror.dev {
|
||||
reverse_proxy localhost:3001
|
||||
}
|
||||
|
||||
miniflux.runtimeterror.dev {
|
||||
reverse_proxy localhost:8080
|
||||
}
|
||||
```
|
||||
|
||||
*and so on...* You get the idea. This approach works well for services I want/need to be public, but it does require me to manage those DNS records and keep track of which app is on which port. That can be kind of tedious.
|
||||
|
||||
And I don't really need all of these services to be public. Not because they're particularly sensitive, but I just don't really have a reason to share my personal [Miniflux](https://github.com/miniflux/v2) or [CyberChef](https://github.com/gchq/CyberChef) instances with the world at large. Those would be great candidates to proxy with [Tailscale Serve](/tailscale-ssh-serve-funnel#tailscale-serve) so they'd only be available on my tailnet. Of course, with that setup I'd then have to differentiate the services based on external port numbers since they'd all be served with the same hostname. That's not ideal either.
|
||||
|
||||
```shell
|
||||
sudo tailscale serve --bg --https 8443 8080 # [tl! .cmd]
|
||||
Available within your tailnet: # [tl! .nocopy:6]
|
||||
|
||||
https://tsdemo.tailnet-name.ts.net/
|
||||
|-- proxy http://127.0.0.1:8000
|
||||
|
||||
https://tsdemo.tailnet-name.ts.net:8443/
|
||||
|-- proxy http://127.0.0.1:8080
|
||||
```
|
||||
|
||||
It would be really great if I could directly attach each container to my tailnet and then access the apps with addresses like `https://miniflux.tailnet-name.ts.net` or `https://cyber.tailnet-name.ts.net`. Tailscale does have an [official Docker image](https://hub.docker.com/r/tailscale/tailscale), and at first glance it seems like that would solve my needs pretty directly. Unfortunately, it looks like trying to leverage that container image directly would still require me to configure Tailscale Serve interactively[^ts_serve_config].
|
||||
|
||||
[^ts_serve_config]: While not documented for the image itself, the `containerboot` binary seems like it should accept a [`TS_SERVE_CONFIG` argument](https://github.com/tailscale/tailscale/blob/5812093d31c8a7f9c5e3a455f0fd20dcc011d8cd/cmd/containerboot/main.go#L43) to designate the file path of the `ipn.ServeConfig`... but I couldn't find any information on how to actually configure that.
|
||||
|
||||
And then I came across [Louis-Philippe Asselin's post](https://asselin.engineer/tailscale-docker) about how he set up Tailscale in Docker Compose. When he wrote his post, there was even less documentation on how to do this stuff, so he used a [modified Tailscale docker image](https://github.com/lpasselin/tailscale-docker) which loads a [startup script](https://github.com/lpasselin/tailscale-docker/blob/c6f8d75b5e1235b8dbeee849df9321f515c526e5/images/tailscale/start.sh) to handle some of the configuration steps. His repo also includes a [helpful docker-compose example](https://github.com/lpasselin/tailscale-docker/blob/c6f8d75b5e1235b8dbeee849df9321f515c526e5/docker-compose/stateful-example/docker-compose.yml) of how to connect it together.
|
||||
|
||||
I quickly realized I could modify his startup script to take care of my Tailscale Serve need. So here's how I did it.
|
||||
|
||||
### Docker Image
|
||||
My image starts out basically the same as Louis-Philippe's, with just pulling in the official image and then adding the customized script:
|
||||
|
||||
```Dockerfile
|
||||
# torchlight! {"lineNumbers": true}
|
||||
FROM tailscale/tailscale:v1.56.1
|
||||
COPY start.sh /usr/bin/start.sh
|
||||
RUN chmod +x /usr/bin/start.sh
|
||||
CMD ["/usr/bin/start.sh"]
|
||||
```
|
||||
|
||||
My `start.sh` script has a few tweaks for brevity/clarity, and also adds a block for conditionally enabling a basic Tailscale Serve (or Funnel) configuration:
|
||||
```shell
|
||||
# torchlight! {"lineNumbers": true}
|
||||
#!/bin/ash
|
||||
trap 'kill -TERM $PID' TERM INT
|
||||
echo "Starting Tailscale daemon"
|
||||
tailscaled --tun=userspace-networking --statedir="${TS_STATE_DIR}" ${TS_TAILSCALED_EXTRA_ARGS} &
|
||||
PID=$!
|
||||
until tailscale up --authkey="${TS_AUTHKEY}" --hostname="${TS_HOSTNAME}" ${TS_EXTRA_ARGS}; do
|
||||
sleep 0.1
|
||||
done
|
||||
tailscale status
|
||||
if [ -n "${TS_SERVE_PORT}" ]; then # [tl! ++:10]
|
||||
if [ -n "${TS_FUNNEL}" ]; then
|
||||
if ! tailscale funnel status | grep -A1 '(Funnel on)' | grep -q "${TS_SERVE_PORT}"; then # the first grep must print (no -q) so the second can match
|
||||
tailscale funnel --bg "${TS_SERVE_PORT}"
|
||||
fi
|
||||
else
|
||||
if ! tailscale serve status | grep -q "${TS_SERVE_PORT}"; then
|
||||
tailscale serve --bg "${TS_SERVE_PORT}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
wait ${PID}
|
||||
```
|
||||
|
||||
This script starts the `tailscaled` daemon in userspace mode, and it tells the daemon to store its state in a user-defined location. It then uses a supplied [pre-auth key](https://tailscale.com/kb/1085/auth-keys) to bring up the new Tailscale node and set the hostname.
|
||||
|
||||
If both `TS_SERVE_PORT` and `TS_FUNNEL` are set, the script will publicly proxy the designated port with Tailscale Funnel. If only `TS_SERVE_PORT` is set, it will just proxy it internal to the tailnet with Tailscale Serve.[^normal]
|
||||
|
||||
[^normal]: If *neither* variable is set, the script just brings up Tailscale like normal... in which case you might as well just use the official image.
|
||||
|
||||
I'm using [this git repo](https://github.com/jbowdre/tailscale-docker/) to track my work on this, and it automatically builds my [tailscale-docker](https://github.com/jbowdre/tailscale-docker/pkgs/container/tailscale-docker) image. So now I can simply reference `ghcr.io/jbowdre/tailscale-docker` in my Docker configurations.
|
||||
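For instance, pulling the image down manually is just:

```shell
docker pull ghcr.io/jbowdre/tailscale-docker:latest # [tl! .cmd]
```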
|
||||
On that note...
|
||||
|
||||
### Compose Configuration
|
||||
There's also a [sample `docker-compose.yml`](https://github.com/jbowdre/tailscale-docker/blob/a54e45ca717023a45d6b1d0aac7143902b02cb0b/docker-compose-example/docker-compose.yml) in the repo to show how to use the image:
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers": true}
|
||||
services:
|
||||
tailscale:
|
||||
image: ghcr.io/jbowdre/tailscale-docker:latest
|
||||
container_name: tailscale
|
||||
environment:
|
||||
TS_AUTHKEY: ${TS_AUTHKEY:?err} # from https://login.tailscale.com/admin/settings/authkeys
|
||||
TS_HOSTNAME: ${TS_HOSTNAME:-ts-docker} # optional hostname to use for this node
|
||||
TS_STATE_DIR: "/var/lib/tailscale/" # store ts state in a local volume
|
||||
TS_TAILSCALED_EXTRA_ARGS: ${TS_TAILSCALED_EXTRA_ARGS:-} # optional extra args to pass to tailscaled
|
||||
TS_EXTRA_ARGS: ${TS_EXTRA_ARGS:-} # optional extra flags to pass to tailscale up
|
||||
TS_SERVE_PORT: ${TS_SERVE_PORT:-} # optional port to proxy with tailscale serve (ex: '80')
|
||||
TS_FUNNEL: ${TS_FUNNEL:-} # if set, serve publicly with tailscale funnel
|
||||
volumes:
|
||||
- ./ts_data:/var/lib/tailscale/ # the mount point should match TS_STATE_DIR
|
||||
myservice:
|
||||
image: nginxdemos/hello
|
||||
network_mode: "service:tailscale" # use the tailscale network service's network
|
||||
```
|
||||
|
||||
You'll note that most of those environment variables aren't actually defined in this YAML. Instead, they'll be inherited from the environment used for spawning the containers. This provides a few benefits. First, it lets the `tailscale` service definition block function as a template that can be copied into other Compose files without modification. Second, it avoids holding sensitive data in the YAML itself. And third, it allows us to set default values for undefined variables (if `TS_HOSTNAME` is empty it will be automatically replaced with `ts-docker`) or to abort with an error if a required value (like `TS_AUTHKEY`) isn't set.
|
||||
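Compose borrows the `${VAR:-default}` and `${VAR:?err}` forms from POSIX shell parameter expansion, so the behavior is easy to preview right at a prompt (a quick sketch, not part of the actual deployment):

```shell
unset TS_HOSTNAME TS_AUTHKEY
echo "${TS_HOSTNAME:-ts-docker}" # prints the fallback value: ts-docker
echo "${TS_AUTHKEY:?err}"        # aborts with 'TS_AUTHKEY: err' since it's unset
```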
|
||||
You can create the required variables by exporting them at the command line (`export TS_HOSTNAME=ts-docker`) - but that runs the risk of having sensitive values like an authkey stored in your shell history. It's not a great habit.
|
||||
|
||||
Perhaps a better approach is to set the variables in a `.env` file stored alongside the `docker-compose.yaml` but with stricter permissions. This file can be owned and only readable by root (or the defined Docker user), while the Compose file can be owned by your own user or the `docker` group.
|
||||
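A minimal sketch of that split-permission setup (the paths and the `docker` group are assumptions about your environment):

```shell
sudo chown root:root .env            # secrets belong to root...
sudo chmod 600 .env                  # ...and are readable only by root
sudo chgrp docker docker-compose.yml # the Compose file stays readable to the docker group
sudo chmod 640 docker-compose.yml
```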
|
||||
Here's how the `.env` for this setup might look:
|
||||
|
||||
```shell
|
||||
# torchlight! {"lineNumbers": true}
|
||||
TS_AUTHKEY=tskey-auth-somestring-somelongerstring
|
||||
TS_HOSTNAME=tsdemo
|
||||
TS_TAILSCALED_EXTRA_ARGS=--verbose=1
|
||||
TS_EXTRA_ARGS=--ssh
|
||||
TS_SERVE_PORT=8080
|
||||
TS_FUNNEL=1
|
||||
```
|
||||
|
||||
| Variable Name | Example | Description |
|
||||
| --- | --- | --- |
|
||||
| `TS_AUTHKEY` | `tskey-auth-somestring-somelongerstring` | used for unattended auth of the new node, get one [here](https://login.tailscale.com/admin/settings/keys) |
|
||||
| `TS_HOSTNAME` | `tsdemo` | optional Tailscale hostname for the new node[^hostname] |
|
||||
| `TS_STATE_DIR` | `/var/lib/tailscale/` | required directory for storing Tailscale state, this should be mounted to the container for persistence |
|
||||
| `TS_TAILSCALED_EXTRA_ARGS` | `--verbose=1`[^verbose] | optional additional [flags](https://tailscale.com/kb/1278/tailscaled#flags-to-tailscaled) for `tailscaled` |
|
||||
| `TS_EXTRA_ARGS` | `--ssh`[^ssh] | optional additional [flags](https://tailscale.com/kb/1241/tailscale-up) for `tailscale up` |
|
||||
| `TS_SERVE_PORT` | `8080` | optional application port to expose with [Tailscale Serve](https://tailscale.com/kb/1312/serve) |
|
||||
| `TS_FUNNEL` | `1` | if set (to anything), will proxy `TS_SERVE_PORT` **publicly** with [Tailscale Funnel](https://tailscale.com/kb/1223/funnel) |
|
||||
|
||||
[^hostname]: This hostname will determine the fully-qualified domain name where the resource will be served: `https://[hostname].[tailnet-name].ts.net`. So you'll want to make sure it's a good one for what you're trying to do.
|
||||
[^verbose]: Passing the `--verbose` flag to `tailscaled` increases the logging verbosity, which can be helpful if you need to troubleshoot.
|
||||
[^ssh]: The `--ssh` flag to `tailscale up` will enable Tailscale SSH and (ACLs permitting) allow you to easily SSH directly into the *Tailscale* container without having to talk to the Docker host and spawn a shell from there.
|
||||
|
||||
A few implementation notes:
|
||||
- If you want to use Funnel with this configuration, it might be a good idea to associate the [Funnel ACL policy](https://tailscale.com/kb/1223/funnel#tailnet-policy-file-requirement) with a tag (like `tag:funnel`), as I discussed a bit [here](/tailscale-ssh-serve-funnel/#tailscale-funnel). And then when you create the [pre-auth key](https://tailscale.com/kb/1085/auth-keys), you can set it to automatically apply the tag so it can enable Funnel.
|
||||
- It's very important that the path designated by `TS_STATE_DIR` is a volume mounted into the container. Otherwise, the container will lose its Tailscale configuration when it stops. That could be inconvenient.
|
||||
- Linking `network_mode` on the application container back to the `service:tailscale` definition is [the magic](https://docs.docker.com/compose/compose-file/05-services/#network_mode) that lets the sidecar proxy traffic for the app. This way the two containers effectively share the same network interface, allowing them to share the same ports. So port `8080` on the app container is available on the tailscale container, and that enables `tailscale serve --bg 8080` to work.
|
||||
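One way to see that shared network namespace in action is to fetch the app's port from inside the *tailscale* container. This sketch assumes the service names from the example above and the BusyBox `wget` that ships in the Alpine-based image:

```shell
# the app's port answers on localhost *inside* the tailscale container
docker exec tailscale wget -qO- http://127.0.0.1:8080 | head -c 200 # [tl! .cmd]
```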
|
||||
### Usage Examples
|
||||
|
||||
To tie this all together, I'm going to quickly run through the steps I took to create and publish two container-based services without having to do any interactive configuration.
|
||||
|
||||
#### CyberChef
|
||||
|
||||
I'll start with my [CyberChef](https://github.com/gchq/CyberChef) instance.
|
||||
|
||||
> *CyberChef is a simple, intuitive web app for carrying out all manner of "cyber" operations within a web browser. These operations include simple encoding like XOR and Base64, more complex encryption like AES, DES and Blowfish, creating binary and hexdumps, compression and decompression of data, calculating hashes and checksums, IPv6 and X.509 parsing, changing character encodings, and much more.*
|
||||
|
||||
This will be served publicly with Funnel so that my friends can use this instance if they need it.
|
||||
|
||||
I'll need a pre-auth key so that the Tailscale container can authenticate to my Tailnet. I can get that by going to the [Tailscale Admin Portal](https://login.tailscale.com/admin/settings/keys) and generating a new auth key. I gave it a description and ticked the option to pre-approve whatever device authenticates with this key (since I have [Device Approval](https://tailscale.com/kb/1099/device-approval) enabled on my tailnet). I also used the option to auto-apply the `tag:internal` tag I use for grouping my on-prem systems as well as the `tag:funnel` tag I use for approving Funnel devices in the ACL.
|
||||
|
||||
![authkey creation](authkey1.png)
|
||||
|
||||
That gives me a new single-use authkey:
|
||||
|
||||
![new authkey](authkey2.png)
|
||||
|
||||
I'll use that new key as well as the knowledge that CyberChef is served by default on port `8000` to create an appropriate `.env` file:
|
||||
|
||||
```shell
|
||||
# torchlight! {"lineNumbers": true}
|
||||
TS_AUTHKEY=tskey-auth-somestring-somelongerstring
|
||||
TS_HOSTNAME=cyber
|
||||
TS_EXTRA_ARGS=--ssh
|
||||
TS_SERVE_PORT=8000
|
||||
TS_FUNNEL=true
|
||||
```
|
||||
|
||||
And I can add the corresponding `docker-compose.yml` to go with it:
|
||||
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers": true}
|
||||
services:
|
||||
tailscale: # [tl! focus:start]
|
||||
image: ghcr.io/jbowdre/tailscale-docker:latest
|
||||
container_name: cyberchef-tailscale
|
||||
environment:
|
||||
TS_AUTHKEY: ${TS_AUTHKEY:?err}
|
||||
TS_HOSTNAME: ${TS_HOSTNAME:-ts-docker}
|
||||
TS_STATE_DIR: "/var/lib/tailscale/"
|
||||
TS_TAILSCALED_EXTRA_ARGS: ${TS_TAILSCALED_EXTRA_ARGS:-}
|
||||
TS_EXTRA_ARGS: ${TS_EXTRA_ARGS:-}
|
||||
TS_SERVE_PORT: ${TS_SERVE_PORT:-}
|
||||
TS_FUNNEL: ${TS_FUNNEL:-}
|
||||
volumes:
|
||||
- ./ts_data:/var/lib/tailscale/ # [tl! focus:end]
|
||||
cyberchef:
|
||||
container_name: cyberchef
|
||||
image: mpepping/cyberchef:latest
|
||||
restart: unless-stopped
|
||||
network_mode: service:tailscale # [tl! focus]
|
||||
```
|
||||
|
||||
I can just bring it online like so:
|
||||
```shell
|
||||
docker compose up -d # [tl! .cmd .nocopy:1,4]
|
||||
[+] Running 3/3
|
||||
✔ Network cyberchef_default Created
|
||||
✔ Container cyberchef-tailscale Started
|
||||
✔ Container cyberchef Started
|
||||
```
|
||||
|
||||
I can review the logs for the `tailscale` service to confirm that the Funnel configuration was applied:
|
||||
```shell
|
||||
docker compose logs tailscale # [tl! .cmd .nocopy:1,12 focus]
|
||||
cyberchef-tailscale | # Health check:
|
||||
cyberchef-tailscale | # - not connected to home DERP region 12
|
||||
cyberchef-tailscale | # - Some peers are advertising routes but --accept-routes is false
|
||||
cyberchef-tailscale | 2023/12/30 17:44:48 serve: creating a new proxy handler for http://127.0.0.1:8000
|
||||
cyberchef-tailscale | 2023/12/30 17:44:48 Hostinfo.WireIngress changed to true
|
||||
cyberchef-tailscale | Available on the internet: # [tl! focus:6]
|
||||
cyberchef-tailscale |
|
||||
cyberchef-tailscale | https://cyber.tailnet-name.ts.net/
|
||||
cyberchef-tailscale | |-- proxy http://127.0.0.1:8000
|
||||
cyberchef-tailscale |
|
||||
cyberchef-tailscale | Funnel started and running in the background.
|
||||
cyberchef-tailscale | To disable the proxy, run: tailscale funnel --https=443 off
|
||||
```
|
||||
|
||||
And after ~10 minutes or so (it sometimes takes a bit longer for the DNS and SSL to start working outside the tailnet), I'll be able to hit the instance at `https://cyber.tailnet-name.ts.net` from anywhere on the web.
|
||||
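A quick (and hypothetical) way to tell when the Funnel endpoint goes live is to poll it from a machine *outside* the tailnet and watch for a successful status line:

```shell
curl -sI https://cyber.tailnet-name.ts.net | head -n 1 # [tl! .cmd]
HTTP/2 200 # [tl! .nocopy]
```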
|
||||
![cyberchef](cyberchef.png)
|
||||
|
||||
|
||||
#### Miniflux
|
||||
I've lately been playing quite a bit with [my omg.lol address](https://jbowdre.omg.lol/) and [associated services](https://home.omg.lol/referred-by/jbowdre), and that's inspired me to [revisit the world](https://rknight.me/blog/the-web-is-fantastic/) of curating RSS feeds instead of relying on algorithms to keep me informed. Through that experience, I recently found [Miniflux](https://github.com/miniflux/v2), a "Minimalist and opinionated feed reader". It's written in Go, is fast and lightweight, and works really well as a PWA installed on mobile devices, too.
|
||||
|
||||
It will be great for keeping track of my feeds, but I don't need to expose this service publicly. So I'll serve it up inside my tailnet with Tailscale Serve.
|
||||
|
||||
Here's the `.env` that I'll use:
|
||||
```shell
|
||||
# torchlight! {"lineNumbers": true}
|
||||
DB_USER=db-username
|
||||
DB_PASS=db-passw0rd
|
||||
ADMIN_USER=sysadmin
|
||||
ADMIN_PASS=hunter2
|
||||
TS_AUTHKEY=tskey-auth-somestring-somelongerstring
|
||||
TS_HOSTNAME=miniflux
|
||||
TS_EXTRA_ARGS=--ssh
|
||||
TS_SERVE_PORT=8080
|
||||
```
|
||||
|
||||
Funnel will not be configured for this since `TS_FUNNEL` was not defined.
|
||||
|
||||
I adapted the [example `docker-compose.yml`](https://miniflux.app/docs/docker.html#docker-compose) from Miniflux to add in my Tailscale bits:
|
||||
```yaml
|
||||
# torchlight! {"lineNumbers": true}
|
||||
services:
|
||||
tailscale: # [tl! focus:start]
|
||||
image: ghcr.io/jbowdre/tailscale-docker:latest
|
||||
container_name: miniflux-tailscale
|
||||
environment:
|
||||
TS_AUTHKEY: ${TS_AUTHKEY:?err}
|
||||
TS_HOSTNAME: ${TS_HOSTNAME:-ts-docker}
|
||||
TS_STATE_DIR: "/var/lib/tailscale/"
|
||||
TS_TAILSCALED_EXTRA_ARGS: ${TS_TAILSCALED_EXTRA_ARGS:-}
|
||||
TS_EXTRA_ARGS: ${TS_EXTRA_ARGS:-}
|
||||
TS_SERVE_PORT: ${TS_SERVE_PORT:-}
|
||||
TS_FUNNEL: ${TS_FUNNEL:-}
|
||||
volumes:
|
||||
- ./ts_data:/var/lib/tailscale/ # [tl! focus:end]
|
||||
miniflux:
|
||||
image: miniflux/miniflux:latest
|
||||
container_name: miniflux
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- DATABASE_URL=postgres://${DB_USER}:${DB_PASS}@db/miniflux?sslmode=disable
|
||||
- RUN_MIGRATIONS=1
|
||||
- CREATE_ADMIN=1
|
||||
- ADMIN_USERNAME=${ADMIN_USER}
|
||||
- ADMIN_PASSWORD=${ADMIN_PASS}
|
||||
network_mode: "service:tailscale" # [tl! focus]
|
||||
db:
|
||||
image: postgres:15
|
||||
container_name: miniflux-db
|
||||
environment:
|
||||
- POSTGRES_USER=${DB_USER}
|
||||
- POSTGRES_PASSWORD=${DB_PASS}
|
||||
volumes:
|
||||
- ./mf_data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready", "-U", "${DB_USER}"]
|
||||
interval: 10s
|
||||
start_period: 30s
|
||||
```
|
||||
|
||||
I can bring it up with:
|
||||
```shell
|
||||
docker compose up -d # [tl! .cmd .nocopy:1,5]
|
||||
[+] Running 4/4
|
||||
✔ Network miniflux_default Created
|
||||
✔ Container miniflux-db Started
|
||||
✔ Container miniflux-tailscale Started
|
||||
✔ Container miniflux Created
|
||||
```
|
||||
|
||||
And I can hit it at `https://miniflux.tailnet-name.ts.net` from within my tailnet:
|
||||
|
||||
![miniflux](miniflux.png)
|
||||
|
||||
Nice, right? Now to just convert all of my other containerized apps that don't really need to be public. Fortunately that shouldn't take too long since I've got this nice, portable, repeatable Docker Compose setup I can use.
|
||||
|
||||
Maybe I'll write about something *other* than Tailscale soon. Stay tuned!
|
After Width: | Height: | Size: 55 KiB |
BIN
content/posts/tailscale-ssh-serve-funnel/cockpit.png
Normal file
After Width: | Height: | Size: 398 KiB |
BIN
content/posts/tailscale-ssh-serve-funnel/file_server.png
Normal file
After Width: | Height: | Size: 69 KiB |
336
content/posts/tailscale-ssh-serve-funnel/index.md
Normal file
|
@ -0,0 +1,336 @@
|
|||
---
|
||||
title: "Tailscale Feature Highlight: SSH, Serve, and Funnel"
|
||||
date: 2023-12-20
|
||||
# lastmod: 2023-12-18
|
||||
description: "Exploring some of my favorite Tailscale addon features: SSH, Serve, and Funnel."
|
||||
featured: false
|
||||
toc: true
|
||||
comment: true
|
||||
series: Tips # Projects, Code
|
||||
tags:
|
||||
- homelab
|
||||
- networking
|
||||
- tailscale
|
||||
- vpn
|
||||
---
|
||||
|
||||
I've spent the past two years in love with [Tailscale](https://tailscale.com/), which builds on the [secure and high-performance Wireguard VPN protocol](/cloud-based-wireguard-vpn-remote-homelab-access/) and makes it [really easy to configure and manage](/secure-networking-made-simple-with-tailscale/). Being able to easily (and securely) access remote devices as if they were on the same LAN is pretty awesome to begin with, but Tailscale is packed with an ever-expanding set of features that can really help to streamline your operations too. Here are three of my favorites.
|
||||
|
||||
### Tailscale SSH
|
||||
Tailscale already takes care of issuing, rotating, and otherwise managing the Wireguard keys used for securing communications between the systems in your tailnet. [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh) lets it do the same for your SSH keys as well. No more manually dropping public keys on systems you're setting up for remote access. No more scrambling to figure out how to get your private key onto your mobile device so you can SSH to a server. No more worrying about who has access to what. Tailscale can solve all those concerns for you - and it does it without impacting traditional SSH operations:
|
||||
|
||||
> *With Tailscale, you can already connect machines in your network, and encrypt communications end-to-end from one point to another—and this includes, for example, SSHing from your work laptop to your work desktop. Tailscale also knows your identity, since that’s how you connected to your tailnet. When you enable Tailscale SSH, Tailscale claims port 22 for the Tailscale IP address (that is, only for traffic coming from your tailnet) on the devices for which you have enabled Tailscale SSH. This routes SSH traffic for the device from the Tailscale network to an SSH server run by Tailscale, instead of your standard SSH server. With Tailscale SSH, based on the ACLs in your tailnet, you can allow devices to connect over SSH and rely on Tailscale for authentication instead of public key authentication.*
|
||||
|
||||
All you need to advertise Tailscale SSH on a system is to pass the appropriate flag in the `tailscale up` command:
|
||||
```shell
|
||||
sudo tailscale up --ssh # [tl! .cmd]
|
||||
```
|
||||
|
||||
To actually use the feature, though, you'll need to make sure that your Tailscale ACL permits access. The default "allow all" ACL does this:
|
||||
```json
|
||||
{
|
||||
"acls": [
|
||||
// Allow all connections.
|
||||
{ "action": "accept", "src": ["*"], "dst": ["*:*"] },
|
||||
],
|
||||
"ssh": [ // [tl! highlight:start]
|
||||
// Allow all users to SSH into their own devices in check mode.
|
||||
{
|
||||
"action": "check",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self"],
|
||||
"users": ["autogroup:nonroot", "root"]
|
||||
}
|
||||
] // [tl! highlight:end]
|
||||
}
|
||||
```
|
||||
|
||||
The `acls` block allows all nodes to talk to each other over all ports, and the `ssh` block will allow all users in the tailnet to SSH into systems they own (both as nonroot users as well as the `root` account) but only if they have reauthenticated to Tailscale within the last 12 hours (due to that `check` action).
|
||||
|
||||
Most of my tailnet nodes are tagged with a location (`internal`/`external`) instead of belonging to a single user, and the `check` action doesn't work when the source is a tagged system. So my SSH ACL looks a bit more like this:
|
||||
```json
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
// internal systems can SSH to internal and external systems
|
||||
"action": "accept",
|
||||
"users": ["tag:internal"],
|
||||
"ports": [
|
||||
"tag:internal:22",
|
||||
"tag:external:22"
|
||||
],
|
||||
},
|
||||
],
|
||||
"tagOwners": { // [tl! collapse:3]
|
||||
"tag:external": ["group:admins"],
|
||||
"tag:internal": ["group:admins"],
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
// users can SSH to their own systems and those tagged as internal and external
|
||||
"action": "check",
|
||||
"src": ["autogroup:members"],
|
||||
"dst": ["autogroup:self", "tag:internal", "tag:external"],
|
||||
"users": ["autogroup:nonroot", "root"],
|
||||
},
|
||||
{
|
||||
// internal systems can SSH to internal and external systems,
|
||||
// but external systems can't SSH at all
|
||||
"action": "accept",
|
||||
"src": ["tag:internal"],
|
||||
"dst": ["tag:internal", "tag:external"],
|
||||
"users": ["autogroup:nonroot"],
|
||||
},
|
||||
],
|
||||
}
|
||||
```
|
||||
|
||||
This way, SSH connections originating from `internal` systems will be accepted, while those originating from untagged systems[^web1] will have the extra check for tailnet authentication. You might also note that this policy prevents connections from tagged systems as the `root` user, requiring instead that the user log in with their own account and then escalate as needed.
|
||||
|
||||
[^web1]: Or the Tailscale admin web console - as we'll soon see.
|
||||
|
||||
These ACLs can get [pretty granular](https://tailscale.com/kb/1018/acls), and I think it's pretty cool to be able to codify your SSH access rules in a centrally-managed[^vcs] policy instead of having to manually keep track of which keys are on which systems.
|
||||
|
||||
[^vcs]: And potentially [version-controlled](https://tailscale.com/kb/1204/gitops-acls).
|
||||
|
||||
Once SSH is enabled on a tailnet node and the ACL rules are in place, you can SSH from a Tailscale-protected system to another as easily as `ssh [hostname]` and you'll be connected right away - no worrying about keys or fumbling to enter credentials. I think this is doubly cool when implemented on systems running in The Cloud; Tailscale provides the connectivity so I don't need to open up port 22 to the world.
|
||||
|
||||
```shell
|
||||
ssh tsdemo # [tl! .cmd]
|
||||
Welcome to Ubuntu 22.04.3 LTS (GNU/Linux 6.5.11-6-pve x86_64) # [tl! .nocopy:start]
|
||||
|
||||
* Documentation: https://help.ubuntu.com
|
||||
* Management: https://landscape.canonical.com
|
||||
* Support: https://ubuntu.com/advantage
|
||||
Last login: Tue Dec 19 04:17:15 UTC 2023 from 100.73.92.61 on pts/3
|
||||
john@tsdemo:~$ # [tl! .nocopy:end]
|
||||
```
|
||||
|
||||
As a bonus, I can also open an SSH session from the Tailscale [admin console](https://login.tailscale.com/admin/machines)[^web2]:
|
||||
![web_ssh_1](web_ssh_1.png)
|
||||
|
||||
![web_ssh_2](web_ssh_2.png)
|
||||
|
||||
![web_ssh_3](web_ssh_3.png)
|
||||
|
||||
[^web2]: SSH connections originating from the admin portal are associated with that logon, so they will follow the `check` portion of the policy. The first attempt to connect will require reauthentication with Tailscale, and subsequent connections will auto-connect for the next 12 hours.
|
||||
|
||||
That even works from mobile devices, too!
|
||||
|
||||
### Tailscale Serve
|
||||
I've [mentioned in the past](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#reverse-proxy-setup) how impressed I was (and still am!) by the [Caddy webserver](https://caddyserver.com/) and how effortless it makes configuring a reverse proxy with automatic TLS. I've used it for a *lot* of my externally-facing projects.
|
||||
|
||||
Caddy is great, but it's not quite as easy to use for internal stuff - I'd need a public DNS record and inbound HTTP access in order for the ACME challenge to complete and a cert to be issued and installed, or I would have to manually create a certificate and load it in the Caddy config. That's probably not a great fit for proxying my [Proxmox host](/ditching-vsphere-for-proxmox/#on-the-host). And that is where the capabilities of [Tailscale Serve](https://tailscale.com/kb/1312/serve) really come in handy.
|
||||
|
||||
> *Tailscale Serve is a feature that allows you to route traffic from other devices on your Tailscale network (known as a tailnet) to a local service running on your device. You can think of this as sharing the service, such as a website, with the rest of your tailnet.*
|
||||
|
||||
{{% notice note "Prerequisites" %}}
|
||||
Tailscale Serve requires that the MagicDNS and HTTPS features be enabled on your Tailnet. You can learn how to turn those on [here](https://tailscale.com/kb/1153/enabling-https).
|
||||
{{% /notice %}}
|
||||
|
||||
The general syntax is:
|
||||
```shell
|
||||
tailscale serve <target>
|
||||
```
|
||||
> *`<target>` can be a file, directory, text, or most commonly the location to a service running on the local machine. The location of the service can be expressed as a port number (e.g., `3000`), a partial URL (e.g., `localhost:3000`), or a full URL including a path (e.g., `http://localhost:3000/foo`).*
|
||||
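To illustrate those three forms with some hypothetical invocations (none of these are run in this post):

```shell
tailscale serve 3000                       # port number
tailscale serve localhost:3000             # partial URL
tailscale serve http://localhost:3000/foo  # full URL including a path
```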
|
||||
The command also supports some useful flags:
|
||||
```shell
|
||||
--bg, --bg=false
|
||||
Run the command as a background process (default false)
|
||||
--http uint
|
||||
Expose an HTTP server at the specified port
|
||||
--https uint
|
||||
Expose an HTTPS server at the specified port (default mode)
|
||||
--set-path string
|
||||
Appends the specified path to the base URL for accessing the underlying service
|
||||
--tcp uint
|
||||
Expose a TCP forwarder to forward raw TCP packets at the specified port
|
||||
--tls-terminated-tcp uint
|
||||
Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port
|
||||
--yes, --yes=false
|
||||
Update without interactive prompts (default false)
|
||||
```
|
||||
|
||||
Tailscale Serve can be used for spawning a simple file server (like this one which shares the contents of the `/demo` directory):
|
||||
```shell
|
||||
sudo tailscale serve /demo # [tl! .cmd]
|
||||
Available within your tailnet: # [tl! .nocopy:5]
|
||||
|
||||
https://tsdemo.tailnet-name.ts.net/
|
||||
|-- path /demo
|
||||
|
||||
Press Ctrl+C to exit.
|
||||
```
|
||||
|
||||
![file server](file_server.png)
|
||||
|
||||
Note that this server is running in the foreground, and that it's serving the site with an automatically-generated and automatically-trusted [Let's Encrypt](https://letsencrypt.org/) certificate.
|
||||
|
||||
I can also use Tailscale Serve for proxying another web server, like [Cockpit](https://cockpit-project.org/), which runs on `http://localhost:9090`:
|
||||
```shell
|
||||
sudo tailscale serve --bg 9090 # [tl! .cmd]
|
||||
Available within your tailnet: # [tl! .nocopy:6]
|
||||
|
||||
https://tsdemo.tailnet-name.ts.net/
|
||||
|-- proxy http://127.0.0.1:9090
|
||||
|
||||
Serve started and running in the background.
|
||||
To disable the proxy, run: tailscale serve --https=443 off
|
||||
```
|
||||
|
||||
![cockpit](cockpit.png)
|
||||
|
||||
This time, I included the `--bg` flag so that the server would run in the background, and I told it to proxy port `9090` instead of a file path.
|
||||
|
||||

But what if I want to proxy *another* service (like [netdata](https://github.com/netdata/netdata), which runs on `http://localhost:19999`) at the same time? I can either proxy it on another port, like `8443`:

```shell
sudo tailscale serve --bg --https 8443 19999 # [tl! .cmd]
Available within your tailnet: # [tl! .nocopy:9]

https://tsdemo.tailnet-name.ts.net/
|-- proxy http://127.0.0.1:9090

https://tsdemo.tailnet-name.ts.net:8443/
|-- proxy http://127.0.0.1:19999

Serve started and running in the background.
To disable the proxy, run: tailscale serve --https=8443 off
```

Or serve it at a different path:

```shell
sudo tailscale serve --bg --set-path /netdata 19999 # [tl! .cmd]
Available within your tailnet: # [tl! .nocopy:9]

https://tsdemo.tailnet-name.ts.net/
|-- proxy http://127.0.0.1:9090

https://tsdemo.tailnet-name.ts.net/netdata
|-- proxy http://127.0.0.1:19999

Serve started and running in the background.
To disable the proxy, run: tailscale serve --https=443 off
```

![netdata](netdata.png)

{{% notice note "Stubborn Apps" %}}
Not all web apps adapt well to being served at a different path than they expect. That approach works fine for netdata, but it did not work with Cockpit (at least not without digging deeper into the configuration to change the base URL). But hey, that's why we've got options!
{{% /notice %}}
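
For what it's worth, that deeper digging for Cockpit would likely mean setting `UrlRoot` in its config file - a sketch based on Cockpit's documented options, which I haven't actually wired up here:

```shell
# tell Cockpit to expect to be served under /cockpit instead of /
sudo tee /etc/cockpit/cockpit.conf <<'EOF' # [tl! .cmd]
[WebService]
UrlRoot=/cockpit
EOF
sudo systemctl restart cockpit.socket # [tl! .cmd]
```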

### Tailscale Funnel
So Tailscale Serve works well for making web resources available inside of my tailnet... but what if I want to share them externally? [Tailscale Funnel](https://tailscale.com/kb/1223/funnel) has that use case covered, and it makes it easy to share things without having to manage DNS records, punch holes in firewalls, or configure certificates.

> *Tailscale Funnel is a feature that allows you to route traffic from the wider internet to a local service running on a machine in your Tailscale network (known as a tailnet). You can think of this as publicly sharing a local service, like a web app, for anyone to access—even if they don’t have Tailscale themselves.*

In addition to requiring that HTTPS is enabled, Funnel also requires that nodes have the `funnel` attribute applied through the ACL policy.

There's a default policy snippet to apply this:

```json
"nodeAttrs": [
  {
    "target": ["autogroup:member"],
    "attr": ["funnel"],
  },
],
```

But I use a tag to manage Funnel privileges instead, so my configuration will look something like this:

```json
{
  "acls": [ // [tl! collapse:start]
    {
      // internal systems can SSH to internal and external systems
      "action": "accept",
      "users": ["tag:internal"],
      "ports": [
        "tag:internal:22",
        "tag:external:22"
      ],
    },
  ], // [tl! collapse:end]
  "tagOwners": {
    "tag:external": ["group:admins"],
    "tag:funnel": ["group:admins"], // [tl! focus]
    "tag:internal": ["group:admins"],
  },
  "ssh": [ // [tl! collapse:start]
    {
      // users can SSH to their own systems and those tagged as internal and external
      "action": "check",
      "src": ["autogroup:members"],
      "dst": ["autogroup:self", "tag:internal", "tag:external"],
      "users": ["autogroup:nonroot", "root"],
    },
    {
      // internal systems can SSH to internal and external systems,
      // but external systems can't SSH at all
      "action": "accept",
      "src": ["tag:internal"],
      "dst": ["tag:internal", "tag:external"],
      "users": ["autogroup:nonroot"],
    },
  ], // [tl! collapse:end]
  "nodeAttrs": [ // [tl! focus:start]
    {
      // devices with the funnel tag can enable Tailscale Funnel
      "target": ["tag:funnel"],
      "attr": ["funnel"],
    },
  ] // [tl! focus:end]
}
```

Now only nodes with the `funnel` tag will be able to enable Funnel.
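
Applying that tag to a node happens from the node itself, something like this (with the tag name from my policy above, and assuming a tag owner approves the change):

```shell
sudo tailscale up --advertise-tags=tag:funnel # [tl! .cmd]
```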

From there, the process to activate Tailscale Funnel is basically identical to that of Tailscale Serve - you just use `tailscale funnel` instead of `tailscale serve`.

{{% notice warning "Funnel Ports, Not Resources" %}}
A Funnel configuration is applied to the **port** that Tailscale Serve uses to make a resource available, not the resource itself. In the example above, I have both Cockpit and netdata being served over port `443`. If I try to use `sudo tailscale funnel --set-path /netdata 19999` to Funnel just the netdata instance, that will actually Funnel *both* resources instead of just the one.
{{% /notice %}}

If I want to make the netdata instance available publicly while keeping Cockpit internal-only, I'll need to serve netdata on a different port. Funnel [only supports](https://tailscale.com/kb/1223/funnel#limitations) ports `443`, `8443`, and `10000`, so I'll use `8443`:

```shell
sudo tailscale funnel --bg --https 8443 --set-path /netdata 19999 # [tl! .cmd]
Available on the internet: # [tl! .nocopy:6]

https://tsdemo.tailnet-name.ts.net:8443/netdata
|-- proxy http://127.0.0.1:19999

Funnel started and running in the background.
To disable the proxy, run: tailscale funnel --https=8443 off
```

It will take 10 or so minutes for the public DNS record to get created, but after that anyone on the internet (not just within my tailnet!) will be able to access the resource I've shared.
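
Once that record lands, a quick spot check from any machine (no Tailscale required, using the same demo hostname as above) should get an answer back from netdata:

```shell
curl -sI https://tsdemo.tailnet-name.ts.net:8443/netdata # [tl! .cmd]
```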

I can use `tailscale serve status` to confirm that both Cockpit and netdata are served internally on port `443`, but only netdata is published externally on port `8443`:

```shell
sudo tailscale serve status # [tl! .cmd]
# [tl! .nocopy:9]
# Funnel on:
# - https://tsdemo.tailnet-name.ts.net:8443

https://tsdemo.tailnet-name.ts.net (tailnet only)
|-- / proxy http://127.0.0.1:9090
|-- /netdata proxy http://127.0.0.1:19999

https://tsdemo.tailnet-name.ts.net:8443 (Funnel on)
|-- /netdata proxy http://127.0.0.1:19999
```

Pretty cool, right?

### Other Features
This post has covered some of my favorite (and most-used) Tailscale features, but there are *plenty* of other cool tricks up Tailscale's sleeve. These are some others that are definitely worth checking out:
- [Mullvad Exit Nodes](https://tailscale.com/kb/1258/mullvad-exit-nodes)
- [Taildrop](https://tailscale.com/kb/1106/taildrop)
- [golink](https://tailscale.com/blog/golink) (also covered [here](/tailscale-golink-private-shortlinks-tailnet/))

And if you're particularly nerdy like me, these will probably also grab your interest:
- [GitOps for Tailscale ACLs](https://tailscale.com/kb/1204/gitops-acls)
- [Manage Tailscale resources using Terraform](https://tailscale.com/kb/1210/terraform-provider)

Need more inspiration? Tailscale has a pretty thorough [collection of example use cases](https://tailscale.com/kb/solutions) to help get the sparks flying.

I'm sure I'll have [more Tailscale-centric posts](/tags/tailscale/) to share in the future, too. Stay tuned!
BIN
content/posts/tailscale-ssh-serve-funnel/netdata.png
Normal file
After Width: | Height: | Size: 183 KiB |
BIN
content/posts/tailscale-ssh-serve-funnel/web_ssh_1.png
Normal file
After Width: | Height: | Size: 26 KiB |
BIN
content/posts/tailscale-ssh-serve-funnel/web_ssh_2.png
Normal file
After Width: | Height: | Size: 40 KiB |
BIN
content/posts/tailscale-ssh-serve-funnel/web_ssh_3.png
Normal file
After Width: | Height: | Size: 71 KiB |
@ -14,7 +14,7 @@ featureImage: "esxi8.png" # Sets featured image on blog post.
# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage.
# shareImage: "share.png" # Designate a separate image for social media sharing.
codeLineNumbers: false # Override global value for showing of line numbers within code block.
series: Tips # Projects, Scripts, vRA8
series: Tips # Projects, Code, vRA8
tags:
- vmware
- homelab

@ -1,5 +1,5 @@
---
series: Scripts
series: Code
date: "2021-04-29T08:34:30Z"
usePageBundles: true
tags:

@ -14,7 +14,7 @@ featureImage: "vdt.png" # Sets featured image on blog post.
thumbnail: "pulse2.png" # Sets thumbnail image appearing inside card on homepage.
# shareImage: "share.png" # Designate a separate image for social media sharing.
codeLineNumbers: false # Override global value for showing of line numbers within code block.
series: Tips # Projects, Scripts, vRA8
series: Tips # Projects, Code, vRA8
tags:
- vmware
- vsphere

@ -5,7 +5,6 @@ date: 2023-09-13
timeless: true
draft: false
description: "This blog has migrated from virtuallypotato.com to runtimeterror.dev."
featured: true
toc: false
comment: true
tags:

@ -19,18 +19,37 @@ Incoming messages are routed through a pool of servers so that your conversation
The app is also packed with other features like disappearing messages, encrypted file transfers, encrypted voice messages, encrypted audio and video calls, decentralized private groups, and a cool incognito mode which connects new conversations to a randomly-generated profile instead of your primary one. There's even a [CLI client](https://github.com/simplex-chat/simplex-chat/blob/stable/docs/CLI.md)!

## Servers
[![](https://status.vpota.to/api/badge/11/status)](https://status.vpota.to/status/simplex)

You can easily host your own [simplexmq server](https://github.com/simplex-chat/simplexmq) for handling your inbound message queue, and I've done just that; in fact, I've deployed three! And, as one of my closest internet friends, *you're welcome to use them as well.*

Just add these in the SimpleX app at **Settings > Network & servers > SMP servers > + Add server...**. Enable the option to use them for new connections, and they'll be added to the pool used for incoming messages in new conversations. If you want to use them immediately for existing conversations, go into each conversation's options menu and use the **Switch receiving address** option. You can also *disable* the option to use the default servers for new conversations if you only want messages to be routed through specific servers, but that does increase the likelihood of concurrent conversations being routed the same way. More servers, more path options, less metadata in any one place.

---

![](/images/smp-vpota-to.png)

`smp://kYx5LmVD9FMM8hJN4BQqL4WmeUNZn8ipXsX2UkBoiHE=@smp.vpota.to`

| | |
| --- | --- |
| [![](https://status.vpota.to/api/badge/6/uptime)](https://status.vpota.to/status/simplex) | [[details](https://jbowdre.url.lol/smp_status)] |

---

![](/images/smp1-vpota-to.png)

`smp://TbUrGydawdVKID0Lvix14UkaN-WarFgqXx4kaEG8Trw=@smp1.vpota.to`

| | |
| --- | --- |
| [![](https://status.vpota.to/api/badge/4/uptime)](https://status.vpota.to/status/simplex) | [[details](https://jbowdre.url.lol/smp1_status)] |

---

![](/images/smp2-vpota-to.png)

`smp://tNfQisxTQ9MhKpFDTbx9RnjgWigtxF1a26jroy5-rR4=@smp2.vpota.to`

| | |
| --- | --- |
| [![](https://status.vpota.to/api/badge/5/uptime)](https://status.vpota.to/status/simplex) | [[details](https://jbowdre.url.lol/smp2_status)] |

@ -17,10 +17,7 @@
<link>{{ .Permalink }}</link>
<description>Recent content {{ if ne .Title .Site.Title }}{{ with .Title }}in {{.}} {{ end }}{{ end }}on {{ .Site.Title }}</description>
<generator>Hugo -- gohugo.io</generator>{{ with .Site.LanguageCode }}
<language>{{.}}</language>{{end}}{{ with .Site.Author.email }}
<managingEditor>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</managingEditor>{{end}}{{ with .Site.Author.email }}
<webMaster>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</webMaster>{{end}}{{ with .Site.Copyright }}
<copyright>{{.}}</copyright>{{end}}{{ if not .Date.IsZero }}
<language>{{.}}</language>{{end}}{{ if not .Date.IsZero }}
<lastBuildDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</lastBuildDate>{{ end }}
{{- with .OutputFormats.Get "RSS" -}}
{{ printf "<atom:link href=%q rel=\"self\" type=%q />" .Permalink .MediaType | safeHTML }}

@ -30,24 +27,10 @@
<title>{{ .Title }}</title>
<link>{{ .Permalink }}</link>
<pubDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</pubDate>
{{ with .Site.Author.email }}<author>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</author>{{end}}
{{ with .Site.Params.Author.name }}<dc:creator>{{.}}</dc:creator>{{end}}
<guid>{{ .Permalink }}</guid>
<description>
{{ if ne site.Params.rss_summary false }}
{{ if .Params.summary }}
{{ .Params.summary | html }}
{{ else if .Params.abstract }}
{{ .Params.abstract | html }}
{{ else if .Summary }}
{{ .Summary | html }}
{{ end }}
{{ if site.Params.rss_summary_read_more_link }}
{{ $trans := i18n "read_more" -}}
{{ printf "<p><a href=\"%s\">%s</a></p>" .RelPermalink $trans | html }}
{{ end }}
{{ else }}
{{ .Content | html }}
{{ end }}
{{ .Content | html }}
</description>
</item>
{{ end }}

@ -1,10 +1,10 @@
{{ with .Site.Params.about }}
<div class="aside__about">
{{ with .logo }}<img class="about__logo" src="{{ . | absURL }}" alt="Logo">{{ end }}
<h1 class="about__title">{{ .title }}</h1>
<h1 class="about__title">{{ .title }} <a target="_blank" href="/feed.xml" aria-label="RSS"><i class="fa-solid fa-square-rss"></i></a></h1>
{{ partial "tagline.html" . }}
<br>
<a href="about"><i class="fa-regular fa-user"></i></a> <a href="/about">{{ site.Params.Author }}</a>
<a href="about"><i class="fa-regular fa-user"></i></a> <a href="/about">{{ site.Params.Author.username }}</a>
</div>
{{ end }}
<ul class="aside__social-links">

@ -6,12 +6,13 @@
{{ if .IsHome }}
<h1>{{ site.Params.indexTitle | markdownify }}</h1>
{{ else }}
<h1>{{ .Title | markdownify }}</h1>
<h1>{{ .Title | markdownify }}{{ if eq .Kind "term" }} <a target="_blank" href="feed.xml" aria-label="Category RSS"><i class="fa-solid fa-square-rss"></i></a>{{ end }}</h1>
{{ end }}
{{ .Content }}
</header>

{{ range $pages }}
{{- if ne .Title "Tags"}}
{{- range (.Paginate $pages).Pages }}
{{- $postDate := .Date.Format "2006-01-02" }}
{{- $updateDate := .Lastmod.Format "2006-01-02" }}
<article class="post">

@ -28,3 +29,9 @@
<br>
</article>
{{ end }}
{{- template "_internal/pagination.html" . }}
{{- else }}
{{- range .Pages.ByTitle }}
[<a href="{{ .Permalink }}">{{ .Title | markdownify }}</a>]
{{- end }}
{{- end }}

@ -59,3 +59,6 @@
{{- end }}
</ul>
{{- end }}
<hr>
<h3>status.lol</h3>
<script src="https://status.lol/jbowdre.js?time&link&fluent&pretty"></script>

@ -5,12 +5,27 @@
<meta charset="UTF-8"/>
{{ if or (.Site.Params.noindex) (.Params.noindex) }}<meta name="robots" content="noindex" /> {{ end }}

<!-- RSS -->
{{ with .OutputFormats.Get "rss" -}}
{{ printf `<link rel=%q type=%q href=%q title=%q>` .Rel .MediaType.Type .Permalink site.Title | safeHTML }}
{{ end }}

<!-- verification links -->
{{ if .IsHome }}
{{ range $item := .Site.Params.verifyLinks }}
<link rel="me" title="{{ $item.title }}" href="{{ $item.url }}">
{{ end }}
{{ end }}

{{ partialCached "favicon" . }}
{{ partial "opengraph" . }}

<!-- FontAwesome <https://fontawesome.com/> -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.2/css/all.min.css" integrity="sha512-1sCRPdkRXhBV2PBLUdRb4tMg1w2YPf37qatUFeS7zlBy7jJI8Lf4VHwWfZZfpXtYSLy85pkm9GaYVYMfw5BC1A==" crossorigin="anonymous" />

<!-- ForkAwesome <https://forkaweso.me/> -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/fork-awesome@1.2.0/css/fork-awesome.min.css" integrity="sha256-XoaMnoYC5TH6/+ihMEnospgm0J1PM/nioxbOUdnM8HY=" crossorigin="anonymous">

<!-- Academicons <https://jpswalsh.github.io/academicons/> -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/academicons/1.9.1/css/academicons.min.css" integrity="sha512-b1ASx0WHgVFL5ZQhTgiPWX+68KjS38Jk87jg7pe+qC7q9YkEtFq0z7xCglv7qGIs/68d3mAp+StfC8WKC5SSAg==" crossorigin="anonymous" />

@ -19,8 +34,10 @@
<link rel="stylesheet" href="{{ "css/risotto.css" | absURL }}">
<link rel="stylesheet" href="{{ "css/custom.css" | absURL }}">

{{ if .Site.Params.analytics }}
<!-- cabin analytics -->
<script async defer src="https://scripts.withcabin.com/hello.js"></script>
{{ end }}

<!-- syntax highlighting -->
{{ if (findRE "<pre" .Content 1) }}

@ -156,4 +156,48 @@ body.dark .notice {
position: relative;
}

/* pagination overrides */
ul.pagination li::marker {
  content: '';
}

ul.pagination li {
  margin: 0 0.25rem;
}

.pagination {
  display: flex;
  justify-content: center;
}

/* statuslol overrides */
.statuslol {
  background: var(--inner-bg) !important;
  flex-direction: column;
}

.statuslol_emoji_container {
  font-size: 1.5em !important;
}

.statuslol_content p,
.statuslol_content li,
.statuslol_content em,
.statuslol_content strong {
  overflow-wrap: inherit;
  font-size: 0.8em;
  line-height: normal;
  margin: 0.8em auto;
  color: var(--fg);
}

.statuslol_time a:link,
.statuslol_time a:visited {
  color: var(--link) !important;
}

.statuslol_time a:hover,
.statuslol_time a:active,
.statuslol_time a.active {
  color: var(--hover) !important;
}
BIN
static/images/broken-computer.png
Normal file
After Width: | Height: | Size: 7.3 KiB |
@ -11,7 +11,7 @@ module.exports = {

// Which theme you want to use. You can find all of the themes at
// https://torchlight.dev/docs/themes.
theme: 'one-dark-pro',
theme: 'synthwave-84',

// The Host of the API.
host: 'https://api.torchlight.dev',

@ -30,11 +30,11 @@ module.exports = {

// If there are any diff indicators for a line, put them
// in place of the line number to save horizontal space.
diffIndicatorsInPlaceOfLineNumbers: true
diffIndicatorsInPlaceOfLineNumbers: true,

// When lines are collapsed, this is the text that will
// be shown to indicate that they can be expanded.
// summaryCollapsedIndicator: '...',
summaryCollapsedIndicator: '{ ... }',
},

// Options for the highlight command.