diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f9be552 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.hugo_build.lock + diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..3e8ef45 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "themes/risotto"] + path = themes/risotto + url = https://github.com/joeroe/risotto.git +[submodule "themes/hugo-notice"] + path = themes/hugo-notice + url = https://github.com/martignoni/hugo-notice.git +[submodule "themes/hugo-cloak-email"] + path = themes/hugo-cloak-email + url = https://github.com/martignoni/hugo-cloak-email.git diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4527efb --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/config.toml b/config.toml deleted file mode 100644 index 1d7c819..0000000 --- a/config.toml +++ /dev/null @@ -1,3 +0,0 @@ -baseURL = 'http://example.org/' -languageCode = 'en-us' -title = 'My New Hugo Site' diff --git a/config/_default/config.toml b/config/_default/config.toml new file mode 100644 index 0000000..00f306b --- /dev/null +++ b/config/_default/config.toml @@ -0,0 +1,26 @@ +baseURL = "https://runtimeterror.dev" +theme = [ "hugo-cloak-email", "hugo-notice", "risotto"] +title = "runtimeterror" +author = "ops" +copyright = "© 2023 [runtimeterror](https://runtimeterror.dev)" +paginate = 10 +languageCode = "en" +DefaultContentLanguage = "en" +enableInlineShortcodes = true + +# Automatically add content sections to main menu +sectionPagesMenu = "main" + +[outputs] + home = ["HTML", "RSS", "JSON"] + +[permalinks] + posts = ":filename" + +[services] + + [services.instagram] + disableInlineCSS = true + + [services.twitter] + disableInlineCSS = true diff --git a/config/_default/configTaxo.toml b/config/_default/configTaxo.toml new file mode 100644 index 0000000..9220fcf --- /dev/null +++ b/config/_default/configTaxo.toml @@ -0,0 +1,27 @@ +timeout = 30000 +enableInlineShortcodes = true + +[taxonomies] +category = "categories" +tag = "tags" +series = "series" + +[privacy] + + [privacy.vimeo] + disabled = false + simple = true + + [privacy.twitter] + disabled = false + enableDNT = true + simple = true + disableInlineCSS = true + + [privacy.instagram] + disabled = false + simple = true + + [privacy.youtube] + disabled = false + privacyEnhanced = true diff --git a/config/_default/markup.toml b/config/_default/markup.toml new file mode 100644 index 0000000..6042a49 --- /dev/null +++ b/config/_default/markup.toml @@ -0,0 +1,11 @@ +# For hugo >= 0.60.0, enable inline HTML +[goldmark.renderer] +unsafe = true + + +# Table of contents +# Add toc = true to content front matter to enable +[tableOfContents] + startLevel = 2 + endLevel = 
3 + ordered = true \ No newline at end of file diff --git a/config/_default/menu.toml b/config/_default/menu.toml new file mode 100644 index 0000000..749f786 --- /dev/null +++ b/config/_default/menu.toml @@ -0,0 +1,5 @@ +[[main]] + identifier = "about" + name = "About" + url = "/about/" + weight = 10 \ No newline at end of file diff --git a/config/_default/params.toml b/config/_default/params.toml new file mode 100644 index 0000000..e4b5e7d --- /dev/null +++ b/config/_default/params.toml @@ -0,0 +1,27 @@ +noindex = false + +usePageBundles = true + +[theme] +palette = "runtimeterror" + +# Sidebar: about/bio +[about] +title = "runtimeterror" +description = "Better living through less-bad code." +logo = "images/broken-computer.svg" + +# Sidebar: social links +# Available icon sets: +# * FontAwesome 6 ('fa-brands', 'fa-normal', or 'fa-solid' for brands) +# * Academicons ('ai ai-') + +[[socialLinks]] +icon = "fa-brands fa-github" +title = "GitHub" +url = "https://github.com/jbowdre" + +[[socialLinks]] +icon = "fa-solid fa-envelope" +title = "Email" +url = "mailto:ops@runtimeterror.dev" diff --git a/content/_index.md b/content/_index.md new file mode 100644 index 0000000..ebcbee7 --- /dev/null +++ b/content/_index.md @@ -0,0 +1,3 @@ ++++ +author = "ops" ++++ diff --git a/content/about.md b/content/about.md new file mode 100644 index 0000000..b31bf1d --- /dev/null +++ b/content/about.md @@ -0,0 +1,25 @@ ++++ +title = "About" +description = "Hugo, the world's fastest framework for building websites" +date = "2019-02-28" +aliases = ["about-us", "about-hugo", "contact"] +author = "Hugo Authors" ++++ + +Written in Go, Hugo is an open source static site generator available under the [Apache Licence 2.0.](https://github.com/gohugoio/hugo/blob/master/LICENSE) Hugo supports TOML, YAML and JSON data file types, Markdown and HTML content files and uses shortcodes to add rich content. 
Other notable features are taxonomies, multilingual mode, image processing, custom output formats, HTML/CSS/JS minification and support for Sass SCSS workflows. + +Hugo makes use of a variety of open source projects including: + +* https://github.com/yuin/goldmark +* https://github.com/alecthomas/chroma +* https://github.com/muesli/smartcrop +* https://github.com/spf13/cobra +* https://github.com/spf13/viper + +Hugo is ideal for blogs, corporate websites, creative portfolios, online magazines, single page applications or even a website with thousands of pages. + +Hugo is for people who want to hand code their own website without worrying about setting up complicated runtimes, dependencies and databases. + +Websites built with Hugo are extremely fast, secure and can be deployed anywhere including, AWS, GitHub Pages, Heroku, Netlify and any other hosting provider. + +Learn more and contribute on [GitHub](https://github.com/gohugoio). diff --git a/content/archives.md b/content/archives.md new file mode 100644 index 0000000..db16eed --- /dev/null +++ b/content/archives.md @@ -0,0 +1,5 @@ +--- +date: 2019-05-28 +type: section +layout: "archives" +--- diff --git a/content/homepage/about.md b/content/homepage/about.md new file mode 100644 index 0000000..b5d6981 --- /dev/null +++ b/content/homepage/about.md @@ -0,0 +1,7 @@ +--- +title: 'Our Difference' +button: 'About us' +weight: 2 +--- + +Lorem ipsum dolor sit amet, et essent mediocritatem quo, choro volumus oporteat an mei. Ipsum dolor sit amet, et essent mediocritatem quo. 
diff --git a/content/homepage/index.md b/content/homepage/index.md new file mode 100644 index 0000000..ca03031 --- /dev/null +++ b/content/homepage/index.md @@ -0,0 +1,3 @@ +--- +headless: true +--- diff --git a/content/homepage/work.md b/content/homepage/work.md new file mode 100644 index 0000000..f99bc99 --- /dev/null +++ b/content/homepage/work.md @@ -0,0 +1,7 @@ +--- +title: 'We Help Business Grow' +button: 'Our Work' +weight: 1 +--- + +Lorem ipsum dolor sit amet, et essent mediocritatem quo, choro volumus oporteat an mei. Numquam dolores mel eu, mea docendi omittantur et, mea ea duis erat. Elit melius cu ius. Per ex novum tantas putant, ei his nullam aliquam apeirian. Aeterno quaestio constituto sea an, no eum intellegat assueverit. diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/2g57odtq2.jpeg b/content/post/3d-modeling-and-printing-on-chrome-os/2g57odtq2.jpeg new file mode 100644 index 0000000..6ba9817 Binary files /dev/null and b/content/post/3d-modeling-and-printing-on-chrome-os/2g57odtq2.jpeg differ diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/LHax6lAwh.png b/content/post/3d-modeling-and-printing-on-chrome-os/LHax6lAwh.png new file mode 100644 index 0000000..a4b1d82 Binary files /dev/null and b/content/post/3d-modeling-and-printing-on-chrome-os/LHax6lAwh.png differ diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/VTISYOKHO.png b/content/post/3d-modeling-and-printing-on-chrome-os/VTISYOKHO.png new file mode 100644 index 0000000..072e04a Binary files /dev/null and b/content/post/3d-modeling-and-printing-on-chrome-os/VTISYOKHO.png differ diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/f8nRJcyI6.png b/content/post/3d-modeling-and-printing-on-chrome-os/f8nRJcyI6.png new file mode 100644 index 0000000..9b1e6c5 Binary files /dev/null and b/content/post/3d-modeling-and-printing-on-chrome-os/f8nRJcyI6.png differ diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/index.md 
b/content/post/3d-modeling-and-printing-on-chrome-os/index.md new file mode 100644 index 0000000..f0f4b76 --- /dev/null +++ b/content/post/3d-modeling-and-printing-on-chrome-os/index.md @@ -0,0 +1,96 @@ +--- +date: "2020-09-14T08:34:30Z" +thumbnail: qDTXt1jp3.png +featureImage: qDTXt1jp3.png +usePageBundles: true +tags: +- linux +- chromeos +- crostini +- 3dprinting +title: 3D Modeling and Printing on Chrome OS +--- + +I've got an Ender 3 Pro 3D printer, a Raspberry Pi 4, and a Pixel Slate. I can't interface directly with the printer over USB from the Slate (plus having to be physically connected to things is like so lame) so I installed [Octoprint on the Raspberry Pi](https://github.com/guysoft/OctoPi) and connected that to the printer's USB interface. This gave me a pretty web interface for controlling the printer - but it's only accessible over the local network. I also installed [The Spaghetti Detective](https://www.thespaghettidetective.com/) to allow secure remote control of the printer, with the added bonus of using AI magic and a cheap camera to detect and abort failing prints. + +That's a pretty sweet setup, but I still needed a way to convert STL 3D models into GCODE files which the printer can actually understand. And what if I want to create my own designs? + +Enter "Crostini," Chrome OS's [Linux (Beta) feature](https://chromium.googlesource.com/chromiumos/docs/+/master/containers_and_vms.md). It consists of a hardened Linux VM named `termina` which runs (by default) a Debian Buster LXD container named `penguin` (though you can spin up just about any container for which you can find an [image](https://us.images.linuxcontainers.org/)) and some fancy plumbing to let Chrome OS and Linux interact in specific clearly-defined ways. It's a brilliant balance between offering the flexibility of Linux while preserving Chrome OS's industry-leading security posture. 
+ + +![Neofetch in the Crostini terminal](lhTnVwCO3.png) + +There are plenty of great guides (like [this one](https://www.computerworld.com/article/3314739/linux-apps-on-chrome-os-an-easy-to-follow-guide.html)) on how to get started with Linux on Chrome OS so I won't rehash those steps here. + +One additional step you will probably want to take is make sure that your Chromebook is configured to enable hyperthreading, as it may have [hyperthreading disabled by default](https://support.google.com/chromebook/answer/9340236). Just plug `chrome://flags/#scheduler-configuration` into Chrome's address bar, set it to `Enables Hyper-Threading on relevant CPUs`, and then click the button to restart your Chromebook. You'll thank me later. +![Enabling hyperthreading](LHax6lAwh.png) + +### The Software +I settled on using [FreeCAD](https://www.freecadweb.org/) for parametric modeling and [Ultimaker Cura](https://ultimaker.com/software/ultimaker-cura) for my GCODE slicer, but unfortunately getting them working cleanly wasn't entirely straightforward. + +#### FreeCAD +Installing FreeCAD is as easy as: +```shell +$ sudo apt update +$ sudo apt install freecad +``` +But launching `/usr/bin/freecad` caused me some weird graphical defects which rendered the application unusable. 
I found that I needed to pass the `LIBGL_DRI3_DISABLE=1` environment variable to eliminate these glitches: +```shell +$ env 'LIBGL_DRI3_DISABLE=1' /usr/bin/freecad & +``` +To avoid having to type that every time I wished to launch the app, I inserted this line at the bottom of my `~/.bashrc` file: +```shell +alias freecad="env 'LIBGL_DRI3_DISABLE=1' /usr/bin/freecad &" +``` +To be able to start FreeCAD from the Chrome OS launcher with that environment variable intact, edit it into the `Exec` line of the `/usr/share/applications/freecad.desktop` file: +```shell +$ sudo vi /usr/share/applications/freecad.desktop +[Desktop Entry] +Version=1.0 +Name=FreeCAD +Name[de]=FreeCAD +Comment=Feature based Parametric Modeler +Comment[de]=Feature-basierter parametrischer Modellierer +GenericName=CAD Application +GenericName[de]=CAD-Anwendung +Exec=env LIBGL_DRI3_DISABLE=1 /usr/bin/freecad %F +Path=/usr/lib/freecad +Terminal=false +Type=Application +Icon=freecad +Categories=Graphics;Science;Engineering +StartupNotify=true +GenericName[de_DE]=Feature-basierter parametrischer Modellierer +Comment[de_DE]=Feature-basierter parametrischer Modellierer +MimeType=application/x-extension-fcstd +``` +That's it! Get on with your 3D-modeling bad self. +![FreeCAD](qDTXt1jp3.png) +Now that you've got a model, be sure to [export it as an STL mesh](https://wiki.freecadweb.org/Export_to_STL_or_OBJ) so you can import it into your slicer. + +#### Ultimaker Cura +Cura isn't available from the default repos so you'll need to download the AppImage from https://github.com/Ultimaker/Cura/releases/tag/4.7.1. You can do this in Chrome and then use the built-in File app to move the file into your 'My Files > Linux Files' directory. Feel free to put it in a subfolder if you want to keep things organized - I stash all my AppImages in `~/Applications/`. 
+ +To be able to actually execute the AppImage you'll need to adjust the permissions with 'chmod +x': +```shell +$ chmod +x ~/Applications/Ultimaker_Cura-4.7.1.AppImage +``` +You can then start up the app by calling the file directly: +```shell +$ ~/Applications/Ultimaker_Cura-4.7.1.AppImage & +``` +AppImages don't automatically appear in the Chrome OS launcher so you'll need to create its `.desktop` file. You can do this manually if you want, but I found it a lot easier to leverage `menulibre`: +```shell +$ sudo apt update && sudo apt install menulibre +$ menulibre +``` +Just plug in the relevant details (you can grab the appropriate icon [here](https://github.com/Ultimaker/Cura/blob/master/icons/cura-128.png)), hit the filing cabinet Save icon, and you should then be able to search for Cura from the Chrome OS launcher. +![Using menulibre to create the launcher shortcut](VTISYOKHO.png) + +![Ultimaker Cura](f8nRJcyI6.png) + +From there, just import the STL mesh, configure the appropriate settings, slice, and save the resulting GCODE. You can then just upload the GCODE straight to The Spaghetti Detective and kick off the print. + +![Successful print, designed and sliced on Chrome OS!](2g57odtq2.jpeg) + +Nice! 
\ No newline at end of file diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/lhTnVwCO3.png b/content/post/3d-modeling-and-printing-on-chrome-os/lhTnVwCO3.png new file mode 100644 index 0000000..721407f Binary files /dev/null and b/content/post/3d-modeling-and-printing-on-chrome-os/lhTnVwCO3.png differ diff --git a/content/post/3d-modeling-and-printing-on-chrome-os/qDTXt1jp3.png b/content/post/3d-modeling-and-printing-on-chrome-os/qDTXt1jp3.png new file mode 100644 index 0000000..e90a324 Binary files /dev/null and b/content/post/3d-modeling-and-printing-on-chrome-os/qDTXt1jp3.png differ diff --git a/content/post/_index.md b/content/post/_index.md new file mode 100644 index 0000000..8a084d9 --- /dev/null +++ b/content/post/_index.md @@ -0,0 +1,6 @@ ++++ +aliases = ["posts", "articles", "blog", "showcase", "docs"] +title = "Posts" +author = "Hugo Authors" +tags = ["index"] ++++ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/1LDP5zxCU.gif b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/1LDP5zxCU.gif new file mode 100644 index 0000000..78cdf79 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/1LDP5zxCU.gif differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/630ix7uVw.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/630ix7uVw.png new file mode 100644 index 0000000..48c22eb Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/630ix7uVw.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/EBkQTGmNb.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/EBkQTGmNb.png new file mode 100644 index 0000000..f029319 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/EBkQTGmNb.png differ diff --git 
a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/ELly_F6x6.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/ELly_F6x6.png new file mode 100644 index 0000000..9c5548f Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/ELly_F6x6.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/EkmgtRYN4.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/EkmgtRYN4.png new file mode 100644 index 0000000..cbe74b9 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/EkmgtRYN4.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/RuIrsHDqC.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/RuIrsHDqC.png new file mode 100644 index 0000000..fa31db3 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/RuIrsHDqC.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/V3qLmfi50.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/V3qLmfi50.png new file mode 100644 index 0000000..3adb2cb Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/V3qLmfi50.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/YKADY8YQR.gif b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/YKADY8YQR.gif new file mode 100644 index 0000000..cca1770 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/YKADY8YQR.gif differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/YilNCaHil.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/YilNCaHil.png new file mode 100644 index 0000000..2bae69a Binary files /dev/null and 
b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/YilNCaHil.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/fmLDUWjia.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/fmLDUWjia.png new file mode 100644 index 0000000..3c67156 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/fmLDUWjia.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/h6dUCApdV.gif b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/h6dUCApdV.gif new file mode 100644 index 0000000..d73cfd1 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/h6dUCApdV.gif differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/iHsYd7lbw.png b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/iHsYd7lbw.png new file mode 100644 index 0000000..dd87219 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/iHsYd7lbw.png differ diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/index.md b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/index.md new file mode 100644 index 0000000..d3ceca0 --- /dev/null +++ b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/index.md @@ -0,0 +1,72 @@ +--- +series: Tips +date: "2020-09-24T08:34:30Z" +thumbnail: fmLDUWjia.png +usePageBundles: true +tags: +- chrome +title: Abusing Chrome's Custom Search Engines for Fun and Profit +--- + +Do you (like me) find yourself frequently searching for information within the same websites over and over? Wouldn't it be great if you could just type your query into your browser's address bar (AKA the Chrome Omnibox) and go straight to the results you need? 
Well you totally can - and probably already *are* for certain sites which have inserted themselves as search engines. + +### The basics +Point your browser to `chrome://settings/searchEngines` to see which sites are registered as Custom Search Engines: +![Custom search engines list](RuIrsHDqC.png) + +Each of these search engine entries has three parts: a name ("Search engine"), a Keyword, and a Query URL. The "Search engine" title is just what will appear in the Omnibox when the search engine gets triggered, the Keyword is what you'll type in the Omnibox to trigger it, and the Query URL tells Chrome how to handle the search. All you have to do is type the keyword, hit your Tab key to activate the search, input your query, and hit Enter: +![Using a custom search engine](o_o7rt4pA.gif) + +For sites which register themselves automatically, the keyword is often set to something like `domain.tld` so it might make sense to assign it as something shorter or more descriptive. + +The Query URL is basically just what appears in the address bar when you search the site directly, with `%s` placed where your query text would normally go. You can view these details for a given search entry by tapping the three-dot menu button and selecting "Edit", and you can manually create new entries by hitting that big friendly "Add" button: +![Editing a search engine](fmLDUWjia.png) + +By searching the site directly, you might find that it supports additional search filters which get appended to the URL: +![Discovering search filters](iHsYd7lbw.png) + +You can add those filters to the Query URL to further customize your Custom Search Engine: +![Adding filters to a custom search](EBkQTGmNb.png) + +I spend a lot of my free time helping out on Google's support forums as a part of their [Product Experts program](https://productexperts.withgoogle.com/what-it-is), and I often need to quickly look up a Help Center article or previous forum discussion to assist users. 
I created a set of Custom Search Engines to make that easier: +![Google Help Center search engines](630ix7uVw.png) +![Pixel Buds Help search](V3qLmfi50.png) + +------ + +### Creating search where there is none +Even if the site doesn't have a built-in native search, you can leverage Google's `sitesearch` operator to create one. I often want to look up a Linux command's `man` page, so I use this Query URL to search https://www.man7.org/linux/man-pages/: +``` +http://google.com/search?q=%s&sitesearch=man7.org%2Flinux%2Fman-pages +``` +![man search](EkmgtRYN4.png) +![Searching man](YKADY8YQR.gif) + +------ + +### Speak foreign to me +This works for pretty much any site which parses the URL to render certain content. I use this for getting words/phrases instantly translated: +![Google Translate search](ELly_F6x6.png) +![Translating German with search!](1LDP5zxCU.gif) + +------ + +### Shorter shortcuts +Your Query URL doesn't even need to include a query at all! You can use the Custom Search Engines as a sort of hyper-fast shortcut to pages you visit frequently. If I create a new entry with the Keyword `searchax` and `abusing-chromes-custom-search-engines-for-fun-and-profit` as the query URL, I can quickly open to this page by typing `searchax[tab][enter]`: +![Custom search shortener](YilNCaHil.png) + +I use that trick pretty regularly for getting back to vCenter appliance management interfaces without having to type out the full FQDN and port number and all that. + +------ + +### Scratchpad hack +You can do some other creative stuff too, like speedily accessing a temporary scratchpad for quickly jotting down notes, complete with spellcheck! Just drop this into the Query URL field: +``` +data:text/html;charset=utf-8, Scratchpad +``` +And give it a nice short keyword - like the single letter 's': +![My own scratchpad!](h6dUCApdV.gif) + +------ + +With just a bit of tweaking, you can really supercharge Chrome's Omnibox capabilities. 
Let me know if you come across any other clever uses for this! \ No newline at end of file diff --git a/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/o_o7rt4pA.gif b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/o_o7rt4pA.gif new file mode 100644 index 0000000..e3d7335 Binary files /dev/null and b/content/post/abusing-chromes-custom-search-engines-for-fun-and-profit/o_o7rt4pA.gif differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/34xD8tbli.png b/content/post/adguard-home-in-docker-on-photon-os/34xD8tbli.png new file mode 100644 index 0000000..6f1fc04 Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/34xD8tbli.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/Es90-kFW9.png b/content/post/adguard-home-in-docker-on-photon-os/Es90-kFW9.png new file mode 100644 index 0000000..46d4630 Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/Es90-kFW9.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/HRRpFOKuN.png b/content/post/adguard-home-in-docker-on-photon-os/HRRpFOKuN.png new file mode 100644 index 0000000..55f74dc Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/HRRpFOKuN.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/NOyfgjjUy.png b/content/post/adguard-home-in-docker-on-photon-os/NOyfgjjUy.png new file mode 100644 index 0000000..8b596a6 Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/NOyfgjjUy.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/OtPGufxlP.png b/content/post/adguard-home-in-docker-on-photon-os/OtPGufxlP.png new file mode 100644 index 0000000..8e2acee Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/OtPGufxlP.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/UHvtv1DrT.png b/content/post/adguard-home-in-docker-on-photon-os/UHvtv1DrT.png 
new file mode 100644 index 0000000..937a804 Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/UHvtv1DrT.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/bw09OXG7f.png b/content/post/adguard-home-in-docker-on-photon-os/bw09OXG7f.png new file mode 100644 index 0000000..3a193b3 Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/bw09OXG7f.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/clE6OVmjp.png b/content/post/adguard-home-in-docker-on-photon-os/clE6OVmjp.png new file mode 100644 index 0000000..ef9e80f Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/clE6OVmjp.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/index.md b/content/post/adguard-home-in-docker-on-photon-os/index.md new file mode 100644 index 0000000..3535d5a --- /dev/null +++ b/content/post/adguard-home-in-docker-on-photon-os/index.md @@ -0,0 +1,184 @@ +--- +series: Projects +date: "2021-05-27T08:34:30Z" +thumbnail: HRRpFOKuN.png +usePageBundles: true +tags: +- docker +- vmware +- containers +- networking +- security +title: AdGuard Home in Docker on Photon OS +--- + +I was recently introduced to [AdGuard Home](https://adguard.com/en/adguard-home/overview.html) by way of its very slick [Home Assistant Add-On](https://github.com/hassio-addons/addon-adguard-home/blob/main/adguard/DOCS.md). Compared to the relatively-complicated [Pi-hole](https://pi-hole.net/) setup that I had implemented several months back, AdGuard Home was *much* simpler to deploy (particularly since I basically just had to click the "Install" button from the Home Assistant add-ons manage). It also has a more modern UI with options arranged more logically (to me, at least), and it just feels easier to use overall. It worked great for a time... until my Home Assistant instance crashed, taking down AdGuard Home (and my internet access) with it. 
Maybe bundling these services isn't the best move. + +I'd like to use AdGuard Home, but the system it runs on needs to be rock-solid. With that in mind, I thought it might be fun to instead run AdGuard Home in a Docker container on a VM running VMware's container-optimized [Photon OS](https://github.com/vmware/photon), primarily because I want an excuse to play more with Docker and Photon (but also the thing I just mentioned about stability). So here's what it took to get that running. + +### Deploy Photon +First, up: getting Photon. There are a variety of delivery formats available [here](https://github.com/vmware/photon/wiki/Downloading-Photon-OS), and I opted for the HW13 OVA version. I copied that download URL: +``` +https://packages.vmware.com/photon/4.0/GA/ova/photon-hw13-uefi-4.0-1526e30ba0.ova +``` + +Then I went into vCenter, hit the **Deploy OVF Template** option, and pasted in the URL: +![Deploying the OVA straight from the internet](Es90-kFW9.png) +This lets me skip the kind of tedious "download file from internet and then upload file to vCenter" dance, and I can then proceed to click through the rest of the deployment options. +![Ready to deploy](rCpaTbPX5.png) + +Once the VM is created, I power it on and hop into the web console. The default root username is `changeme`, and I'll of course be forced to change that the first time I log in. + + +### Configure Networking +My next step was to configure a static IP address by creating `/etc/systemd/network/10-static-en.network` and entering the following contents: + +```conf +[Match] +Name=eth0 + +[Network] +Address=192.168.1.2/24 +Gateway=192.168.1.1 +DNS=192.168.1.5 +``` + +By the way, that `192.168.1.5` address is my Windows DC/DNS server that I use for [my homelab environment](/vmware-home-lab-on-intel-nuc-9#basic-infrastructure). That's the DNS server that's configured on my Google Wifi router, and it will continue to handle resolution for local addresses. 
+ +I also disabled DHCP by setting `DHCP=no` in `/etc/systemd/network/99-dhcp-en.network`: + +```conf +[Match] +Name=e* + +[Network] +DHCP=no +IPv6AcceptRA=no +``` + +I set the required permissions on my new network configuration file with `chmod 644 /etc/systemd/network/10-static-en.network` and then restarted `networkd` with `systemctl restart systemd-networkd`. + +I then ran `networkctl` a couple of times until the `eth0` interface went fully green, and did an `ip a` to confirm that the address had been applied. +![Verifying networking](qOw7Ysj3O.png) + +One last little bit of housekeeping is to change the hostname with `hostnamectl set-hostname adguard` and then reboot for good measure. I can then log in via SSH to continue the setup. +![SSH login](NOyfgjjUy.png) + +Now that I'm in, I run `tdnf update` to make sure the VM is fully up to date. + +### Install docker-compose +Photon OS ships with Docker preinstalled, but I need to install `docker-compose` on my own to simplify container deployment. Per the [install instructions](https://docs.docker.com/compose/install/#install-compose), I run: + +```shell +curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose +chmod +x /usr/local/bin/docker-compose +``` + +And then verify that it works: +```shell +root@adguard [ ~]# docker-compose --version +docker-compose version 1.29.2, build 5becea4c +``` + +I'll also want to enable and start Docker: +```shell +systemctl enable docker +systemctl start docker +``` + +### Disable DNSStubListener +By default, the `resolved` daemon is listening on `127.0.0.53:53` and will prevent docker from binding to that port. 
Fortunately it's [pretty easy](https://github.com/pi-hole/docker-pi-hole#installing-on-ubuntu) to disable the `DNSStubListener` and free up the port: +```shell +sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf +rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf +systemctl restart systemd-resolved +``` + +### Deploy AdGuard Home container +Okay, now for the fun part. + +I create a directory for AdGuard to live in, and then create a `docker-compose.yaml` therein: +```shell +mkdir ~/adguard +cd ~/adguard +vi docker-compose.yaml +``` + +And I define the container: +```yaml +version: "3" + +services: + adguard: + container_name: adguard + restart: unless-stopped + image: adguard/adguardhome:latest + ports: + - "53:53/tcp" + - "53:53/udp" + - "67:67/udp" + - "68:68/tcp" + - "68:68/udp" + - "80:80/tcp" + - "443:443/tcp" + - "853:853/tcp" + - "3000:3000/tcp" + volumes: + - './workdir:/opt/adguardhome/work' + - './confdir:/opt/adguardhome/conf' + cap_add: + - NET_ADMIN +``` + +Then I can fire it up with `docker-compose up --detach`: + +```shell +root@adguard [ ~/adguard ]# docker-compose up --detach +Creating network "adguard_default" with the default driver +Pulling adguard (adguard/adguardhome:latest)... +latest: Pulling from adguard/adguardhome +339de151aab4: Pull complete +4db4be09618a: Pull complete +7e918e810e4e: Pull complete +bfad96428d01: Pull complete +Digest: sha256:de7d791b814560663fe95f9812fca2d6dd9d6507e4b1b29926cc7b4a08a676ad +Status: Downloaded newer image for adguard/adguardhome:latest +Creating adguard ... 
done +``` + + +### Post-deploy configuration +Next, I point a web browser to `http://adguard.lab.bowdre.net:3000` to perform the initial (minimal) setup: +![Initial config screen](UHvtv1DrT.png) + +Once that's done, I can log in to the dashboard at `http://adguard.lab.bowdre.net/login.html`: +![Login page](34xD8tbli.png) + +AdGuard Home ships with pretty sensible defaults so there's not really a huge need to actually do a lot of configuration. Any changes that I *do* do will be saved in `~/adguard/confdir/AdGuardHome.yaml` so they will be preserved across container changes. + + +### Getting requests to AdGuard Home +Normally, you'd tell your Wifi router what DNS server you want to use, and it would relay that information to the connected DHCP clients. Google Wifi is a bit funny, in that it wants to function as a DNS proxy for the network. When you configure a custom DNS server for Google Wifi, it still tells the DHCP clients to send the requests to the router, and the router then forwards the queries on to the configured DNS server. + +I already have Google Wifi set up to use my Windows DC (at `192.168.1.5`) for DNS. That lets me easily access systems on my internal `lab.bowdre.net` domain without having to manually configure DNS, and the DC forwards resolution requests it can't handle on to the upstream (internet) DNS servers. + +To easily insert my AdGuard Home instance into the flow, I pop in to my Windows DC and configure the AdGuard Home address (`192.168.1.2`) as the primary DNS forwarder. The DC will continue to handle internal resolutions, and anything it can't handle will now get passed up the chain to AdGuard Home. And this also gives me a bit of a failsafe, in that queries will fail back to the previously-configured upstream DNS if AdGuard Home doesn't respond within a few seconds. +![Setting AdGuard Home as a forwarder](bw09OXG7f.png) + +It's working! 
+![Requests!](HRRpFOKuN.png) + + +### Caveat +Chaining my DNS configurations in this way (router -> DC -> AdGuard Home -> internet) does have a bit of a limitation, in that all queries will appear to come from the Windows server: +![Only client](OtPGufxlP.png) +I won't be able to do any per-client filtering as a result, but honestly I'm okay with that as I already use the "Pause Internet" option in Google Wifi to block outbound traffic from certain devices anyway. And using the Windows DNS as an intermediary makes it significantly quicker and easier to switch things up if I run into problems later; changing the forwarder here takes effect instantly rather than having to manually update all of my clients or wait for DHCP to distribute the change. + +I have worked around this in the past by [bypassing Google Wifi's DHCP](https://www.mbreviews.com/pi-hole-google-wifi-raspberry-pi/) but I think it was actually more trouble than it was worth to me. + + +### One last thing... +I'm putting a lot of responsibility on both of these VMs, my Windows DC and my new AdGuard Home instance. If they aren't up, I won't have internet access, and that would be a shame. I already have my ESXi host configured to automatically start up when power is (re)applied, so I also adjust the VM Startup/Shutdown Configuration so that AdGuard Home will automatically boot after ESXi is loaded, followed closely by the Windows DC (and the rest of my virtualized infrastructure): +![Auto Start-up Options](clE6OVmjp.png) + +So there you have it. Simple DNS-based ad-blocking running on a minimal container-optimized VM that *should* be more stable than the add-on tacked on to my Home Assistant instance. Enjoy! 
\ No newline at end of file diff --git a/content/post/adguard-home-in-docker-on-photon-os/qOw7Ysj3O.png b/content/post/adguard-home-in-docker-on-photon-os/qOw7Ysj3O.png new file mode 100644 index 0000000..fe5ef1e Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/qOw7Ysj3O.png differ diff --git a/content/post/adguard-home-in-docker-on-photon-os/rCpaTbPX5.png b/content/post/adguard-home-in-docker-on-photon-os/rCpaTbPX5.png new file mode 100644 index 0000000..b94f5ac Binary files /dev/null and b/content/post/adguard-home-in-docker-on-photon-os/rCpaTbPX5.png differ diff --git a/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/9WdA6HRch.png b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/9WdA6HRch.png new file mode 100644 index 0000000..ff26dff Binary files /dev/null and b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/9WdA6HRch.png differ diff --git a/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/Ki7jo65t3.png b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/Ki7jo65t3.png new file mode 100644 index 0000000..1821572 Binary files /dev/null and b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/Ki7jo65t3.png differ diff --git a/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/KjGOX8Yiv.png b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/KjGOX8Yiv.png new file mode 100644 index 0000000..24f4d82 Binary files /dev/null and b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/KjGOX8Yiv.png differ diff --git a/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/WWuHwVvrk.png b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/WWuHwVvrk.png new file mode 100644 index 0000000..90151d7 Binary files /dev/null and b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/WWuHwVvrk.png differ diff --git 
a/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/index.md b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/index.md new file mode 100644 index 0000000..e6ec022 --- /dev/null +++ b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/index.md @@ -0,0 +1,179 @@ +--- +series: Projects +date: "2020-11-24T08:34:30Z" +lastmod: "2021-03-12" +thumbnail: Ki7jo65t3.png +usePageBundles: true +tags: +- android +- automation +- tasker +- vpn +title: Auto-connect to ProtonVPN on untrusted WiFi with Tasker [Update!] +--- + +*[Update 2021-03-12] This solution recently stopped working for me. While looking for a fix, I found that OpenVPN had published [some notes](https://openvpn.net/faq/how-do-i-use-tasker-with-openvpn-connect-for-android/) on controlling the [official OpenVPN Connect app](https://play.google.com/store/apps/details?id=net.openvpn.openvpn) from Tasker. Jump to the [Update](#update) below to learn how I adapted my setup with this new knowledge.* + +I recently shared how I use [Tasker and Home Assistant to keep my phone from charging past 80%](/safeguard-your-androids-battery-with-tasker-home-assistant). Today, I'm going to share the setup I use to automatically connect my phone to a VPN on networks I *don't* control. + +![Tasker + OpenVPN](Ki7jo65t3.png) + +### Background +Android has an option to [set a VPN as Always-On](https://support.google.com/android/answer/9089766#always-on_VPN) so for maximum security I could just use that. I'm not *overly* concerned (yet?) with my internet traffic being intercepted upstream of my ISP, though, and often need to connect to other devices on my home network without passing through a VPN (or introducing split-tunnel complexity). But I do want to be sure that my traffic is protected whenever I'm connected to a WiFi network controlled by someone else. 
+ +I've recently started using [ProtonVPN](https://protonvpn.com/) in conjunction with my paid ProtonMail account so these instructions are tailored to that particular VPN provider. I'm paying for the ProtonVPN Plus subscription but these instructions should also work for the [free tier](https://protonvpn.com/free-vpn) as well. (And this should work for any VPN which provides an OpenVPN config file - you'll just have to find that on your own.) + +ProtonVPN does provide a quite excellent [Android app](https://play.google.com/store/apps/details?id=ch.protonvpn.android) but I couldn't find a way to automate it without root. (If your phone is rooted, you should be able to use a Tasker shell to run `cmd statusbar click-tile ch.protonvpn.android/com.protonvpn.android.components.QuickTileService` and avoid needing to use OpenVPN at all.) + +### The apps +You'll need a few apps to make this work: + +- [Tasker](https://play.google.com/store/apps/details?id=net.dinglisch.android.taskerm) +- [OpenVPN for Android](https://play.google.com/store/apps/details?id=de.blinkt.openvpn) +- [OpenVpn Tasker Plugin](https://play.google.com/store/apps/details?id=com.ffrog8.openVpnTaskerPlugin) + +It's important to use the [open-source](https://github.com/schwabe/ics-openvpn) 'OpenVPN for Android' app by Arne Schwabe rather than the 'OpenVPN Connect' app as the latter doesn't work with the Tasker plugin; that's the app I used when I originally wrote this guide. + +### OpenVPN config file +You can find instructions for configuring the OpenVPN client to work with ProtonVPN [here](https://protonvpn.com/support/android-vpn-setup/) but I'll go ahead and hit the highlights. You'll probably want to go ahead and do all this from your phone so you don't have to fuss with transferring files around, but hey, *you do you*. + +1. Log in to your ProtonVPN account (or sign up for a new free one) at https://account.protonvpn.com/login. +2. 
Use the panel on the left side to navigate to **[Downloads > OpenVPN configuration files](https://account.protonvpn.com/downloads#openvpn-configuration-files)**. +3. Select the **Android** platform and **UDP** as the protocol, unless you have a [particular reason to use TCP](https://protonvpn.com/support/udp-tcp/#:~:text=When%20to%20use%20UDP%20vs.%20TCP). +4. Select and download the desired config file: + - **Secure Core configs** utilize the [Secure Core](https://protonvpn.com/support/secure-core-vpn/) feature which connects you to a VPN node in your target country by way of a Proton-owned-and-managed server in privacy-friendly Iceland, Sweden, or Switzerland + - **Country configs** connect to a random VPN node in your target country + - **Standard server configs** let you choose the specific VPN node to use + - **Free server configs** connect you to one of the VPN nodes available in the free tier +![Client config download page](vdIG0jHmk.png) + +Feel free to download more than one if you'd like to have different profiles available within the OpenVPN app. + +ProtonVPN automatically generates a set of user credentials to use with a third-party VPN client so that you don't have to share your personal creds. You'll want to make a note of that randomly-generated username and password so you can plug them in to the OpenVPN app later. You can find the details at **[Account > OpenVPN / IKEv2 username](https://account.protonvpn.com/account#openvpn)**. + +**Now that you've got the profile file, skip on down to [The Update](#update) to import it into OpenVPN Connect.** + +### Configuring OpenVPN for Android +Now that you've got the config file(s) and your client credentials, it's time to actually configure that client. + +![OpenVPN connection list](9WdA6HRch.png) + +1. Launch the OpenVPN for Android app and tap the little 'downvote-in-a-box' "Import" icon. +2. Browse to wherever you saved the `.ovpn` config files and select the one you'd like to use. +3. 
You can rename it if you'd like but I feel that `us.protonvpn.com.udp` is pretty self-explanatory and will do just fine to distinguish between my profiles. Tap the check mark at the top-right or the floppy icon at the bottom right to confirm the import. +4. Now tap the pencil icon next to the new entry to edit its settings, and paste in the OpenVPN username and password where appropriate. Use your phone's back button/gesture to save the config and return to the list. +5. Repeat for any other configurations you'd like to import. We'll only use one for this particular Tasker profile but you might come up with different needs for different scenarios. +6. And finally, tap on the config name to test the connection. The OpenVPN Log window will appear, and you want the line at the top to (eventually) display something like `Connected: SUCCESS`. + +Success! + +I don't like to have a bunch of persistent notification icons hanging around (and Android already shows a persistent status icon when a VPN connection is active). If you're like me, long-press the OpenVPN notification and tap the gear icon. Then tap on the **Connection statistics** category and activate the **Minimized** slider. The notification will still appear, but it will collapse to the bottom of your notification stack and you won't get bugged by the icon. + +![Notification settings](WWuHwVvrk.png) + +### Tasker profiles +Open up Tasker and get ready to automate! We're going to wind up with at least two new Tasker profiles so (depending on how many you already have) you might want to create a new project by long-pressing the Home icon at the bottom-left of the screen and selecting the **Add** option. I chose to group all my VPN-related profiles in a project named (oh-so-creatively) "VPN". Totally your call though. + +Let's start with a profile to track whether or not we're connected to one of our preferred/trusted WiFi networks: + +#### Trusted WiFi +1. 
Tap the '+' sign to create a new profile, and add a new **State > Net > Wifi Connected** context. This profile will become active whenever your phone connects to WiFi. +2. Tap the magnifying glass next to the **SSID** field, which will pop up a list of all detected nearby network identifiers. Tap to select whichever network(s) you'd like to be considered "safe". You can also manually enter the SSID names, separating multiple options with a `/` (ex, `FBI Surveillance Van/TellMyWifiLoveHer/Pretty fly for a WiFi`). Or, for more security, identify the networks based on the MACs instead of the SSIDs - just be sure to capture the MACs for any extenders or mesh nodes too! +3. Once you've got your networks added, tap the back button to move *forward* to the next task (Ah, Android!): configuring the *action* which will occur when the context is satisfied. +4. Tap the **New Task** option and then tap the check mark to skip giving it a name (no need). +5. Hit the '+' button to add an action and select **Variables > Variable Set**. +6. For **Name**, enter `%TRUSTED_WIFI` (all caps to make it a "public" variable), and for the **To** field just enter `1`. +7. Hit back to save the action, and back again to save the profile. +8. Back at the profile list, long-press on the **Variable Set...** action and then select **Add Exit Task**. +9. We want to un-set the variable when no longer connected to a trusted WiFi network so add a new **Variables > Variable Clear** action and set the name to `%TRUSTED_WIFI`. +10. And back back out to admire your handiwork. 
Here's a recap of the profile: +``` +Profile: Trusted Wifi +State: Wifi Connected [ SSID:FBI Surveillance Van/TellMyWifiLoveHer/Pretty fly for a WiFi MAC:* IP:* Active:Any ] +Enter: Anon +A1: Variable Set [ Name:%TRUSTED_WIFI To:1 Recurse Variables:Off Do Maths:Off Append:Off Max Rounding Digits:0 ] +Exit: Anon +A1: Variable Clear [ Name:%TRUSTED_WIFI Pattern Matching:Off Local Variables Only:Off Clear All Variables:Off ] +``` +Onward! + +#### VPN on Strange WiFi +This profile will kick in if the phone connects to a WiFi network which isn't on the "approved" list - when the `%TRUSTED_WIFI` variable is not set. + +1. It starts out the same way by creating a new profile with the **State > Net > Wifi Connected** context but this time don't add any network names to the list. +2. For the action, select **Plugin > OpenVpn Tasker Plugin**, tap the pencil icon to edit the configuration, and select your VPN profile from the list under **Connect using profile** +3. Back at the Action Edit screen, tap the checkbox next to **If** and enter the variable name `%TRUSTED_WIFI`. Tap the '~' button to change the condition operator to **Isn't Set**. So while this profile will activate every time you connect to WiFi, the action which connects to the VPN will only fire if the WiFi isn't a trusted network. +4. Back out to the profile list and add a new Exit Task. +5. Add another **Plugin > OpenVpn Tasker Plugin** task and this time configure it to **Disconnect VPN**. + +To recap: +``` +Profile: VPN on Strange Wifi +State: Wifi Connected [ SSID:* MAC:* IP:* Active:Any ] +Enter: Anon +A1: OpenVPN [ Configuration:Connect (us.protonvpn.com.udp) Timeout (Seconds):0 ] If [ %TRUSTED_WIFI !Set ] +Exit: Anon +A1: OpenVPN [ Configuration:Disconnect Timeout (Seconds):0 ] +``` + +### Conclusion +Give it a try - the VPN should automatically activate the next time you connect to a network that's not on your list. 
If you find that it's not working correctly, you might try adding a short 3-5 second **Task > Wait** action before the connect/disconnect actions just to give a brief cooldown between state changes. + +### Epilogue: working with Google's VPN +My Google Pixel 5 has a neat option at **Settings > Network & internet > Wi-Fi > Wi-Fi preferences > Connect to public networks** which will automatically connect the phone to known-decent public WiFi networks and automatically tunnel the connection through a Google VPN. It doesn't provide quite as much privacy as ProtonVPN, of course, but it's enough to keep my traffic safe from prying eyes on those public networks, and the auto-connection option really comes in handy sometimes. Of course, my Tasker setup would see that I'm connected to an unknown network and try to connect to ProtonVPN at the same time the phone was trying to connect to the Google VPN. That wasn't ideal. + +I came up with a workaround to treat any network with the Google VPN as "trusted" as long as that VPN was active. I inserted a 10-second Wait before the Connect and Disconnect actions to give the VPN time to stand up, and added two new profiles to detect the Google VPN connection and disconnection. + +#### Google VPN On +This one uses an **Event > System > Logcat Entry**. The first time you try to use that you'll be prompted to use adb to grant Tasker the READ_LOGS permission but the app actually does a great job of walking you through that setup. 
We'll watch the `Vpn` component and filter for `Established by com.google.android.apps.gcs on tun0`, and then set the `%TRUSTED_WIFI` variable: +``` +Profile: Google VPN On +Event: Logcat Entry [ Output Variables:* Component:Vpn Filter:Established by com.google.android.apps.gcs on tun0 Grep Filter (Check Help):Off ] +Enter: Anon +A1: Variable Set [ Name:%TRUSTED_WIFI To:1 Recurse Variables:Off Do Maths:Off Append:Off Max Rounding Digits:3 ] +``` + +#### Google VPN Off +This one is pretty much the same but the opposite: +``` +Profile: Google VPN Off +Event: Logcat Entry [ Output Variables:* Component:Vpn Filter:setting state=DISCONNECTED, reason=agentDisconnect Grep Filter (Check Help):Off ] +Enter: Anon +A1: Variable Clear [ Name:%TRUSTED_WIFI Pattern Matching:Off Local Variables Only:Off Clear All Variables:Off ] +``` + +### Update +#### OpenVPN Connect app configuration +After installing and launching the official [OpenVPN Connect app](https://play.google.com/store/apps/details?id=net.openvpn.openvpn), tap the "+" button at the bottom right to create a new profile. Swipe over to the "File" tab and import the `*.ovpn` file you downloaded from ProtonVPN. Paste in the username, tick the "Save password" box, and paste in the password as well. I also chose to rename the profile to something a little bit more memorable - you'll need this name later. From there, hit the "Add" button and then go ahead and tap on your profile to test the connection. + +![Creating a profile in OpenVPN Connect](KjGOX8Yiv.png) + +#### Tasker profiles +Go ahead and create the [Trusted Wifi profile](#trusted-wifi) as described above. + +The condition for the [VPN on Strange Wifi profile](#vpn-on-strange-wifi) will be the same, but the task will be different. This time, add a **System > Send Intent** action. 
You'll need to enter the following details, leaving the other fields blank/default: + +``` +Action: net.openvpn.openvpn.CONNECT +Cat: None +Extra: net.openvpn.openvpn.AUTOSTART_PROFILE_NAME:PC us.protonvpn.com.udp (replace with your profile name) +Extra: net.openvpn.openvpn.AUTOCONNECT:true +Extra: net.openvpn.openvpn.APP_SECTION:PC +Package: net.openvpn.openvpn +Class: net.openvpn.unified.MainActivity +Target: Activity +If: %TRUSTED_WIFI !Set +``` + +The Exit Task to disconnect from the VPN uses a similar intent: + +``` +Action: net.openvpn.openvpn.DISCONNECT +Cat: None +Extra: net.openvpn.openvpn.STOP:true +Package: net.openvpn.openvpn +Class: net.openvpn.unified.MainActivity +Target: Activity +``` + +All set! You can pop back up to the [Epilogue](#epilogue-working-with-googles-vpn) section to continue tweaking to avoid conflicts with Google's auto-connect VPN if you'd like. \ No newline at end of file diff --git a/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/vdIG0jHmk.png b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/vdIG0jHmk.png new file mode 100644 index 0000000..e5f0975 Binary files /dev/null and b/content/post/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/vdIG0jHmk.png differ diff --git a/content/post/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/i0UKdXleC.png b/content/post/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/i0UKdXleC.png new file mode 100644 index 0000000..f79d6c5 Binary files /dev/null and b/content/post/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/i0UKdXleC.png differ diff --git a/content/post/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/index.md b/content/post/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/index.md new file mode 100644 index 0000000..b3038e6 --- /dev/null +++ b/content/post/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance/index.md @@ 
-0,0 +1,231 @@ +--- +series: Projects +date: "2018-09-26T08:34:30Z" +lastmod: "2022-03-06" +thumbnail: i0UKdXleC.png +usePageBundles: true +tags: +- docker +- linux +- cloud +- gcp +- security +title: BitWarden password manager self-hosted on free Google Cloud instance +--- + +![Bitwarden login](i0UKdXleC.png) + +A friend mentioned the [BitWarden](https://bitwarden.com/) password manager to me yesterday and I had to confess that I'd never heard of it. I started researching it and was impressed by what I found: it's free, [open-source](https://github.com/bitwarden), feature-packed, fully cross-platform (with Windows/Linux/MacOS desktop clients, Android/iOS mobile apps, and browser extensions for Chrome/Firefox/Opera/Safari/Edge/etc), and even offers a self-hosted option. + +I wanted to try out the self-hosted setup, and I discovered that the [official distribution](https://help.bitwarden.com/article/install-on-premise/) works beautifully on an `n1-standard-1` 1-vCPU Google Compute Engine instance - but that would cost me an estimated $25/mo to run after my free Google Cloud Platform trial runs out. And I can't really scale that instance down further because the embedded database won't start with less than 2GB of RAM. + +I then came across [this comment](https://www.reddit.com/r/Bitwarden/comments/8vmwwe/best_place_to_self_host_bitwarden/e1p2f71/) on Reddit which discussed in somewhat-vague terms the steps required to get BitWarden to run on the [free](https://cloud.google.com/free/docs/always-free-usage-limits#compute_name) `e2-micro` instance, and also introduced me to the community-built [vaultwarden](https://github.com/dani-garcia/vaultwarden) project which is specifically designed to run a BW-compatible server on resource-constrained hardware. So here are the steps I wound up taking to get this up and running. 
+ +{{% notice info "bitwarden_rs -> vaultwarden"%}} +When I originally wrote this post back in September 2018, the containerized BitWarden solution was called `bitwarden_rs`. The project [has since been renamed](https://github.com/dani-garcia/vaultwarden/discussions/1642) to `vaultwarden`, and I've since moved to the hosted version of BitWarden. I have attempted to update this article to account for the change but have not personally tested this lately. Good luck, dear reader! +{{% /notice %}} + + +### Spin up a VM +*Easier said than done, but head over to https://console.cloud.google.com/ and fumble through:* + +1. Creating a new project (or just add an instance to an existing one). +2. Creating a new Compute Engine instance, selecting `e2-micro` for the Machine Type and ticking the *Allow HTTPS traffic* box. +3. *(Optional)* Editing the instance to add an ssh-key for easier remote access. + +### Configure Dynamic DNS +*Because we're cheap and don't want to pay for a static IP.* + +1. Log in to the [Google Domain admin portal](https://domains.google.com/registrar) and [create a new Dynamic DNS record](https://domains.google.com/registrar). This will provide a username and password specific for that record. +2. Log in to the GCE instance and run `sudo apt-get update` followed by `sudo apt-get install ddclient`. Part of the install process prompts you to configure things... just accept the defaults and move on. +3. Edit the `ddclient` config file to look like this, substituting the username, password, and FDQN from Google Domains: +```shell +$ sudo vi /etc/ddclient.conf + # Configuration file for ddclient generated by debconf + # + # /etc/ddclient.conf + + protocol=googledomains, + ssl=yes, + syslog=yes, + use=web, + server=domains.google.com, + login='[USERNAME]', + password='[PASSWORD]', + [FQDN] +``` +4. 
`sudo vi /etc/default/ddclient` and make sure that `run_daemon="true"`: + +```shell +# Configuration for ddclient scripts +# generated from debconf on Sat Sep 8 21:58:02 UTC 2018 +# +# /etc/default/ddclient + +# Set to "true" if ddclient should be run every time DHCP client ('dhclient' +# from package isc-dhcp-client) updates the systems IP address. +run_dhclient="false" + +# Set to "true" if ddclient should be run every time a new ppp connection is +# established. This might be useful, if you are using dial-on-demand. +run_ipup="false" + +# Set to "true" if ddclient should run in daemon mode +# If this is changed to true, run_ipup and run_dhclient must be set to false. +run_daemon="true" + +# Set the time interval between the updates of the dynamic DNS name in seconds. +# This option only takes effect if the ddclient runs in daemon mode. +daemon_interval="300" +``` +5. Restart the `ddclient` service - twice for good measure (daemon mode only gets activated on the second go *because reasons*): +```shell +$ sudo systemctl restart ddclient +$ sudo systemctl restart ddclient +``` +6. After a few moments, refresh the Google Domains page to verify that your instance's external IP address is showing up on the new DDNS record. + +### Install Docker +*Steps taken from [here](https://docs.docker.com/install/linux/docker-ce/debian/).* +1. Update `apt` package index: +```shell +$ sudo apt-get update +``` +2. Install package management prereqs: +```shell +$ sudo apt-get install \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg2 \ + software-properties-common +``` +3. Add Docker GPG key: +```shell +$ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - +``` +4. Add the Docker repo: +```shell +$ sudo add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/debian \ + $(lsb_release -cs) \ + stable" +``` +5. Update apt index again: +```shell +$ sudo apt-get update +``` +6. 
Install Docker: +```shell +$ sudo apt-get install docker-ce +``` + +### Install Certbot and generate SSL cert +*Steps taken from [here](https://certbot.eff.org/instructions?ws=other&os=debianbuster).* +1. Install Certbot: +```shell +$ sudo apt-get install certbot +``` +2. Generate certificate: +```shell +$ sudo certbot certonly --standalone -d [FQDN] +``` +3. Create a directory to store the new certificates and copy them there: +```shell +$ sudo mkdir -p /ssl/keys/ +$ sudo cp -p /etc/letsencrypt/live/[FQDN]/fullchain.pem /ssl/keys/ +$ sudo cp -p /etc/letsencrypt/live/[FQDN]/privkey.pem /ssl/keys/ +``` + +### Set up vaultwarden +*Using the container image available [here](https://github.com/dani-garcia/vaultwarden).* +1. Let's just get it up and running first: +```shell +$ sudo docker run -d --name vaultwarden \ + -e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \ + -e ROCKET_PORT='8000' \ + -v /ssl/keys/:/ssl/ \ + -v /bw-data/:/data/ \ + -v /icon_cache/ \ + -p 0.0.0.0:443:8000 \ + vaultwarden/server:latest +``` +2. At this point you should be able to point your web browser at `https://[FQDN]` and see the BitWarden login screen. Click on the Create button and set up a new account. Log in, look around, add some passwords, etc. Everything should basically work just fine. +3. Unless you want to host passwords for all of the Internet you'll probably want to disable signups at some point by adding the `env` option `SIGNUPS_ALLOWED=false`. 
And you'll need to set `DOMAIN=https://[FQDN]` if you want to use U2F authentication: +```shell +$ sudo docker stop vaultwarden +$ sudo docker rm vaultwarden +$ sudo docker run -d --name vaultwarden \ + -e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \ + -e ROCKET_PORT='8000' \ + -e SIGNUPS_ALLOWED=false \ + -e DOMAIN=https://[FQDN] \ + -v /ssl/keys/:/ssl/ \ + -v /bw-data/:/data/ \ + -v /icon_cache/ \ + -p 0.0.0.0:443:8000 \ + vaultwarden/server:latest +``` + +### Install vaultwarden as a service +*So we don't have to keep manually firing this thing off.* +1. Create a script to stop, remove, update, and (re)start the `vaultwarden` container: +```shell +$ sudo vi /usr/local/bin/start-vaultwarden.sh + #!/bin/bash + + docker stop vaultwarden + docker rm vaultwarden + docker pull vaultwarden/server + + docker run -d --name vaultwarden \ + -e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \ + -e ROCKET_PORT='8000' \ + -e SIGNUPS_ALLOWED=false \ + -e DOMAIN=https://[FQDN] \ + -v /ssl/keys/:/ssl/ \ + -v /bw-data/:/data/ \ + -v /icon_cache/ \ + -p 0.0.0.0:443:8000 \ + vaultwarden/server:latest +$ sudo chmod 744 /usr/local/bin/start-vaultwarden.sh +``` +2. And add it as a `systemd` service: +```shell +$ sudo vi /etc/systemd/system/vaultwarden.service + [Unit] + Description=BitWarden container + Requires=docker.service + After=docker.service + + [Service] + Restart=always + ExecStart=/usr/local/bin/start-vaultwarden.sh + ExecStop=/usr/bin/docker stop vaultwarden + + [Install] + WantedBy=default.target +$ sudo chmod 644 /etc/systemd/system/vaultwarden.service +``` +3. 
Try it out: +```shell +$ sudo systemctl start vaultwarden +$ sudo systemctl status vaultwarden + ● vaultwarden.service - BitWarden container + Loaded: loaded (/etc/systemd/system/vaultwarden.service; enabled; vendor preset: enabled) + Active: deactivating (stop) since Sun 2018-09-09 03:43:20 UTC; 1s ago + Process: 13104 ExecStart=/usr/local/bin/start-vaultwarden.sh (code=exited, status=0/SUCCESS) + Main PID: 13104 (code=exited, status=0/SUCCESS); Control PID: 13229 (docker) + Tasks: 5 (limit: 4915) + Memory: 9.7M + CPU: 375ms + CGroup: /system.slice/vaultwarden.service + └─control + └─13229 /usr/bin/docker stop vaultwarden + + Sep 09 03:43:20 vaultwarden start-vaultwarden.sh[13104]: Status: Image is up to date for vaultwarden/server:latest + Sep 09 03:43:20 vaultwarden start-vaultwarden.sh[13104]: ace64ca5294eee7e21be764ea1af9e328e944658b4335ce8721b99a33061d645 +``` + +### Conclusion +If all went according to plan, you've now got a highly-secure open-source full-featured cross-platform password manager running on an Always Free Google Compute Engine instance resolved by Google Domains dynamic DNS. Very slick!
diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/-lp1-DGiM.png b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/-lp1-DGiM.png new file mode 100644 index 0000000..aa3b640 Binary files /dev/null and b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/-lp1-DGiM.png differ diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md new file mode 100644 index 0000000..235571b --- /dev/null +++ b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md @@ -0,0 +1,33 @@ +--- +series: Tips +date: "2020-12-23T08:34:30Z" +thumbnail: -lp1-DGiM.png +tags: +- chromeos +title: Burn an ISO to USB with the Chromebook Recovery Utility +toc: false +featured: true +--- + +There are a number of fantastic Windows applications for creating bootable USB drives from ISO images - but those don't work on a Chromebook. Fortunately there's an easily-available tool which will do the trick: Google's own [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) app. + +Normally that tool is used to create bootable media to [reinstall Chrome OS on a broken Chromebook](https://support.google.com/chromebook/answer/1080595) (hence the name) but it also has the capability to write other arbitrary images as well. So if you find yourself needing to create a USB drive for installing ESXi on a computer in your [home lab](https://twitter.com/johndotbowdre/status/1341767090945077248) (more on that soon!) here's what you'll need to do: + +1. Install the [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm). +2. Download the ISO you intend to use. +3.
Rename the file to append `.bin` on the end, after the `.iso` bit: +![My renamed ISO for installing ESXi](uoTjgtbN1.png) +4. Plug in the USB drive you're going to sacrifice for this effort - remember that ALL data on the drive will be erased. +5. Open the recovery utility, click on the gear icon at the top right, and select the *Use local image* option: +![The CRU menu](vdTpW9t7Q.png) +6. Browse to and select the `*.iso.bin` file. +7. Choose the USB drive, and click *Continue*. +![Selecting the drive](p_Ieqsw4p.png) +8. Click *Create now* to start the writing! +![Writing the image](lhw5EEqSD.png) +9. All done! It probably won't work great for actually recovering your Chromebook but will do wonders for installing ESXi (or whatever) on another computer! +![Success!](-lp1-DGiM.png) + +You can also use the CRU to make a bootable USB from a `.zip` archive containing a single `.img` file, such as those commonly used to distribute [Raspberry Pi images](https://www.raspberrypi.org/documentation/installation/installing-images/chromeos.md). + +Very cool! 
diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/lhw5EEqSD.png b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/lhw5EEqSD.png new file mode 100644 index 0000000..06c30ff Binary files /dev/null and b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/lhw5EEqSD.png differ diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/p_Ieqsw4p.png b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/p_Ieqsw4p.png new file mode 100644 index 0000000..84e6f9a Binary files /dev/null and b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/p_Ieqsw4p.png differ diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/uoTjgtbN1.png b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/uoTjgtbN1.png new file mode 100644 index 0000000..75d6b6c Binary files /dev/null and b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/uoTjgtbN1.png differ diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/vdTpW9t7Q.png b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/vdTpW9t7Q.png new file mode 100644 index 0000000..af1a3c6 Binary files /dev/null and b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/vdTpW9t7Q.png differ diff --git a/content/post/cat-file-without-comments/index.md b/content/post/cat-file-without-comments/index.md new file mode 100644 index 0000000..97e29dd --- /dev/null +++ b/content/post/cat-file-without-comments/index.md @@ -0,0 +1,54 @@ +--- +title: "Cat a File Without Comments" # Title of the blog post. +date: 2023-02-22 # Date of post creation. +# lastmod: 2023-02-20T10:32:20-06:00 # Date when last modified +description: "A quick trick to strip out the comments when viewing the contents of a file." # Description used for search engine. 
+featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips # Projects, Scripts, vRA8, K8s on vSphere +tags: + - linux + - shell + - regex +comment: true # Disable comment if false. +--- +It's super handy when a Linux config file is loaded with comments to tell you precisely how to configure the thing, but all those comments can really get in the way when you're trying to review the current configuration. + +Next time, instead of scrolling through page after page of lengthy embedded explanations, just use: +```shell +egrep -v "^\s*(#|$)" $filename +``` + +For added usefulness, I alias this command to `ccat` (which my brain interprets as "commentless cat") in [my `~/.zshrc`](https://github.com/jbowdre/dotfiles/blob/main/zsh/.zshrc): +```shell +alias ccat='egrep -v "^\s*(#|$)"' +``` + +Now instead of viewing all 75 lines of a [mostly-default Vagrantfile](/create-vms-chromebook-hashicorp-vagrant), I just see the 7 that matter: +```shell +; wc -l Vagrantfile +75 Vagrantfile + +; ccat Vagrantfile +Vagrant.configure("2") do |config| + config.vm.box = "oopsme/windows11-22h2" + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 4 + libvirt.memory = 4096 + end +end + +; ccat Vagrantfile | wc -l +7 +``` + +Nice! 
diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_creation_time.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_creation_time.png new file mode 100644 index 0000000..da1caa8 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_creation_time.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_firewall.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_firewall.png new file mode 100644 index 0000000..fa296b0 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_firewall.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_instance_creation.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_instance_creation.png new file mode 100644 index 0000000..899d2c2 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_instance_creation.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_network_settings.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_network_settings.png new file mode 100644 index 0000000..3eeaa49 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_network_settings.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_security_settings.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_security_settings.png new file mode 100644 index 0000000..6ac3434 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211027_security_settings.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_instance_advanced_settings.png 
b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_instance_advanced_settings.png new file mode 100644 index 0000000..04cdf96 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_instance_advanced_settings.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_mobile_vsphere_sucks.jpg b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_mobile_vsphere_sucks.jpg new file mode 100644 index 0000000..00f8f16 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_mobile_vsphere_sucks.jpg differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_qrcode_config.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_qrcode_config.png new file mode 100644 index 0000000..fe4af60 Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_qrcode_config.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_remote_homelab.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_remote_homelab.png new file mode 100644 index 0000000..ca4b49b Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_remote_homelab.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_tasker_setup.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_tasker_setup.png new file mode 100644 index 0000000..01a275c Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_tasker_setup.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_wireguard_in_the_cloud.jpg b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_wireguard_in_the_cloud.jpg new file mode 100644 index 0000000..a253e7e 
Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_wireguard_in_the_cloud.jpg differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_wireguard_mobile.png b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_wireguard_mobile.png new file mode 100644 index 0000000..2a882ca Binary files /dev/null and b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/20211028_wireguard_mobile.png differ diff --git a/content/post/cloud-based-wireguard-vpn-remote-homelab-access/index.md b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/index.md new file mode 100644 index 0000000..624bb9d --- /dev/null +++ b/content/post/cloud-based-wireguard-vpn-remote-homelab-access/index.md @@ -0,0 +1,503 @@ +--- +series: Projects +date: "2021-10-28T00:00:00Z" +thumbnail: 20211028_wireguard_in_the_cloud.jpg +usePageBundles: true +tags: +- linux +- gcp +- cloud +- wireguard +- vpn +- homelab +- tasker +- automation +- networking +- security +title: Cloud-hosted WireGuard VPN for remote homelab access +featured: false +--- +For a while now, I've been using an [OpenVPN Access Server](https://openvpn.net/access-server/) virtual appliance for remotely accessing my [homelab](/vmware-home-lab-on-intel-nuc-9). That's worked _fine_ but it comes with a lot of overhead. It also requires maintaining an SSL certificate and forwarding three ports through my home router, in addition to managing a fairly complex software package and configurations. The free version of the OpenVPN server also only supports a maximum of two simultaneous connections. I recently ran into issues with the `certbot` automated SSL renewal process on my OpenVPN AS VM and decided that it might be time to look for a simpler solution. + +I found that solution in [WireGuard](https://www.wireguard.com/), which provides an extremely efficient secure tunnel implemented directly in the Linux kernel. 
It has a much smaller (and easier-to-audit) codebase, requires minimal configuration, and uses the latest crypto wizardry to securely connect multiple systems. It took me an hour or so of fumbling to get WireGuard deployed and configured on a fresh (and minimal) Ubuntu 20.04 VM running on my ESXi 7 homelab host, and I was pretty happy with the performance, stability, and resource usage of the new setup. That new VM idled at a full _tenth_ of the memory usage of my OpenVPN AS, and it only required a single port to be forwarded into my home network. + +Of course, I soon realized that the setup could be _even better:_ I'm now running a WireGuard server on the Google Cloud free tier, and I've configured the [VyOS virtual router I use for my homelab stuff](/vmware-home-lab-on-intel-nuc-9#networking) to connect to that cloud-hosted server to create a secure tunnel between the two without needing to punch any holes in my local network (or consume any additional resources). I can then connect my client devices to the WireGuard server in the cloud. From there, traffic intended for my home network gets relayed to the VyOS router, and internet-bound traffic leaves Google Cloud directly. So my self-managed VPN isn't just good for accessing my home lab remotely, but also more generally for encrypting traffic when on WiFi networks I don't control - allowing me to replace the paid ProtonVPN subscription I had been using for that purpose. + +It's a pretty slick setup, if I do say so myself. Anyway, this post will discuss how I implemented this, and what I learned along the way. + +### WireGuard Concepts, in Brief +WireGuard does things a bit differently from other VPN solutions I've used in the past. For starters, there aren't any user accounts to manage, and in fact users don't really come into the picture at all. 
WireGuard also doesn't really distinguish between _client_ and _server_; the devices on both ends of a tunnel connection are _peers_, and they use the same software package and very similar configurations. Each WireGuard peer is configured with a virtual network interface with a private IP address used for the tunnel network, and a configuration file tells it which tunnel IP(s) will be used by the other peer(s). Each peer has its own cryptographic _private_ key, and the other peers get a copy of the corresponding _public_ key added to their configuration so that all the peers can recognize each other and encrypt/decrypt traffic appropriately. This mapping of peer addresses to public keys facilitates what WireGuard calls [Cryptokey Routing](https://www.wireguard.com/#cryptokey-routing). + +Once the peers are configured, all it takes is bringing up the WireGuard virtual interface on each peer to establish the tunnel and start passing secure traffic. + +You can read a lot more fascinating details about how this all works back on the [WireGuard homepage](https://www.wireguard.com/#conceptual-overview) (and even more in this [protocol description](https://www.wireguard.com/protocol/)) but this at least covers the key points I needed to grok prior to a successful initial deployment. + +For my hybrid cloud solution, I also leaned heavily upon [this write-up of a WireGuard Site-to-Site configuration](https://gist.github.com/insdavm/b1034635ab23b8839bf957aa406b5e39) for how to get traffic flowing between my on-site environment, cloud-hosted WireGuard server, and "Road Warrior" client devices, and drew from [this documentation on implementing WireGuard in GCP](https://github.com/agavrel/wireguard_google_cloud) as well. The [VyOS documentation for configuring the built-in WireGuard interface](https://docs.vyos.io/en/latest/configuration/interfaces/wireguard.html) was also quite helpful to me. + +Okay, enough background; let's get this thing going. 
+ +### Google Cloud Setup +#### Instance Deployment +I started by logging into my Google Cloud account at https://console.cloud.google.com, and proceeded to create a new project (named `wireguard`) to keep my WireGuard-related resources together. I then navigated to **Compute Engine** and [created a new instance](https://console.cloud.google.com/compute/instancesAdd) inside that project. The basic setup is: + +| Attribute | Value | +| --- | --- | +| Name | `wireguard` | +| Region | `us-east1` (or whichever [free-tier-eligible region](https://cloud.google.com/free/docs/gcp-free-tier/#compute) is closest) | +| Machine Type | `e2-micro` | +| Boot Disk Size | 10 GB | +| Boot Disk Image | Ubuntu 20.04 LTS | + +![Instance creation](20211027_instance_creation.png) + +The other defaults are fine, but I'll hold off on clicking the friendly blue "Create" button at the bottom and instead click to expand the **Networking, Disks, Security, Management, Sole-Tenancy** sections to tweak a few more things. +![Instance creation advanced settings](20211028_instance_advanced_settings.png) + +##### Network Configuration +Expanding the **Networking** section of the request form lets me add a new `wireguard` network tag, which will make it easier to target the instance with a firewall rule later. I also want to enable the _IP Forwarding_ option so that the instance will be able to do router-like things. + +By default, the new instance will get assigned a public IP address that I can use to access it externally - but this address is _ephemeral_ so it will change periodically. Normally I'd overcome this by [using ddclient to manage its dynamic DNS record](/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance#configure-dynamic-dns), but (looking ahead) [VyOS's WireGuard interface configuration](https://docs.vyos.io/en/latest/configuration/interfaces/wireguard.html#interface-configuration) unfortunately only supports connecting to an IP rather than a hostname.
That means I'll need to reserve a _static_ IP address for my instance. + +I can do that by clicking on the _Default_ network interface to expand the configuration. While I'm here, I'll first change the **Network Service Tier** from _Premium_ to _Standard_ to save a bit of money on network egress fees. _(This might be a good time to mention that while the compute instance itself is free, I will have to spend [about $3/mo for the public IP](https://cloud.google.com/vpc/network-pricing#:~:text=internal%20IP%20addresses.-,External%20IP%20address%20pricing,-You%20are%20charged), as well as [$0.085/GiB for internet egress via the Standard tier](https://cloud.google.com/vpc/network-pricing#:~:text=or%20Cloud%20Interconnect.-,Standard%20Tier%20pricing,-Egress%20pricing%20is) (versus [$0.12/GiB on the Premium tier](https://cloud.google.com/vpc/network-pricing#:~:text=Premium%20Tier%20pricing)). So not entirely free, but still pretty damn cheap for a cloud-hosted VPN that I control completely.)_ + +Anyway, after switching to the cheaper Standard tier I can click on the **External IP** dropdown and select the option to _Create IP Address_. I give it the same name as my instance to make it easy to keep up with. + +![Network configuration](20211027_network_settings.png) + +##### Security Configuration +The **Security** section lets me go ahead and upload an SSH public key that I can then use for logging into the instance once it's running. Of course, that means I'll first need to generate a key pair for this purpose: +```sh +ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_wireguard +``` + +Okay, now that I've got my keys, I can click the **Add Item** button and paste in the contents of `~/.ssh/id_ed25519_wireguard.pub`. + +![Security configuration](20211027_security_settings.png) + +And that's it for the pre-deploy configuration! Time to hit **Create** to kick it off. 
+ +![Do it!](20211027_creation_time.png) + +The instance creation will take a couple of minutes but I can go ahead and get the firewall sorted while I wait. + +#### Firewall +Google Cloud's default firewall configuration will let me reach my new server via SSH without needing to configure anything, but I'll need to add a new rule to allow the WireGuard traffic. I do this by going to **VPC > Firewall** and clicking the button at the top to **[Create Firewall Rule](https://console.cloud.google.com/networking/firewalls/add)**. I give it a name (`allow-wireguard-ingress`), select the rule target by specifying the `wireguard` network tag I had added to the instance, and set the source range to `0.0.0.0/0`. I'm going to use the default WireGuard port so select the _udp:_ checkbox and enter `51820`. + +![Firewall rule creation](20211027_firewall.png) + +I'll click **Create** and move on. + +#### WireGuard Server Setup +Once the **Compute Engine > Instances** [page](https://console.cloud.google.com/compute/instances) indicates that the instance is ready, I can make a note of the listed public IP and then log in via SSH: +```sh +ssh -i ~/.ssh/id_ed25519_wireguard {PUBLIC_IP} +``` + +##### Preparation +And, as always, I'll first make sure the OS is fully updated before doing anything else: +```sh +sudo apt update +sudo apt upgrade +``` + +Then I'll install `ufw` to easily manage the host firewall, `qrencode` to make it easier to generate configs for mobile clients, `openresolv` to avoid [this issue](https://superuser.com/questions/1500691/usr-bin-wg-quick-line-31-resolvconf-command-not-found-wireguard-debian/1500896), and `wireguard` to, um, guard the wires: +```sh +sudo apt install ufw qrencode openresolv wireguard +``` + +Configuring the host firewall with `ufw` is very straightforward: +```sh +# First, SSH: +sudo ufw allow 22/tcp +# and WireGuard: +sudo ufw allow 51820/udp +# Then turn it on: +sudo ufw enable +``` + +The last preparatory step is to enable packet forwarding
in the kernel so that the instance will be able to route traffic between the remote clients and my home network (once I get to that point). I can configure that on-the-fly with: +```sh +sudo sysctl -w net.ipv4.ip_forward=1 +``` + +To make it permanent, I'll edit `/etc/sysctl.conf` and uncomment the same line: +```sh +$ sudo vi /etc/sysctl.conf +# Uncomment the next line to enable packet forwarding for IPv4 +net.ipv4.ip_forward=1 +``` + +##### WireGuard Interface Config +I'll switch to the root user, move into the `/etc/wireguard` directory, and issue `umask 077` so that the files I'm about to create will have a very limited permission set (to be accessible by root, and _only_ root): +```sh +sudo -i +cd /etc/wireguard +umask 077 +``` + +Then I can use the `wg genkey` command to generate the server's private key, save it to a file called `server.key`, pass it through `wg pubkey` to generate the corresponding public key, and save that to `server.pub`: +```sh +wg genkey | tee server.key | wg pubkey > server.pub +``` + +As I mentioned earlier, WireGuard will create a virtual network interface using an internal network to pass traffic between the WireGuard peers. By convention, that interface is `wg0` and it draws its configuration from a file in `/etc/wireguard` named `wg0.conf`. I could create a configuration file with a different name and thus wind up with a different interface name as well, but I'll stick with tradition to keep things easy to follow. 
+ +The format of the interface configuration file will need to look something like this: +``` +[Interface] # this section defines the local WireGuard interface +Address = # CIDR-format IP address of the virtual WireGuard interface +ListenPort = # WireGuard listens on this port for incoming traffic (randomized if not specified) +PrivateKey = # private key used to encrypt traffic sent to other peers +MTU = # packet size +DNS = # optional DNS server(s) and search domain(s) used for the VPN +PostUp = # command executed by wg-quick wrapper when the interface comes up +PostDown = # command executed by wg-quick wrapper when the interface goes down + +[Peer] # now we're talking about the other peers connecting to this instance +PublicKey = # public key used to decrypt traffic sent by this peer +AllowedIPs = # which IPs will be routed to this peer +``` + +There will be a single `[Interface]` section in each peer's configuration file, but they may include multiple `[Peer]` sections. For my config, I'll use the `10.200.200.0/24` network for WireGuard, and let this server be `10.200.200.1`, the VyOS router in my home lab `10.200.200.2`, and I'll assign IPs to the other peers from there. I found a note that Google Cloud uses an MTU size of `1460` bytes so that's what I'll set on this end. I'm going to configure WireGuard to use the VyOS router as the DNS server, and I'll specify my internal `lab.bowdre.net` search domain. Finally, I'll leverage the `PostUp` and `PostDown` directives to enable and disable NAT so that the server will be able to forward traffic between networks for me. 
+ +So here's the start of my GCP WireGuard server's `/etc/wireguard/wg0.conf`: +```sh +# /etc/wireguard/wg0.conf +[Interface] +Address = 10.200.200.1/24 +ListenPort = 51820 +PrivateKey = {GCP_PRIVATE_KEY} +MTU = 1460 +DNS = 10.200.200.2, lab.bowdre.net +PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE +PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o ens4 -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ens4 -j MASQUERADE +``` + +I don't have any other peers ready to add to this config yet, but I can go ahead and bring up the interface all the same. I'm going to use the `wg-quick` wrapper instead of calling `wg` directly since it simplifies a bit of the configuration, but first I'll need to enable the `wg-quick@{INTERFACE}` service so that it will run automatically at startup: +```sh +systemctl enable wg-quick@wg0 +systemctl start wg-quick@wg0 +``` + +I can now bring up the interface with `wg-quick up wg0` and check the status with `wg show`: +``` +root@wireguard:~# wg-quick up wg0 +[#] ip link add wg0 type wireguard +[#] wg setconf wg0 /dev/fd/63 +[#] ip -4 address add 10.200.200.1/24 dev wg0 +[#] ip link set mtu 1460 up dev wg0 +[#] resolvconf -a wg0 -m 0 -x +[#] iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE +root@wireguard:~# wg show +interface: wg0 + public key: {GCP_PUBLIC_KEY} + private key: (hidden) + listening port: 51820 +``` + +I'll come back here once I've got a peer config to add. + +### Configure VyOS Router as WireGuard Peer +Comparatively, configuring WireGuard on VyOS is a bit more direct.
I'll start by entering configuration mode and generating and binding a key pair for this interface: +```sh +configure +run generate pki wireguard key-pair install interface wg0 +``` + +And then I'll configure the rest of the options needed for the interface: +```sh +set interfaces wireguard wg0 address '10.200.200.2/24' +set interfaces wireguard wg0 description 'VPN to GCP' +set interfaces wireguard wg0 peer wireguard-gcp address '{GCP_PUBLIC_IP}' +set interfaces wireguard wg0 peer wireguard-gcp allowed-ips '0.0.0.0/0' +set interfaces wireguard wg0 peer wireguard-gcp persistent-keepalive '25' +set interfaces wireguard wg0 peer wireguard-gcp port '51820' +set interfaces wireguard wg0 peer wireguard-gcp public-key '{GCP_PUBLIC_KEY}' +``` + +Note that this time I'm allowing all IPs (`0.0.0.0/0`) so that this WireGuard interface will pass traffic intended for any destination (whether it's local, remote, or on the Internet). And I'm specifying a [25-second `persistent-keepalive` interval](https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence) to help ensure that this NAT-ed tunnel stays up even when it's not actively passing traffic - after all, I'll need the GCP-hosted peer to be able to initiate the connection so I can access the home network remotely. + +While I'm at it, I'll also add a static route to ensure traffic for the WireGuard tunnel finds the right interface: +```sh +set protocols static route 10.200.200.0/24 interface wg0 +``` + +And I'll add the new `wg0` interface as a listening address for the VyOS DNS forwarder: +```sh +set service dns forwarding listen-address '10.200.200.2' +``` + +I can use the `compare` command to verify the changes I've made, and then apply and save the updated config: +```sh +compare +commit +save +``` + +I can check the status of WireGuard on VyOS (and view the public key!) 
like so: +```sh +$ show interfaces wireguard wg0 summary +interface: wg0 + public key: {VYOS_PUBLIC_KEY} + private key: (hidden) + listening port: 43543 + +peer: {GCP_PUBLIC_KEY} + endpoint: {GCP_PUBLIC_IP}:51820 + allowed ips: 0.0.0.0/0 + transfer: 0 B received, 592 B sent + persistent keepalive: every 25 seconds +``` + +See? That part was much easier to set up! But it doesn't look like it's actually passing traffic yet... because while the VyOS peer has been configured with the GCP peer's public key, the GCP peer doesn't know anything about the VyOS peer yet. + +So I'll copy `{VYOS_PUBLIC_KEY}` and SSH back to the GCP instance to finish that configuration. Once I'm there, I can edit `/etc/wireguard/wg0.conf` as root and add in a new `[Peer]` section at the bottom, like this: +``` +[Peer] +# VyOS +PublicKey = {VYOS_PUBLIC_KEY} +AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16 +``` + +This time, I'm telling WireGuard that the new peer has IP `10.200.200.2` but that it should also get traffic destined for the `192.168.1.0/24` and `172.16.0.0/16` networks, my home and lab networks. Again, the `AllowedIPs` parameter is used for WireGuard's Cryptokey Routing so that it can keep track of which traffic goes to which peers (and which key to use for encryption). + +After saving the file, I can either restart WireGuard by bringing the interface down and back up (`wg-quick down wg0 && wg-quick up wg0`), or I can reload it on the fly with: +```sh +sudo -i +wg syncconf wg0 <(wg-quick strip wg0) +``` + +(I can't just use `wg syncconf wg0` directly since `/etc/wireguard/wg0.conf` includes the `PostUp`/`PostDown` commands which can only be parsed by the `wg-quick` wrapper, so I'm using `wg-quick strip {INTERFACE}` to grab the contents of the config file, remove the problematic bits, and then pass what's left to the `wg syncconf {INTERFACE}` command to update the current running config.) 
+ +Now I can check the status of WireGuard on the GCP end: +```sh +root@wireguard:~# wg show +interface: wg0 + public key: {GCP_PUBLIC_KEY} + private key: (hidden) + listening port: 51820 + +peer: {VYOS_PUBLIC_KEY} + endpoint: {VYOS_PUBLIC_IP}:43990 + allowed ips: 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16 + latest handshake: 55 seconds ago + transfer: 1.23 KiB received, 368 B sent +``` + +Hey, we're passing traffic now! And I can verify that I can ping stuff on my home and lab networks from the GCP instance: +```sh +john@wireguard:~$ ping -c 1 192.168.1.5 +PING 192.168.1.5 (192.168.1.5) 56(84) bytes of data. +64 bytes from 192.168.1.5: icmp_seq=1 ttl=127 time=35.6 ms + +--- 192.168.1.5 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 35.598/35.598/35.598/0.000 ms + +john@wireguard:~$ ping -c 1 172.16.10.1 +PING 172.16.10.1 (172.16.10.1) 56(84) bytes of data. +64 bytes from 172.16.10.1: icmp_seq=1 ttl=64 time=35.3 ms + +--- 172.16.10.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 35.275/35.275/35.275/0.000 ms +``` + +Cool! + +### Adding Additional Peers +So my GCP and VyOS peers are talking, but the ultimate goals here are for my Chromebook to have access to my homelab resources while away from home, and for my phones to have secure internet access when connected to WiFi networks I don't control. That means adding at least two more peers to the GCP server. WireGuard [offers downloads](https://www.wireguard.com/install/) for just about every operating system you can imagine, but I'll be using the [Android app](https://play.google.com/store/apps/details?id=com.wireguard.android) for both the Chromebook and phones. + +#### Chromebook +The first step is to install the WireGuard Android app. 
+ +_Note: the version of the WireGuard app currently available on the Play Store (v1.0.20210926) [has an issue](https://www.reddit.com/r/WireGuard/comments/q11rt9/wireguard_1020210926_and_chromeos/) on Chrome OS that causes it to not pass traffic after the Chromebook has resumed from sleep. The workaround for this is to install an older version of the app (1.0.20210506) which can be obtained from [F-Droid](https://f-droid.org/en/packages/com.wireguard.android/). Doing so requires having the Linux environment enabled on Chrome OS and the **Develop Android Apps > Enable ADB Debugging** option enabled in the Chrome OS settings. The process for sideloading apps is [detailed here](https://developer.android.com/topic/arc/development-environment)._ + +Once it's installed, I open the app and click the "Plus" button to create a new tunnel, and select the _Create from scratch_ option. I click the circle-arrows icon at the right edge of the _Private key_ field, and that automatically generates this peer's private and public key pair. Simply clicking on the _Public key_ field will automatically copy the generated key to my clipboard, which will be useful for sharing it with the server. Otherwise I fill out the **Interface** section similarly to what I've done already: + +| Parameter | Value | +| --- | --- | +| Name | `wireguard-gcp` | +| Private key | `{CB_PRIVATE_KEY}` | +| Public key | `{CB_PUBLIC_KEY}` | +| Addresses | `10.200.200.3/24` | +| Listen port | | +| DNS servers | `10.200.200.2` | +| MTU | | + +I then click the **Add Peer** button to tell this client about the peer it will be connecting to - the GCP-hosted instance: + +| Parameter | Value | +| --- | --- | +| Public key | `{GCP_PUBLIC_KEY}` | +| Pre-shared key | | +| Persistent keepalive | | +| Endpoint | `{GCP_PUBLIC_IP}:51820` | +| Allowed IPs | `0.0.0.0/0` | + +I _shouldn't_ need the keepalive for the "Road Warrior" peers connecting to the GCP peer, but I can always set that later if I run into stability issues. 
+ +Now I can go ahead and save this configuration, but before I try (and fail) to connect I first need to tell the cloud-hosted peer about the Chromebook. So I fire up an SSH session to my GCP instance, become root, and edit the WireGuard configuration to add a new `[Peer]` section. + +```sh +sudo -i +vi /etc/wireguard/wg0.conf +``` + +Here's the new section that I'll add to the bottom of the config: + +```sh +[Peer] +# Chromebook +PublicKey = {CB_PUBLIC_KEY} +AllowedIPs = 10.200.200.3/32 +``` + +This one is acting as a single-node endpoint (rather than an entryway into other networks like the VyOS peer) so setting `AllowedIPs` to only the peer's IP makes sure that WireGuard will only send it traffic specifically intended for this peer. + +So my complete `/etc/wireguard/wg0.conf` looks like this so far: +```sh +# /etc/wireguard/wg0.conf +[Interface] +Address = 10.200.200.1/24 +ListenPort = 51820 +PrivateKey = {GCP_PRIVATE_KEY} +MTU = 1460 +DNS = 10.200.200.2, lab.bowdre.net +PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE +PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o ens4 -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ens4 -j MASQUERADE + +[Peer] +# VyOS +PublicKey = {VYOS_PUBLIC_KEY} +AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16 + +[Peer] +# Chromebook +PublicKey = {CB_PUBLIC_KEY} +AllowedIPs = 10.200.200.3/32 +``` + +Now to save the file and reload the WireGuard configuration again: +```sh +wg syncconf wg0 <(wg-quick strip wg0) +``` + +At this point I can activate the connection in the WireGuard Android app, wait a few seconds, and check with `wg show` to confirm that the tunnel has been established successfully: + +```sh +root@wireguard:~# wg show +interface: wg0 + public key: {GCP_PUBLIC_KEY} + private key: (hidden) + 
listening port: 51820 + +peer: {VYOS_PUBLIC_KEY} + endpoint: {VYOS_PUBLIC_IP}:43990 + allowed ips: 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16 + latest handshake: 1 minute, 55 seconds ago + transfer: 200.37 MiB received, 16.32 MiB sent + +peer: {CB_PUBLIC_KEY} + endpoint: {CB_PUBLIC_IP}:33752 + allowed ips: 10.200.200.3/32 + latest handshake: 48 seconds ago + transfer: 169.17 KiB received, 808.33 KiB sent +``` + +And I can even access my homelab when not at home! +![Remote access to my homelab!](20211028_remote_homelab.png) + +#### Android Phone +Being able to copy-and-paste the required public keys between the WireGuard app and the SSH session to the GCP instance made it relatively easy to set up the Chromebook, but things could be a bit trickier on a phone without that kind of access. So instead I will create the phone's configuration on the WireGuard server in the cloud, render that config file as a QR code, and simply scan that through the phone's WireGuard app to import the settings. + +I'll start by SSHing to the GCP instance, elevating to root, setting the restrictive `umask` again, and creating a new folder to store client configurations. 
+```sh +sudo -i +umask 077 +mkdir /etc/wireguard/clients +cd /etc/wireguard/clients +``` + +As before, I'll use the built-in `wg` commands to generate the private and public key pair: +```sh +wg genkey | tee phone1.key | wg pubkey > phone1.pub +``` + +I can then use those keys to assemble the config for the phone: +```sh +# /etc/wireguard/clients/phone1.conf +[Interface] +PrivateKey = {PHONE1_PRIVATE_KEY} +Address = 10.200.200.4/24 +DNS = 10.200.200.2, lab.bowdre.net + +[Peer] +PublicKey = {GCP_PUBLIC_KEY} +AllowedIPs = 0.0.0.0/0 +Endpoint = {GCP_PUBLIC_IP}:51820 +``` + +I'll also add the interface address and corresponding public key to a new `[Peer]` section of `/etc/wireguard/wg0.conf`: +```sh +[Peer] +PublicKey = {PHONE1_PUBLIC_KEY} +AllowedIPs = 10.200.200.4/32 +``` + +And reload the WireGuard config: +```sh +wg syncconf wg0 <(wg-quick strip wg0) +``` + +Back in the `clients/` directory, I can use `qrencode` to render the phone configuration file (keys and all!) as a QR code: +```sh +qrencode -t ansiutf8 < phone1.conf +``` +![QR code config](20211028_qrcode_config.png) + +And then I just open the WireGuard app on my phone and use the **Scan from QR Code** option. After a successful scan, it'll prompt me to name the new tunnel, and then I should be able to connect right away. +![Successful mobile connection](20211028_wireguard_mobile.png) + +I can even access my vSphere lab environment - not that it offers a great mobile experience... +![vSphere mobile sucks](20211028_mobile_vsphere_sucks.jpg) + +Before moving on too much further, though, I'm going to clean up the keys and client config file that I generated on the GCP instance. It's not great hygiene to keep a private key stored on the same system it's used to access. + +```sh +rm -f /etc/wireguard/clients/* +``` + +##### Bonus: Automation! 
 +I've [written before](auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker) about a set of [Tasker](https://play.google.com/store/apps/details?id=net.dinglisch.android.taskerm) profiles I put together so that my phone would automatically connect to a VPN whenever it connects to a WiFi network I don't control. It didn't take much effort at all to adapt the profile to work with my new WireGuard setup. + +Two quick pre-requisites first: +1. Open the WireGuard Android app, tap the three-dot menu button at the top right, expand the Advanced section, and enable the _Allow remote control apps_ option so that Tasker will be permitted to control WireGuard. +2. Exclude the WireGuard app from Android's battery optimization so that it doesn't have any problems running in the background. On (Pixel-flavored) Android 12, this can be done by going to **Settings > Apps > See all apps > WireGuard > Battery** and selecting the _Unrestricted_ option. + +On to the Tasker config. The only changes will be in the [VPN on Strange Wifi](/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker#vpn-on-strange-wifi) profile. I'll remove the OpenVPN-related actions from the Enter and Exit tasks and replace them with the built-in **Tasker > Tasker Function WireGuard Set Tunnel** action. + +For the Enter task, I'll set the tunnel status to `true` and specify the name of the tunnel as configured in the WireGuard app; the Exit task gets the status set to `false` to disable the tunnel. Both actions will be conditional upon the `%TRUSTED_WIFI` variable being unset. 
+![Tasker setup](20211028_tasker_setup.png) + +``` +Profile: VPN on Strange WiFi + Settings: Notification: no + State: Wifi Connected [ SSID:* MAC:* IP:* Active:Any ] + + Enter Task: ConnectVPN + A1: Tasker Function [ + Function: WireGuardSetTunnel(true,wireguard-gcp) ] + If [ %TRUSTED_WIFI !Set ] + + Exit Task: DisconnectVPN + A1: Tasker Function [ + Function: WireGuardSetTunnel(false,wireguard-gcp) ] + If [ %TRUSTED_WIFI !Set ] +``` + +_Automagic!_ + +#### Other Peers +Any additional peers that need to be added in the future will likely follow one of the above processes. The steps are always to generate the peer's key pair, use the private key to populate the `[Interface]` portion of the peer's config, configure the `[Peer]` section with the _public_ key, allowed IPs, and endpoint address of the peer it will be connecting to, and then to add the new peer's _public_ key and internal WireGuard IP to a new `[Peer]` section of the existing peer's config. + diff --git a/content/post/create-vms-chromebook-hashicorp-vagrant/index.md b/content/post/create-vms-chromebook-hashicorp-vagrant/index.md new file mode 100644 index 0000000..1051147 --- /dev/null +++ b/content/post/create-vms-chromebook-hashicorp-vagrant/index.md @@ -0,0 +1,248 @@ +--- +title: "Create Virtual Machines on a Chromebook with HashiCorp Vagrant" # Title of the blog post. +date: 2023-02-20 # Date of post creation. +lastmod: 2023-02-25 +description: "Pairing the powerful Linux Development Environment on modern Chromebooks with HashiCorp Vagrant to create and manage local virtual machines for development and testing" # Description used for search engine. +featured: true # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. 
+usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Projects +tags: + - linux + - chromeos + - homelab + - infrastructure-as-code +comment: true # Disable comment if false. +--- +I've lately been trying to do more with [Salt](https://saltproject.io/) at work, but I'm still very much a novice with that tool. I thought it would be great to have a nice little portable lab environment where I could deploy a few lightweight VMs and practice managing them with Salt - without impacting any systems that are actually being used for anything. Along the way, I figured I'd leverage [HashiCorp Vagrant](https://www.vagrantup.com/) to create and manage the VMs, which would provide a declarative way to define what the VMs should look like. The VM (or even groups of VMs) would be specified in a single file, and I'd bypass all the tedious steps of creating the virtual hardware, attaching the installation media, installing the OS, and performing the initial configuration. Vagrant will help me build up, destroy, and redeploy a development environment in a simple and repeatable way. + +Also, because I'm a bit of a sadist, I wanted to do this all on my new [Framework Chromebook](https://frame.work/laptop-chromebook-12-gen-intel). I might as well put my 32GB of RAM to good use, right? 
 + +It took a bit of fumbling, but this article describes what it took to get a Vagrant-powered VM up and running in the [Linux Development Environment](https://chromeos.dev/en/linux) on my Chromebook (which is currently running ChromeOS v111 beta). + +### Install the prerequisites +There are a few packages which need to be installed before we can move on to the Vagrant-specific stuff. It's quite possible that these are already on your system.... but if they *aren't* already present you'll have a bad problem[^problem]. + +```shell +sudo apt update +sudo apt install \ + build-essential \ + gpg \ + lsb-release \ + wget +``` + +[^problem]: and [will not go to space today](https://xkcd.com/1133/). + +I'll be configuring Vagrant to use [`libvirt`](https://libvirt.org/) to interface with the [Kernel Virtual Machine (KVM)](https://www.linux-kvm.org/page/Main_Page) virtualization solution (rather than something like VirtualBox that would bring more overhead) so I'll need to install some packages for that as well: +```shell +sudo apt install virt-manager libvirt-dev +``` + +And to avoid having to `sudo` each time I interact with `libvirt` I'll add myself to that group: +```shell +sudo gpasswd -a $USER libvirt ; newgrp libvirt +``` + +And to avoid [this issue](https://github.com/virt-manager/virt-manager/issues/333) I'll make a tweak to the `qemu.conf` file: +```shell +echo "remember_owner = 0" | sudo tee -a /etc/libvirt/qemu.conf +sudo systemctl restart libvirtd +``` + +I'm also going to use `rsync` to share a [synced folder](https://developer.hashicorp.com/vagrant/docs/synced-folders/basic_usage) between the host and the VM guest so I'll need to make sure that's installed too: +```shell +sudo apt install rsync +``` + +### Install Vagrant +With that out of the way, I'm ready to move on to the business of installing Vagrant. 
I'll start by adding the HashiCorp repository: +```shell +wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg +echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list +``` + +I'll then install the Vagrant package: +```shell +sudo apt update +sudo apt install vagrant +``` + +I also need to install the [`vagrant-libvirt` plugin](https://github.com/vagrant-libvirt/vagrant-libvirt) so that Vagrant will know how to interact with `libvirt`: +```shell +vagrant plugin install vagrant-libvirt +``` + +### Create a lightweight VM +Now I can get to the business of creating my first VM with Vagrant! + +Vagrant VMs are distributed as Boxes, and I can browse some published Boxes at [app.vagrantup.com/boxes/search?provider=libvirt](https://app.vagrantup.com/boxes/search?provider=libvirt) (applying the `provider=libvirt` filter so that I only see Boxes which will run on my chosen virtualization provider). For my first VM, I'll go with something light and simple: [`generic/alpine38`](https://app.vagrantup.com/generic/boxes/alpine38). + +So I'll create a new folder to contain the Vagrant configuration: +```shell +mkdir vagrant-alpine +cd vagrant-alpine +``` + +And since I'm referencing a Vagrant Box which is published on Vagrant Cloud, downloading the config is as simple as: +```shell +vagrant init generic/alpine38 +``` + +That lets me know that +```text +A `Vagrantfile` has been placed in this directory. You are now +ready to `vagrant up` your first virtual environment! Please read +the comments in the Vagrantfile as well as documentation on +`vagrantup.com` for more information on using Vagrant. +``` + +Before I `vagrant up` the joint, I do need to make a quick tweak to the default Vagrantfile, which is what tells Vagrant how to configure the VM. 
By default, Vagrant will try to create a synced folder using NFS and will throw a nasty error when that (inevitably[^inevitable]) fails. So I'll open up the Vagrantfile to review and edit it: +```shell +vim Vagrantfile +``` + +Most of the default Vagrantfile is commented out. Here's the entirety of the configuration *without* the comments: +```ruby +Vagrant.configure("2") do |config| + config.vm.box = "generic/alpine38" +end +``` + +There's not a lot there, is there? Well I'm just going to add these two lines somewhere between the `Vagrant.configure()` and `end` lines: +```ruby + config.nfs.verify_installed = false + config.vm.synced_folder '.', '/vagrant', type: 'rsync' +``` + +The first line tells Vagrant not to bother checking to see if NFS is installed, and the second tells it to use `rsync` to share the local directory with the VM guest, where it will be mounted at `/vagrant`. + +So here's the full Vagrantfile (sans-comments[^magic], again): +```ruby +Vagrant.configure("2") do |config| + config.vm.box = "generic/alpine38" + config.nfs.verify_installed = false + config.vm.synced_folder '.', '/vagrant', type: 'rsync' +end +``` + +With that, I'm ready to fire up this VM with `vagrant up`! Vagrant will look inside `Vagrantfile` to see the config, pull down the `generic/alpine38` Box from Vagrant Cloud, boot the VM, configure it so I can SSH in to it, and mount the synced folder: +```shell +; vagrant up +Bringing machine 'default' up with 'libvirt' provider... +==> default: Box 'generic/alpine38' could not be found. Attempting to find and install... + default: Box Provider: libvirt + default: Box Version: >= 0 +==> default: Loading metadata for box 'generic/alpine38' + default: URL: https://vagrantcloud.com/generic/alpine38 +==> default: Adding box 'generic/alpine38' (v4.2.12) for provider: libvirt + default: Downloading: https://vagrantcloud.com/generic/boxes/alpine38/versions/4.2.12/providers/libvirt.box + default: Calculating and comparing box checksum... 
+==> default: Successfully added box 'generic/alpine38' (v4.2.12) for 'libvirt'! +==> default: Uploading base box image as volume into Libvirt storage... +[...] +==> default: Waiting for domain to get an IP address... +==> default: Waiting for machine to boot. This may take a few minutes... + default: SSH address: 192.168.121.41:22 + default: SSH username: vagrant + default: SSH auth method: private key +[...] + default: Key inserted! Disconnecting and reconnecting using new SSH key... +==> default: Machine booted and ready! +==> default: Rsyncing folder: /home/john/projects/vagrant-alpine/ => /vagrant +``` + +And then I can use `vagrant ssh` to log in to the new VM: +```shell +; vagrant ssh +alpine38:~$ cat /etc/os-release +NAME="Alpine Linux" +ID=alpine +VERSION_ID=3.8.5 +PRETTY_NAME="Alpine Linux v3.8" +HOME_URL="http://alpinelinux.org" +BUG_REPORT_URL="http://bugs.alpinelinux.org" +``` + +I can also verify that the synced folder came through as expected: +```shell +alpine38:~$ ls -l /vagrant +total 4 +-rw-r--r-- 1 vagrant vagrant 3117 Feb 20 15:51 Vagrantfile +``` + +Once I'm finished poking at this VM, shutting it down is as easy as: +```shell +vagrant halt +``` + +And if I want to clean up and remove all traces of the VM, that's just: +```shell +vagrant destroy +``` + +[^inevitable]: NFS doesn't work properly from within an LXD container, like the ChromeOS Linux development environment. +[^magic]: Through the magic of `egrep -v "^\s*(#|$)" $file`. + + +### Create a heavy VM, as a treat +Having proven to myself that Vagrant does work on a Chromebook, let's see how it does with a slightly-heavier VM.... like [Windows 11](https://app.vagrantup.com/oopsme/boxes/windows11-22h2). + +{{% notice info "Space Requirement" %}} +Windows 11 makes for a pretty hefty VM which will require significant storage space. My Chromebook's Linux environment ran out of storage space the first time I attempted to deploy this guy. 
Fortunately ChromeOS makes it easy to allocate more space to Linux (**Settings > Advanced > Developers > Linux development environment > Disk size**). You'll probably need at least 30GB free to provision this VM. +{{% /notice %}} + +Again, I'll create a new folder to hold the Vagrant configuration and do a `vagrant init`: +```shell +mkdir vagrant-win11 +cd vagrant-win11 +vagrant init oopsme/windows11-22h2 +``` + +And, again, I'll edit the Vagrantfile before starting the VM. This time, though, I'm adding a few configuration options to tell `libvirt` that I'd like more compute resources than the default 1 CPU and 512MB RAM[^ram]: +```ruby +Vagrant.configure("2") do |config| + config.vm.box = "oopsme/windows11-22h2" + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 4 + libvirt.memory = 4096 + end +end +``` + +[^ram]: Note here that `libvirt.memory` is specified in MB. Windows 11 boots happily with 4096 MB of RAM.... and somewhat less so with just 4 MB. *Ask me how I know...* + +Now it's time to bring it up. This one's going to take A While as it syncs the ~12GB Box first. +```shell +vagrant up +``` + +Eventually it should spit out that lovely **Machine booted and ready!** message and I can log in! I *can* do a `vagrant ssh` again to gain a shell in the Windows environment, but I'll probably want to interact with those sweet sweet graphics. That takes a little bit more effort. + +First, I'll use `virsh -c qemu:///system list` to see the running VM(s): +```shell +; virsh -c qemu:///system list + Id Name State +--------------------------------------- + 10 vagrant-win11_default running +``` + +Then I can tell `virt-viewer` that I'd like to attach a session there: +```shell +virt-viewer -c qemu:///system -a vagrant-win11_default +``` + +I log in with the default password `vagrant`, and I'm in Windows 11 land! +![Windows 11 running on a Chromebook!](win-11-vm.png) + +### Next steps +Well that about does it for a proof-of-concept. 
My next steps will be exploring [multi-machine Vagrant environments](https://developer.hashicorp.com/vagrant/docs/multi-machine) to create a portable lab environment including machines running several different operating systems so that I can learn how to manage them effectively with Salt. It should be fun! + diff --git a/content/post/create-vms-chromebook-hashicorp-vagrant/thumbnail.png b/content/post/create-vms-chromebook-hashicorp-vagrant/thumbnail.png new file mode 100644 index 0000000..a754dd9 Binary files /dev/null and b/content/post/create-vms-chromebook-hashicorp-vagrant/thumbnail.png differ diff --git a/content/post/create-vms-chromebook-hashicorp-vagrant/win-11-vm.png b/content/post/create-vms-chromebook-hashicorp-vagrant/win-11-vm.png new file mode 100644 index 0000000..d02b38a Binary files /dev/null and b/content/post/create-vms-chromebook-hashicorp-vagrant/win-11-vm.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/-9apQIUci.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/-9apQIUci.png new file mode 100644 index 0000000..374fb1e Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/-9apQIUci.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/2fbKJc5Y6.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/2fbKJc5Y6.png new file mode 100644 index 0000000..ce213c4 Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/2fbKJc5Y6.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/2xe34VJym.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/2xe34VJym.png new file mode 100644 index 0000000..57f6a9d Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/2xe34VJym.png differ diff --git 
a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/5PD1H7b1O.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/5PD1H7b1O.png new file mode 100644 index 0000000..9311a44 Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/5PD1H7b1O.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/6IRPHhr6u.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/6IRPHhr6u.png new file mode 100644 index 0000000..2a5357d Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/6IRPHhr6u.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/8XAB60aqk.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/8XAB60aqk.png new file mode 100644 index 0000000..0d9d14b Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/8XAB60aqk.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/GHVqVOTAE.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/GHVqVOTAE.png new file mode 100644 index 0000000..fbad97e Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/GHVqVOTAE.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/Ki0z1C3g.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/Ki0z1C3g.png new file mode 100644 index 0000000..9a78deb Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/Ki0z1C3g.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/OSbsiOw8E.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/OSbsiOw8E.png new file mode 100644 index 0000000..f025ae2 Binary files 
/dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/OSbsiOw8E.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/dMPHvLHkH.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/dMPHvLHkH.png new file mode 100644 index 0000000..71c1129 Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/dMPHvLHkH.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/dZkZUIFum.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/dZkZUIFum.png new file mode 100644 index 0000000..b666554 Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/dZkZUIFum.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/index.md b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/index.md new file mode 100644 index 0000000..e74247c --- /dev/null +++ b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/index.md @@ -0,0 +1,434 @@ +--- +series: Projects +date: "2021-06-28T00:00:00Z" +thumbnail: 2xe34VJym.png +usePageBundles: true +lastmod: "2021-09-17" +tags: +- docker +- linux +- cloud +- containers +- chat +title: Federated Matrix Server (Synapse) on Oracle Cloud's Free Tier +--- + +I've heard a lot lately about how generous [Oracle Cloud's free tier](https://www.oracle.com/cloud/free/) is, particularly when [compared with the free offerings](https://github.com/cloudcommunity/Cloud-Service-Providers-Free-Tier-Overview) from other public cloud providers. Signing up for an account was fairly straight-forward, though I did have to wait a few hours for an actual human to call me on an actual telephone to verify my account. 
Once in, I thought it would be fun to try building my own [Matrix](https://matrix.org/) homeserver to really benefit from the network's decentralized-but-federated model for secure end-to-end encrypted communications. + +There are two primary projects for Matrix homeservers: [Synapse](https://github.com/matrix-org/synapse/) and [Dendrite](https://github.com/matrix-org/dendrite). Dendrite is the newer, more efficient server, but it's not quite feature complete. I'll be using Synapse for my build to make sure that everything works right off the bat, and I will be running the server in a Docker container to make it (relatively) easy to replace if I feel more comfortable about Dendrite in the future. + +As usual, it took quite a bit of fumbling about before I got everything working correctly. Here I'll share the steps I used to get up and running. + +### Instance creation +Getting a VM spun up on Oracle Cloud was a pretty simple process. I logged into my account, navigated to *Menu -> Compute -> Instances*, and clicked on the big blue **Create Instance** button. +![Create Instance](8XAB60aqk.png) + +I'll be hosting this for my `bowdre.net` domain, so I start by naming the instance accordingly: `matrix.bowdre.net`. Naming it isn't strictly necessary, but it does help with keeping track of things. The instance defaults to using an Oracle Linux image. I'd rather use an Ubuntu one for this, simply because I was able to find more documentation on getting Synapse going on Debian-based systems. So I hit the **Edit** button next to *Image and Shape*, select the **Change Image** option, pick **Canonical Ubuntu** from the list of available images, and finally click **Select Image** to confirm my choice. +![Image Selection](OSbsiOw8E.png) + +This will be an Ubuntu 20.04 image running on a `VM.Standard.E2.1.Micro` instance, which gets a single AMD EPYC 7551 CPU with 2.0GHz base frequency and 1GB of RAM. It's not much, but it's free - and it should do just fine for this project. 
+ +I can leave the rest of the options as their defaults, making sure that the instance will be allotted a public IPv4 address. +![Other default selections](Ki0z1C3g.png) + +Scrolling down a bit to the *Add SSH Keys* section, I leave the default **Generate a key pair for me** option selected, and click the very-important **Save Private Key** button to download the private key to my computer so that I'll be able to connect to the instance via SSH. +![Download Private Key](dZkZUIFum.png) + +Now I can finally click the blue **Create Instance** button at the bottom of the screen, and just wait a few minutes for it to start up. Once the status shows a big green "Running" square, I'm ready to connect! I'll copy the listed public IP and make a note of the default username (`ubuntu`). I can then plug the IP, username, and the private key I downloaded earlier into my SSH client (the [Secure Shell extension](https://chrome.google.com/webstore/detail/secure-shell/iodihamcpbpeioajjeobimgagajmlibd) for Google Chrome since I'm doing this from my Pixelbook), and log in to my new VM in The Cloud. +![Logged in!](5PD1H7b1O.png) + +### DNS setup +According to [Oracle's docs](https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm), the public IP assigned to my instance is mine until I terminate the instance. It should even remain assigned if I stop or restart the instance, just as long as I don't delete the virtual NIC attached to it. So I'll skip the [`ddclient`-based dynamic DNS configuration I've used in the past](/bitwarden-password-manager-self-hosted-on-free-google-cloud-instance#configure-dynamic-dns) and instead go straight to my registrar's DNS management portal and create a new `A` record for `matrix.bowdre.net` with the instance's public IP. 
+ +While I'm managing DNS, it might be good to take a look at the requirements for [federating my new server](https://github.com/matrix-org/synapse/blob/master/docs/federate.md#setting-up-federation) with the other Matrix servers out there. I'd like for users identities on my server to be identified by the `bowdre.net` domain (`@user:bowdre.net`) rather than the full `matrix.bowdre.net` FQDN (`@user:matrix.bowdre.net` is kind of cumbersome). The standard way to do this to leverage [`.well-known` delegation](https://github.com/matrix-org/synapse/blob/master/docs/delegate.md#well-known-delegation), where the URL at `http://bowdre.net/.well-known/matrix/server` would return a JSON structure telling other Matrix servers how to connect to mine: +```json +{ + "m.server": "matrix.bowdre.net:8448" +} +``` + +I don't *currently* have another server already handling requests to `bowdre.net`, so for now I'll add another `A` record with the same public IP address to my DNS configuration. Requests for both `bowdre.net` and `matrix.bowdre.net` will reach the same server instance, but those requests will be handled differently. More on that later. + +An alternative to this `.well-known` delegation would be to use [`SRV` DNS record delegation](https://github.com/matrix-org/synapse/blob/master/docs/delegate.md#srv-dns-record-delegation) to accomplish the same thing. I'd create an `SRV` record for `_matrix._tcp.bowdre.net` with the data `0 10 8448 matrix.bowdre.net` (priority=`0`, weight=`10`, port=`8448`, target=`matrix.bowdre.net`) which would again let other Matrix servers know where to send the federation traffic for my server. 
This approach has an advantage of not needing to make any changes on the `bowdre.net` web server, but it would require the delegated `matrix.bowdre.net` server to *also* [return a valid certificate for `bowdre.net`](https://matrix.org/docs/spec/server_server/latest#:~:text=If%20the%20/.well-known%20request%20resulted,present%20a%20valid%20certificate%20for%20%3Chostname%3E.). Trying to get a Let's Encrypt certificate for a server name that doesn't resolve authoritatively in DNS sounds more complicated than I want to get into with this project, so I'll move forward with my plan to use the `.well-known` delegation instead. + +But first, I need to make sure that the traffic reaches the server to begin with. + +### Firewall configuration +Synapse listens on port `8008` for connections from messaging clients, and typically uses port `8448` for federation traffic from other Matrix servers. Rather than expose those ports directly, I'm going to put Synapse behind a reverse proxy on HTTPS port `443`. I'll also need to allow inbound traffic HTTP port `80` for ACME certificate challenges. I've got two firewalls to contend with: the Oracle Cloud one which blocks traffic from getting into my virtual cloud network, and the host firewall running inside the VM. + +I'll tackle the cloud firewall first. From the page showing my instance details, I click on the subnet listed under the *Primary VNIC* heading: +![Click on subnet](lBjINolYq.png) + +I then look in the *Security Lists* section and click on the Default Security List: +![Click on default security list](nnQ7aQrpm.png) + +The *Ingress Rules* section lists the existing inbound firewall exceptions, which by default is basically just SSH. I click on **Add Ingress Rules** to create a new one. +![Ingress rules](dMPHvLHkH.png) + +I want this to apply to traffic from any source IP so I enter the CIDR `0.0.0.0/0`, and I enter the *Destination Port Range* as `80,443`. I also add a brief description and click **Add Ingress Rules**. 
+![Adding an ingress rule](2fbKJc5Y6.png) + +Success! My new ingress rules appear at the bottom of the list. +![New rules added](s5Y0rycng.png) + +That gets traffic from the internet and to my instance, but the OS is still going to drop the traffic at its own firewall. I'll need to work with `iptables` to change that. (You typically use `ufw` to manage firewalls more easily on Ubuntu, but it isn't included on this minimal image and seemed to butt heads with `iptables` when I tried adding it. I eventually decided it was better to just interact with `iptables` directly). I'll start by listing the existing rules on the `INPUT` chain: +``` +$ sudo iptables -L INPUT --line-numbers +Chain INPUT (policy ACCEPT) +num target prot opt source destination +1 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +2 ACCEPT icmp -- anywhere anywhere +3 ACCEPT all -- anywhere anywhere +4 ACCEPT udp -- anywhere anywhere udp spt:ntp +5 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +6 REJECT all -- anywhere anywhere reject-with icmp-host-prohibited +``` + +Note the `REJECT all` statement at line `6`. 
I'll need to insert my new `ACCEPT` rules for ports `80` and `443` above that catch-all reject rule:
Before I move on, I'll be sure to make the rules persistent so they'll be re-applied whenever `iptables` starts up: + +Make rules persistent: +``` +$ sudo netfilter-persistent save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save +``` + +### Reverse proxy setup +I had initially planned on using `certbot` to generate Let's Encrypt certificates, and then reference the certs as needed from an `nginx` or Apache reverse proxy configuration. While researching how the [proxy would need to be configured to front Synapse](https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md), I found this sample `nginx` configuration: +```conf +server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + + # For the federation port + listen 8448 ssl http2 default_server; + listen [::]:8448 ssl http2 default_server; + + server_name matrix.example.com; + + location ~* ^(\/_matrix|\/_synapse\/client) { + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + + # Nginx by default only allows file uploads up to 1M in size + # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml + client_max_body_size 50M; + } +} +``` + +And this sample Apache one: +```conf + + SSLEngine on + ServerName matrix.example.com + + RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME} + AllowEncodedSlashes NoDecode + ProxyPreserveHost on + ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon + ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix + ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon + ProxyPassReverse /_synapse/client http://127.0.0.1:8008/_synapse/client + + + + SSLEngine on + ServerName example.com + + RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME} + AllowEncodedSlashes NoDecode + 
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon + ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix + +``` + +I also found this sample config for another web server called [Caddy](https://caddyserver.com): +``` +matrix.example.com { + reverse_proxy /_matrix/* http://localhost:8008 + reverse_proxy /_synapse/client/* http://localhost:8008 +} + +example.com:8448 { + reverse_proxy http://localhost:8008 +} +``` + +One of these looks much simpler than the other two. I'd never heard of Caddy so I did some quick digging, and I found that it would actually [handle the certificates entirely automatically](https://caddyserver.com/docs/automatic-https) - in addition to having a much easier config. [Installing Caddy](https://caddyserver.com/docs/install#debian-ubuntu-raspbian) wasn't too bad, either: + +```sh +sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo apt-key add - +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list +sudo apt update +sudo apt install caddy +``` + +Then I just need to put my configuration into the default `Caddyfile`, including the required `.well-known` delegation piece from earlier. +``` +$ sudo vi /etc/caddy/Caddyfile +matrix.bowdre.net { + reverse_proxy /_matrix/* http://localhost:8008 + reverse_proxy /_synapse/client/* http://localhost:8008 +} + +bowdre.net { + route { + respond /.well-known/matrix/server `{"m.server": "matrix.bowdre.net:443"}` + redir https://virtuallypotato.com + } +} +``` +There's a lot happening in that 11-line `Caddyfile`, but it's not complicated by any means. The `matrix.bowdre.net` section is pretty much exactly yanked from the sample config, and it's going to pass any requests that start like `matrix.bowdre.net/_matrix/` or `matrix.bowdre.net/_synapse/client/` through to the Synapse server listening locally on port `8008`. 
Caddy will automatically request and apply a Let's Encrypt or ZeroSSL cert for any server names spelled out in the config - very slick! + +I set up the `bowdre.net` section to return the appropriate JSON string to tell other Matrix servers to connect to `matrix.bowdre.net` on port `443` (so that I don't have to open port `8448` through the firewalls), and to redirect all other traffic to one of my favorite technical blogs (maybe you've heard of it?). I had to wrap the `respond` and `redir` directives in a [`route { }` block](https://caddyserver.com/docs/caddyfile/directives/route) because otherwise Caddy's [implicit precedence](https://caddyserver.com/docs/caddyfile/directives#directive-order) would execute the redirect for *all* traffic and never hand out the necessary `.well-known` data. + +(I wouldn't need that section at all if I were using a separate web server for `bowdre.net`; instead, I'd basically just add that `respond /.well-known/matrix/server` line to that other server's config.) + +Now to enable the `caddy` service, start it, and restart it so that it loads the new config: +``` +sudo systemctl enable caddy +sudo systemctl start caddy +sudo systemctl restart caddy +``` + +If I repeat my `nmap` scan from earlier, I'll see that the HTTP and HTTPS ports are now open. The server still isn't actually serving anything on those ports yet, but at least it's listening. +``` +$ nmap -Pn matrix.bowdre.net +Starting Nmap 7.70 ( https://nmap.org ) at 2021-06-27 13:44 CDT +Nmap scan report for matrix.bowdre.net (150.136.6.180) +Host is up (0.034s latency). +Not shown: 997 filtered ports +PORT STATE SERVICE +22/tcp open ssh +80/tcp open http +443/tcp open https + +Nmap done: 1 IP address (1 host up) scanned in 5.29 seconds +``` + +Browsing to `https://matrix.bowdre.net` shows a blank page - but a valid and trusted certificate that I did absolutely nothing to configure! 
+![Valid cert!](GHVqVOTAE.png) + +The `.well-known` URL also returns the expected JSON: +![.well-known](6IRPHhr6u.png) + +And trying to hit anything else at `https://bowdre.net` brings me right back here. + +And again, the config to do all this (including getting valid certs for two server names!) is just 11 lines long. Caddy is seriously and magically cool. + +Okay, let's actually serve something up now. + +### Synapse installation +#### Docker setup +Before I can get on with [deploying Synapse in Docker](https://hub.docker.com/r/matrixdotorg/synapse), I first need to [install Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository) on the system: + +```sh +sudo apt-get install \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg \ + lsb-release + +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + +echo \ + "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +sudo apt update + +sudo apt install docker-ce docker-ce-cli containerd.io +``` + +I'll also [install Docker Compose](https://docs.docker.com/compose/install/#install-compose): +```sh +sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + +sudo chmod +x /usr/local/bin/docker-compose +``` + +And I'll add my `ubuntu` user to the `docker` group so that I won't have to run every docker command with `sudo`: +``` +sudo usermod -G docker -a ubuntu +``` + +I'll log out and back in so that the membership change takes effect, and then test both `docker` and `docker-compose` to make sure they're working: +``` +$ docker --version +Docker version 20.10.7, build f0df350 + +$ docker-compose --version +docker-compose version 1.29.2, build 5becea4c +``` + +#### 
Synapse setup +Now I'll make a place for the Synapse installation to live, including a `data` folder that will be mounted into the container: +``` +sudo mkdir -p /opt/matrix/synapse/data +cd /opt/matrix/synapse +``` + +And then I'll create the compose file to define the deployment: +```yaml +$ sudo vi docker-compose.yml +services: + synapse: + container_name: "synapse" + image: "matrixdotorg/synapse" + restart: "unless-stopped" + ports: + - "127.0.0.1:8008:8008" + volumes: + - "./data/:/data/" +``` + +Before I can fire this up, I'll need to generate an initial configuration as [described in the documentation](https://hub.docker.com/r/matrixdotorg/synapse). Here I'll specify the server name that I'd like other Matrix servers to know mine by (`bowdre.net`): + +```sh +$ docker run -it --rm \ + -v "/opt/matrix/synapse/data:/data" \ + -e SYNAPSE_SERVER_NAME=bowdre.net \ + -e SYNAPSE_REPORT_STATS=yes \ + matrixdotorg/synapse generate + +Unable to find image 'matrixdotorg/synapse:latest' locally +latest: Pulling from matrixdotorg/synapse +69692152171a: Pull complete +66a3c154490a: Pull complete +3e35bdfb65b2: Pull complete +f2c4c4355073: Pull complete +65d67526c337: Pull complete +5186d323ad7f: Pull complete +436afe4e6bba: Pull complete +c099b298f773: Pull complete +50b871f28549: Pull complete +Digest: sha256:5ccac6349f639367fcf79490ed5c2377f56039ceb622641d196574278ed99b74 +Status: Downloaded newer image for matrixdotorg/synapse:latest +Creating log config /data/bowdre.net.log.config +Generating config file /data/homeserver.yaml +Generating signing key file /data/bowdre.net.signing.key +A config file has been generated in '/data/homeserver.yaml' for server name 'bowdre.net'. Please review this file and customise it to your needs. +``` + +As instructed, I'll use `sudo vi data/homeserver.yaml` to review/modify the generated config. 
I'll leave +```yaml +server_name: "bowdre.net" +``` +since that's how I'd like other servers to know my server, and I'll uncomment/edit in: +```yaml +public_baseurl: https://matrix.bowdre.net +``` +since that's what users (namely, me) will put into their Matrix clients to connect. + +And for now, I'll temporarily set: +```yaml +enable_registration: true +``` +so that I can create a user account without fumbling with the CLI. I'll be sure to set `enable_registration: false` again once I've registered the account(s) I need to have on my server. The instance has limited resources so it's probably not a great idea to let just anybody create an account on it. + +There are a bunch of other useful configurations that can be made here, but these will do to get things going for now. + +Time to start it up: +``` +$ docker-compose up -d +Creating network "synapse_default" with the default driver +Creating synapse ... done +``` + +And use `docker ps` to confirm that it's running: +``` +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +573612ec5735 matrixdotorg/synapse "/start.py" 25 seconds ago Up 23 seconds (healthy) 8009/tcp, 127.0.0.1:8008->8008/tcp, 8448/tcp synapse +``` + +### Testing +And I can point my browser to `https://matrix.bowdre.net/_matrix/static/` and see the Matrix landing page: +![Synapse is running!](-9apQIUci.png) + +Before I start trying to connect with a client, I'm going to plug the server address in to the [Matrix Federation Tester](https://federationtester.matrix.org/) to make sure that other servers will be able to talk to it without any problems: +![Good to go](xqOt3SydX.png) + +And I can view the JSON report at the bottom of the page to confirm that it's correctly pulling my `.well-known` delegation: +```json +{ + "WellKnownResult": { + "m.server": "matrix.bowdre.net:443", + "CacheExpiresAt": 0 + }, +``` + +Now I can fire up my [Matrix client of choice](https://element.io/get-started)), specify my homeserver using its full FQDN, 
and [register](https://app.element.io/#/register) a new user account: +![image.png](2xe34VJym.png) + +(Once my account gets created, I go back to edit `/opt/matrix/synapse/data/homeserver.yaml` again and set `enable_registration: false`, then fire a `docker-compose restart` command to restart the Synapse container.) + +### Wrap-up +And that's it! I now have my own Matrix server, and I can use my new account for secure chats with Matrix users on any other federated homeserver. It works really well for directly messaging other individuals, and also for participating in small group chats. The server *does* kind of fall on its face if I try to join a massively-populated (like 500+ users) room, but I'm not going to complain about that too much on a free-tier server. + +All in, I'm pretty pleased with how this little project turned out, and I learned quite a bit along the way. I'm tremendously impressed by Caddy's power and simplicity, and I look forward to using it more in future projects. + +### Update: Updating +After a while, it's probably a good idea to update both the Ubntu server and the Synapse container running on it. 
Updating the server itself is as easy as: +```sh +sudo apt update +sudo apt upgrade +# And, if needed: +sudo reboot +``` + +Here's what I do to update the container: +```sh +# Move to the working directory +cd /opt/matrix/synapse +# Pull a new version of the synapse image +docker-compose pull +# Stop the container +docker-compose down +# Start it back up without the old version +docker-compose up -d --remove-orphans +# Periodically remove the old docker images +docker image prune +``` diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/lBjINolYq.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/lBjINolYq.png new file mode 100644 index 0000000..2102f1d Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/lBjINolYq.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/nnQ7aQrpm.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/nnQ7aQrpm.png new file mode 100644 index 0000000..dd8df23 Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/nnQ7aQrpm.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/s5Y0rycng.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/s5Y0rycng.png new file mode 100644 index 0000000..c34bfdf Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/s5Y0rycng.png differ diff --git a/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/xqOt3SydX.png b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/xqOt3SydX.png new file mode 100644 index 0000000..c3b9036 Binary files /dev/null and b/content/post/federated-matrix-server-synapse-on-oracle-clouds-free-tier/xqOt3SydX.png differ diff --git a/content/post/finding-the-most-popular-ips-in-a-log-file/index.md 
b/content/post/finding-the-most-popular-ips-in-a-log-file/index.md new file mode 100644 index 0000000..98f6c66 --- /dev/null +++ b/content/post/finding-the-most-popular-ips-in-a-log-file/index.md @@ -0,0 +1,53 @@ +--- +series: Tips +date: "2020-09-13T08:34:30Z" +usePageBundles: true +tags: +- linux +- shell +- logs +- regex +title: Finding the most popular IPs in a log file +--- + +I found myself with a sudden need for parsing a Linux server's logs to figure out which host(s) had been slamming it with an unexpected burst of traffic. Sure, there are proper log analysis tools out there which would undoubtedly make short work of this but none of those were installed on this hardened system. So this is what I came up with. + +### Find IP-ish strings +This will get you all occurrences of things which look vaguely like IPv4 addresses: +```shell +grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT +``` +(It's not a perfect IP address regex since it would match things like `987.654.321.555` but it's close enough for my needs.) + +### Filter out `localhost` +The log likely include a LOT of traffic to/from `127.0.0.1` so let's toss out `localhost` by piping through `grep -v "127.0.0.1"` (`-v` will do an inverse match - only return results which *don't* match the given expression): +```shell +grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" +``` + +### Count up the duplicates +Now we need to know how many times each IP shows up in the log. We can do that by passing the output through `uniq -c` (`uniq` will filter for unique entries, and the `-c` flag will return a count of how many times each result appears): +```shell +grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | uniq -c +``` + +### Sort the results +We can use `sort` to sort the results. 
`-n` tells it to sort based on numeric rather than character values, and `-r` reverses the list so that the larger numbers appear at the top:
\ No newline at end of file diff --git a/content/post/fixing-wsl2-connectivity-when-connected-to-a-vpn-with-wsl-vpnkit/MnmMuA0HC.png b/content/post/fixing-wsl2-connectivity-when-connected-to-a-vpn-with-wsl-vpnkit/MnmMuA0HC.png new file mode 100644 index 0000000..4c4e89d Binary files /dev/null and b/content/post/fixing-wsl2-connectivity-when-connected-to-a-vpn-with-wsl-vpnkit/MnmMuA0HC.png differ diff --git a/content/post/fixing-wsl2-connectivity-when-connected-to-a-vpn-with-wsl-vpnkit/index.md b/content/post/fixing-wsl2-connectivity-when-connected-to-a-vpn-with-wsl-vpnkit/index.md new file mode 100644 index 0000000..c09df0e --- /dev/null +++ b/content/post/fixing-wsl2-connectivity-when-connected-to-a-vpn-with-wsl-vpnkit/index.md @@ -0,0 +1,28 @@ +--- +date: "2020-10-07T08:34:30Z" +thumbnail: MnmMuA0HC.png +usePageBundles: true +tags: +- windows +- linux +- wsl +- vpn +title: Fixing WSL2 connectivity when connected to a VPN with wsl-vpnkit +toc: false +--- + +I was pretty excited to get [WSL2 and Docker working on my Windows 10 1909](/docker-on-windows-10-with-wsl2) laptop a few weeks ago, but I quickly encountered a problem: WSL2 had no network connectivity when connected to my work VPN. + +Well, that's not *entirely* true; Docker worked just fine, but nothing else could talk to anything outside of the WSL environment. I found a few open issues for this problem in the [WSL2 Github](https://github.com/microsoft/WSL/issues?q=is%3Aissue+is%3Aopen+VPN) with suggested workarounds including modifying Windows registry entries, adjusting the metrics assigned to various virtual network interfaces within Windows, and manually setting DNS servers in `/etc/resolv.conf`. None of these worked for me. + +I eventually came across a solution [here](https://github.com/sakai135/wsl-vpnkit) which did the trick. 
This takes advantage of the fact that Docker for Windows is already utilizing `vpnkit` for connectivity - so you may also want to be sure Docker Desktop is configured to start at login. + +The instructions worked well for me so I won't rehash them all here. When it came time to modify my `/etc/resolv.conf` file, I added in two of the internal DNS servers followed by the IP for my home router's DNS service. This allows me to use WSL2 both on and off the corporate network without having to reconfigure things. + +All I need to do now is execute `sudo ./wsl-vpnkit` and leave that running in the background when I need to use WSL while connected to the corporate VPN. + + +![Successful connection via wsl-vpnkit](MnmMuA0HC.png) + +Whew! Okay, back to work. + diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_add_mapping_1.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_add_mapping_1.png new file mode 100644 index 0000000..a4a9b27 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_add_mapping_1.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_add_mapping_2.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_add_mapping_2.png new file mode 100644 index 0000000..7ff6598 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_add_mapping_2.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_authorize_cloud_shell.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_authorize_cloud_shell.png new file mode 100644 index 0000000..c51a490 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_authorize_cloud_shell.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_cloud_shell.png 
b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_cloud_shell.png new file mode 100644 index 0000000..4a569f0 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_cloud_shell.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_create_project.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_create_project.png new file mode 100644 index 0000000..5ffb012 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_create_project.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_domain_mapping.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_domain_mapping.png new file mode 100644 index 0000000..5e8d637 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_domain_mapping.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_enable_sheets_api.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_enable_sheets_api.png new file mode 100644 index 0000000..50ab804 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_enable_sheets_api.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_home_page.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_home_page.png new file mode 100644 index 0000000..17608c8 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_home_page.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_landing_page.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_landing_page.png new file mode 100644 index 0000000..3c9d86e Binary files /dev/null and 
b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_landing_page.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_manage_custom_domain.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_manage_custom_domain.png new file mode 100644 index 0000000..7ce9c44 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_manage_custom_domain.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_open_in_cloud_shell.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_open_in_cloud_shell.png new file mode 100644 index 0000000..114bff6 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_open_in_cloud_shell.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_service_account.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_service_account.png new file mode 100644 index 0000000..9534112 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_service_account.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_share_with_svc_account.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_share_with_svc_account.png new file mode 100644 index 0000000..857d429 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_share_with_svc_account.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_sheet.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_sheet.png new file mode 100644 index 0000000..94d2278 Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_sheet.png differ diff --git 
a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_sheets_api_disabled.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_sheets_api_disabled.png new file mode 100644 index 0000000..36d409b Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_sheets_api_disabled.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/20210820_successful_redirect.png b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_successful_redirect.png new file mode 100644 index 0000000..da7ae3e Binary files /dev/null and b/content/post/free-serverless-url-shortener-google-cloud-run/20210820_successful_redirect.png differ diff --git a/content/post/free-serverless-url-shortener-google-cloud-run/index.md b/content/post/free-serverless-url-shortener-google-cloud-run/index.md new file mode 100644 index 0000000..0f863b4 --- /dev/null +++ b/content/post/free-serverless-url-shortener-google-cloud-run/index.md @@ -0,0 +1,91 @@ +--- +series: Projects +date: "2021-08-20T00:00:00Z" +lastmod: 2022-02-03 +usePageBundles: true +tags: +- gcp +- cloud +- serverless +title: Free serverless URL shortener on Google Cloud Run +--- +### Intro +I've been [using short.io with a custom domain](https://twitter.com/johndotbowdre/status/1370125198196887556) to keep track of and share messy links for a few months now. That approach has worked very well, but it's also seriously overkill for my needs. I don't need (nor want) tracking metrics to know anything about when those links get clicked, and short.io doesn't provide an easy way to turn that off. I was casually looking for a lighter self-hosted alternative today when I stumbled upon a *serverless* alternative: **[sheets-url-shortener](https://github.com/ahmetb/sheets-url-shortener)**. 
This uses [Google Cloud Run](https://cloud.google.com/run/) to run an ultralight application container which receives an incoming web request, looks for the path in a Google Sheet, and redirects the client to the appropriate URL. It supports connecting with a custom domain, and should run happily within the [Cloud Run Free Tier limits](https://cloud.google.com/run/pricing). + +The GitHub instructions were pretty straightforward but I did have to fumble through a few additional steps to get everything up and running. Here we go: + +### Shortcut mapping +Since the setup uses a simple Google Sheets document to map the shortcuts to the original long-form URLs, I started by going to [https://sheets.new](https://sheets.new) to create a new Sheet. I then just copied in the shortcuts and URLs I was already using in short.io. By the way, I learned on a previous attempt that this solution only works with lowercase shortcuts so I made sure to convert my `MixedCase` ones as I went. +![Creating a new sheet](20210820_sheet.png) + +I then made a note of the Sheet ID from the URL; that's the bit that looks like `1SMeoyesCaGHRlYdGj9VyqD-qhXtab1jrcgHZ0irvNDs`. That will be needed later on. + +### Create a new GCP project +I created a new project in my GCP account by going to [https://console.cloud.google.com/projectcreate](https://console.cloud.google.com/projectcreate) and entering a descriptive name. +![Creating a new GCP project](20210820_create_project.png) + +### Deploy to GCP +At this point, I was ready to actually kick off the deployment. Ahmet made this part exceptionally easy: just hit the **Run on Google Cloud** button from the [GitHub project page](https://github.com/ahmetb/sheets-url-shortener#setup). That opens up a Google Cloud Shell instance which prompts for authorization before it starts the deployment script.
+![Open in Cloud Shell prompt](20210820_open_in_cloud_shell.png) + +![Authorize Cloud Shell prompt](20210820_authorize_cloud_shell.png) + +The script prompted me to select a project and a region, and then asked for the Sheet ID that I copied earlier. +![Cloud Shell deployment](20210820_cloud_shell.png) + +### Grant access to the Sheet +In order for the Cloud Run service to be able to see the URL mappings in the Sheet I needed to share the Sheet with the service account. That service account is found by going to [https://console.cloud.google.com/run](https://console.cloud.google.com/run), clicking on the new `sheets-url-shortener` service, and then viewing the **Permissions** tab. I'm interested in the one that's `############-compute@developer.gserviceaccount.com`. +![Finding the service account](20210820_service_account.png) + +I then went back to the Sheet, hit the big **Share** button at the top, and shared the Sheet to the service account with *Viewer* access. +![Sharing to the service account](20210820_share_with_svc_account.png) + +### Quick test +Back in GCP land, the details page for the `sheets-url-shortener` Cloud Run service shows a gross-looking URL near the top: `https://sheets-url-shortener-vrw7x6wdzq-uc.a.run.app`. That doesn't do much for *shortening* my links, but it'll do just fine for a quick test. First, I pointed my browser straight to that listed URL: +![Testing the web server](20210820_home_page.png) + +This at least tells me that the web server portion is working. Now to see if I can redirect to my [project car posts on Polywork](https://john.bowdre.net/?badges%5B%5D=Car+Nerd): +![Testing a redirect](20210820_sheets_api_disabled.png) + +Hmm, not quite. Luckily the error tells me exactly what I need to do... + +### Enable Sheets API +I just needed to visit `https://console.developers.google.com/apis/api/sheets.googleapis.com/overview?project=############` to enable the Google Sheets API.
+![Enabling Sheets API](20210820_enable_sheets_api.png) + +Once that's done, I can try my redirect again - and, after a brief moment, it successfully sends me on to Polywork! +![Successful redirect](20210820_successful_redirect.png) + +### Link custom domain +The whole point of this project is to *shorten* URLs, but I haven't done that yet. I'll want to link in my `go.bowdre.net` domain to use that in place of the rather unwieldy `https://sheets-url-shortener-vrw7x6wdzq-uc.a.run.app`. I do that by going back to the [Cloud Run console](https://console.cloud.google.com/run) and selecting the option at the top to **Manage Custom Domains**. +![Manage custom domains](20210820_manage_custom_domain.png) + +I can then use the **Add Mapping** button, select my `sheets-url-shortener` service, choose one of my verified domains (which I *think* are already verified since they're registered through Google Domains with the same account), and then specify the desired subdomain. +![Adding a domain mapping](20210820_add_mapping_1.png) + +The wizard then tells me exactly what record I need to create/update with my domain host: +![CNAME details](20210820_add_mapping_2.png) + +It took a while for the domain mapping to go live once I've updated the record. +![Processing mapping...](20210820_domain_mapping.png) + +### Final tests +Once it did finally update, I was able to hit `https://go.bowdre.net` to get the error/landing page, complete with a valid SSL cert: +![Successful error!](20210820_landing_page.png) + +And testing [go.bowdre.net/ghia](https://go.bowdre.net/ghia) works as well! + +### Outro +I'm very pleased with how this quick little project turned out. Managing my shortened links with a Google Sheet is quite convenient, and I really like the complete lack of tracking or analytics. Plus I'm a sucker for an excuse to use a cloud technology I haven't played a lot with yet. + +And now I can hand out handy-dandy short links! 
+ +| Link | Description| +| --- | --- | +| [go.bowdre.net/ghia](https://go.bowdre.net/ghia) | 1974 VW Karmann Ghia project | +| [go.bowdre.net/conedoge](https://go.bowdre.net/conedoge) | 2014 Subaru BRZ autocross videos | +| [go.bowdre.net/matrix](https://go.bowdre.net/matrix) | Chat with me on Matrix | +| [go.bowdre.net/twits](https://go.bowdre.net/twits) | Follow me on Twitter | +| [go.bowdre.net/stadia](https://go.bowdre.net/stadia) | Game with me on Stadia | +| [go.bowdre.net/shorterer](https://go.bowdre.net/shorterer) | This post! | + diff --git a/content/post/gitea-self-hosted-git-server/add_key.png b/content/post/gitea-self-hosted-git-server/add_key.png new file mode 100644 index 0000000..efbaeaf Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/add_key.png differ diff --git a/content/post/gitea-self-hosted-git-server/admin_menu.png b/content/post/gitea-self-hosted-git-server/admin_menu.png new file mode 100644 index 0000000..fea449a Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/admin_menu.png differ diff --git a/content/post/gitea-self-hosted-git-server/create_menu.png b/content/post/gitea-self-hosted-git-server/create_menu.png new file mode 100644 index 0000000..f7d161f Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/create_menu.png differ diff --git a/content/post/gitea-self-hosted-git-server/create_user_1.png b/content/post/gitea-self-hosted-git-server/create_user_1.png new file mode 100644 index 0000000..3459844 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/create_user_1.png differ diff --git a/content/post/gitea-self-hosted-git-server/create_user_2.png b/content/post/gitea-self-hosted-git-server/create_user_2.png new file mode 100644 index 0000000..bc23882 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/create_user_2.png differ diff --git a/content/post/gitea-self-hosted-git-server/empty_repo.png 
b/content/post/gitea-self-hosted-git-server/empty_repo.png new file mode 100644 index 0000000..0e9d5f7 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/empty_repo.png differ diff --git a/content/post/gitea-self-hosted-git-server/gitea-logo.png b/content/post/gitea-self-hosted-git-server/gitea-logo.png new file mode 100644 index 0000000..058f9b9 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/gitea-logo.png differ diff --git a/content/post/gitea-self-hosted-git-server/index.md b/content/post/gitea-self-hosted-git-server/index.md new file mode 100644 index 0000000..fe60b3c --- /dev/null +++ b/content/post/gitea-self-hosted-git-server/index.md @@ -0,0 +1,490 @@ +--- +title: "Gitea: Ultralight Self-Hosted Git Server" # Title of the blog post. +date: 2022-07-22 # Date of post creation. +lastmod: 2023-01-19 +description: "Deploying the lightweight Gitea Git server on Oracle Cloud's free Ampere Compute." +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "gitea-logo.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Projects +tags: + - linux + - docker + - cloud + - tailscale +comment: true # Disable comment if false. 
+--- +I recently started using [Obsidian](https://obsidian.md/) for keeping notes, tracking projects, and just generally organizing all the information that would otherwise pass into my brain and then fall out the other side. Unlike other similar solutions which operate entirely in *The Cloud*, Obsidian works with Markdown files stored in a local folder[^sync], which I find to be very attractive. Not only will this allow me to easily transfer my notes between apps if I find something I like better than Obsidian, but it also opens the door to using `git` to easily back up all this important information. + +Some of the contents might be somewhat sensitive, though, and I'm not sure I'd want to keep that data on a service outside of my control. A self-hosted option would be ideal. GitLab seemed like an obvious choice, but the resource requirements are a bit higher than would be justified by my single-user use case. I eventually came across [Gitea](https://gitea.io/), a lightweight Git server with a simple web interface (great for a Git novice like myself!) which boasts the ability to run on a Raspberry Pi. This sounded like a great candidate for running on an [Ampere ARM-based compute instance](https://www.oracle.com/cloud/compute/arm/) in my [Oracle Cloud free tier](https://www.oracle.com/cloud/free/) environment! + +In this post, I'll describe what I did to get Gitea up and running on a tiny ARM-based cloud server (though I'll just gloss over the cloud-specific configurations), as well as how I'm leveraging [Tailscale](/secure-networking-made-simple-with-tailscale/) to enable SSH Git access without having to expose that service to the internet. I based the bulk of this on the information provided in Gitea's [Install With Docker](https://docs.gitea.io/en-us/install-with-docker/) documentation. + + +[^sync]: Obsidian *does* offer a paid [Sync](https://obsidian.md/sync) plugin for keeping the content on multiple devices in sync, but it's somewhat spendy at $10 a month.
And much of the appeal of using a Markdown-based system for managing my notes is being in full control of the content. Plus I wanted an excuse to build a git server. + +### Create the server +I'll be deploying this on a cloud server with these specs: + +| | | +| --- | --- | +| Shape | `VM.Standard.A1.Flex` | +| Image | Ubuntu 22.04 | +| CPU Count | 1 | +| Memory (GB) | 6 | +| Boot Volume (GB) | 50 | + +I've described the [process of creating a new instance on OCI in a past post](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#instance-creation) so I won't reiterate that here. The only gotcha this time is switching the shape to `VM.Standard.A1.Flex`; the [OCI free tier](https://docs.oracle.com/en-us/iaas/Content/FreeTier/freetier_topic-Always_Free_Resources.htm) allows two AMD Compute VMs (which I've already used up) as well as *up to four* ARM Ampere A1 instances[^free_ampere]. + +[^free_ampere]: The first 3000 OCPU hours and 18,000 GB hours per month are free, equivalent to 4 OCPUs and 24 GB of memory allocated however you see fit. + +### Prepare the server +Once the server's up and running, I go through the usual steps of applying any available updates: +```bash +sudo apt update +sudo apt upgrade +``` + +#### Install Tailscale +And then I'll install Tailscale using their handy-dandy bootstrap script: + +```bash +curl -fsSL https://tailscale.com/install.sh | sh +``` + +When I bring up the Tailscale interface, I'll use the `--advertise-tags` flag to identify the server with an [ACL tag](https://tailscale.com/kb/1068/acl-tags/). ([Within my tailnet](/secure-networking-made-simple-with-tailscale/#acls)[^tailnet], all of my other clients are able to connect to devices bearing the `cloud` tag but `cloud` servers can only reach back to other devices for performing DNS lookups.) 
+```bash +sudo tailscale up --advertise-tags "tag:cloud" +``` + +[^tailnet]: [Tailscale's term](https://tailscale.com/kb/1136/tailnet/) for the private network which securely links Tailscale-connected devices. + +#### Install Docker +Next I install Docker and `docker-compose`: + +```bash +sudo apt install ca-certificates curl gnupg lsb-release +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt update +sudo apt install docker-ce docker-ce-cli containerd.io docker-compose docker-compose-plugin +``` + +#### Configure firewall +This server automatically had an iptables firewall rule configured to permit SSH access. For Gitea, I'll also need to configure HTTP/HTTPS access. 
[As before](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#firewall-configuration), I need to be mindful of the explicit `REJECT all` rule at the bottom of the `INPUT` chain: + +```bash +$ sudo iptables -L INPUT --line-numbers +Chain INPUT (policy ACCEPT) +num target prot opt source destination +1 ts-input all -- anywhere anywhere +2 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +3 ACCEPT icmp -- anywhere anywhere +4 ACCEPT all -- anywhere anywhere +5 ACCEPT udp -- anywhere anywhere udp spt:ntp +6 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +7 REJECT all -- anywhere anywhere reject-with icmp-host-prohibited +``` + +So I'll insert the new rules at line 6: +```bash +sudo iptables -L INPUT --line-numbers +sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 80 -j ACCEPT +sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 443 -j ACCEPT +``` + +And confirm that it did what I wanted it to: +```bash +$ sudo iptables -L INPUT --line-numbers +Chain INPUT (policy ACCEPT) +num target prot opt source destination +1 ts-input all -- anywhere anywhere +2 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +3 ACCEPT icmp -- anywhere anywhere +4 ACCEPT all -- anywhere anywhere +5 ACCEPT udp -- anywhere anywhere udp spt:ntp +6 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:https +7 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:http +8 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +9 REJECT all -- anywhere anywhere reject-with icmp-host-prohibited +``` + +That looks good, so let's save the new rules: +```bash +$ sudo netfilter-persistent save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save +``` + +{{% notice info "Cloud Firewall" %}} +Of course I will also need to create matching rules in the cloud firewall, but I'm not going to detail [those
steps](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#firewall-configuration) again here. And since I've now got Tailscale up and running I can remove the pre-created rule to allow SSH access through the cloud firewall. +{{% /notice %}} + +### Install Gitea +I'm now ready to move on with installing Gitea itself. + +#### Prepare `git` user +I'll start with creating a `git` user. This account will be set as the owner of the data volume used by the Gitea container, but will also (perhaps more importantly) facilitate [SSH passthrough](https://docs.gitea.io/en-us/install-with-docker/#ssh-container-passthrough) into the container for secure git operations. + +Here's where I create the account and also generate what will become the SSH key used by the git server: +```bash +sudo useradd -s /bin/bash -m git +sudo -u git ssh-keygen -t ecdsa -C "Gitea Host Key" +``` + +The `git` user's SSH public key gets added as-is directly to that user's `authorized_keys` file: +```bash +sudo -u git cat /home/git/.ssh/id_ecdsa.pub | sudo -u git tee -a /home/git/.ssh/authorized_keys +sudo -u git chmod 600 /home/git/.ssh/authorized_keys +``` + +When other users add their SSH public keys into Gitea's web UI, those will get added to `authorized_keys` with a little something extra: an alternate command to perform git actions instead of just SSH ones: +``` +command="/usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-1",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty +``` + +{{% notice info "Not just yet" %}} +No users have added their keys to Gitea just yet so if you look at `/home/git/.ssh/authorized_keys` right now you won't see this extra line, but I wanted to go ahead and mention it to explain the next step. It'll show up later. I promise. 
+{{% /notice %}} + +So I'll go ahead and create that extra command: +```bash +cat <<"EOF" | sudo tee /usr/local/bin/gitea +#!/bin/sh +ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" +EOF +sudo chmod +x /usr/local/bin/gitea +``` + +So when I use a `git` command to interact with the server via SSH, the commands will get relayed into the Docker container on port 2222. + +#### Create `docker-compose` definition +That takes care of most of the prep work, so now I'm ready to create the `docker-compose.yaml` file which will tell Docker how to host Gitea. + +I'm going to place this in `/opt/gitea`: +```bash +sudo mkdir -p /opt/gitea +cd /opt/gitea +``` + +And I want to be sure that my new `git` user owns the `./data` directory which will be where the git contents get stored: +```bash +sudo mkdir data +sudo chown git:git -R data +``` + +Now to create the file: +```bash +sudo vi docker-compose.yaml +``` + +The basic contents of the file came from the [Gitea documentation for Installation with Docker](https://docs.gitea.io/en-us/install-with-docker/), but I also included some (highlighted) additional environment variables based on the [Configuration Cheat Sheet](https://docs.gitea.io/en-us/config-cheat-sheet/): + +`docker-compose.yaml`: +```yaml {hl_lines=["12-13","19-31",38,43]} +version: "3" + +networks: + gitea: + external: false + +services: + server: + image: gitea/gitea:latest + container_name: gitea + environment: + - USER_UID=1003 + - USER_GID=1003 + - GITEA__database__DB_TYPE=postgres + - GITEA__database__HOST=db:5432 + - GITEA__database__NAME=gitea + - GITEA__database__USER=gitea + - GITEA__database__PASSWD=gitea + - GITEA____APP_NAME=Gitea + - GITEA__log__MODE=file + - GITEA__openid__ENABLE_OPENID_SIGNIN=false + - GITEA__other__SHOW_FOOTER_VERSION=false + - GITEA__repository__DEFAULT_PRIVATE=private + - GITEA__repository__DISABLE_HTTP_GIT=true + - GITEA__server__DOMAIN=git.bowdre.net + - 
GITEA__server__SSH_DOMAIN=git.tadpole-jazz.ts.net + - GITEA__server__ROOT_URL=https://git.bowdre.net/ + - GITEA__server__LANDING_PAGE=explore + - GITEA__service__DISABLE_REGISTRATION=true + - GITEA__service_0X2E_explore__DISABLE_USERS_PAGE=true + - GITEA__ui__DEFAULT_THEME=arc-green + + restart: always + networks: + - gitea + volumes: + - ./data:/data + - /home/git/.ssh/:/data/git/.ssh + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + ports: + - "3000:3000" + - "127.0.0.1:2222:22" + depends_on: + - db + + db: + image: postgres:14 + container_name: gitea_db + restart: always + environment: + - POSTGRES_USER=gitea + - POSTGRES_PASSWORD=gitea + - POSTGRES_DB=gitea + networks: + - gitea + volumes: + - ./postgres:/var/lib/postgresql/data +``` +{{% notice info "Pin the PostgreSQL version" %}} +The format of PostgreSQL data changes with new releases, and that means that the data created by different major releases are not compatible. Unless you take steps to upgrade the data format, you'll have problems when a new major release of PostgreSQL arrives. Avoid the headache: pin this to a major version (as I did with `image: postgres:14` above) so you can upgrade on your terms. +{{% /notice %}} + +Let's go through the extra configs in a bit more detail: +| Variable setting | Purpose | +|:--- |:--- | +|`USER_UID=1003` | User ID of the `git` user on the container host | +|`USER_GID=1003` | GroupID of the `git` user on the container host | +|`GITEA____APP_NAME=Gitea` | Sets the title of the site. I shortened it from `Gitea: Git with a cup of tea` because that seems unnecessarily long. 
| +|`GITEA__log__MODE=file` | Enable logging | +|`GITEA__openid__ENABLE_OPENID_SIGNIN=false` | Disable signin through OpenID | +|`GITEA__other__SHOW_FOOTER_VERSION=false` | Anyone who hits the web interface doesn't need to know the version | +|`GITEA__repository__DEFAULT_PRIVATE=private` | All repos will default to private unless I explicitly override that | +|`GITEA__repository__DISABLE_HTTP_GIT=true` | Require that all Git operations occur over SSH | +|`GITEA__server__DOMAIN=git.bowdre.net` | Domain name of the server | +|`GITEA__server__SSH_DOMAIN=git.tadpole-jazz.ts.net` | Leverage Tailscale's [MagicDNS](https://tailscale.com/kb/1081/magicdns/) to tell clients how to SSH to the Tailscale internal IP | +|`GITEA__server__ROOT_URL=https://git.bowdre.net/` | Public-facing URL | +|`GITEA__server__LANDING_PAGE=explore` | Defaults to showing the "Explore" page (listing any public repos) instead of the "Home" page (which just tells about the Gitea project) | +|`GITEA__service__DISABLE_REGISTRATION=true` | New users will not be able to self-register for access; they will have to be manually added by the Administrator account that will be created during the initial setup | +|`GITEA__service_0X2E_explore__DISABLE_USERS_PAGE=true` | Don't allow browsing of user accounts | +|`GITEA__ui__DEFAULT_THEME=arc-green` | Default to the darker theme | + +Beyond the environment variables, I also defined a few additional options to allow the SSH passthrough to function. Mounting the `git` user's SSH config directory into the container will ensure that user keys defined in Gitea will also be reflected outside of the container, and setting the container to listen on local port `2222` will allow it to receive the forwarded SSH connections: + +```yaml + volumes: + [...] + - /home/git/.ssh/:/data/git/.ssh + [...] + ports: + [...] 
+ - "127.0.0.1:2222:22" +``` + +With the config in place, I'm ready to fire it up: + +#### Start containers +Starting Gitea is as simple as +```bash +sudo docker-compose up -d +``` +which will spawn both the Gitea server as well as a `postgres` database to back it. + +Gitea will be listening on port `3000`.... which isn't exposed outside of the VM it's running on so I can't actually do anything with it just yet. Let's see about changing that. + +### Configure Caddy reverse proxy +I've [written before](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#reverse-proxy-setup) about [Caddy server](https://caddyserver.com/) and how simple it makes creating a reverse proxy with automatic HTTPS. While Gitea does include [built-in HTTPS support](https://docs.gitea.io/en-us/https-setup/), configuring that to work within Docker seems like more work to me. + +#### Install Caddy +So exactly how simple does Caddy make this? Well let's start with installing Caddy on the system: + +```bash +sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list +sudo apt update +sudo apt install caddy +``` + +#### Configure Caddy +Configuring Caddy is as simple as creating a Caddyfile: +```bash +sudo vi /etc/caddy/Caddyfile +``` + +Within that file, I tell it which fully-qualified domain name(s) I'd like it to respond to (and manage SSL certificates for), as well as that I'd like it to function as a reverse proxy and send the incoming traffic to the same port `3000` that's used by the Docker container: +``` +git.bowdre.net { + reverse_proxy localhost:3000 +} +``` + +That's it. I don't need to worry about headers or ACME configurations or anything else.
Those three lines are all that's required for this use case. It almost seems too easy! + +#### Start Caddy +All that's left at this point is to start up Caddy: +```bash +sudo systemctl enable caddy +sudo systemctl start caddy +sudo systemctl restart caddy +``` + +I found that the `restart` is needed to make sure that the config file gets loaded correctly. And after a moment or two, I can point my browser over to `https://git.bowdre.net` and see the default landing page, complete with a valid certificate. + +### Configure Gitea +Now that Gitea is installed, I'll need to go through the initial configuration process to actually be able to use it. Fortunately most of this stuff was taken care of by all the environment variables I crammed into the `docker-compose.yaml` file earlier. All I *really* need to do is create an administrative user: +![Initial configuration](initial_config.png) + +I can now press the friendly **Install Gitea** button, and after just a few seconds I'll be able to log in with that new administrator account. + +#### Create user account +I don't want to use that account for all my git actions though so I click on the menu at the top right and select the **Site Administration** option: +![Admin menu](admin_menu.png) + +From there I can navigate to the *User Accounts* tab and use the **Create User Account** button to make a new account: +![User Accounts page](create_user_1.png) + +![Creating a new user](create_user_2.png) + +And then I can log out and log back in with my new non-admin identity! + +#### Add SSH public key +Associating a public key with my new Gitea account will allow me to easily authenticate my pushes from the command line.
I can create a new SSH public/private keypair by following [GitHub's instructions](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent): +```shell +ssh-keygen -t ed25519 -C "user@example.com" +``` + +I'll view the contents of the public key - and go ahead and copy the output for future use: +``` +; cat ~/.ssh/id_ed25519.pub +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5ExSsQfr6pAFBEZ7yx0oljSnpnOixvp8DS26STcx2J user@example.com +``` + +Back in the Gitea UI, I'll click the user menu up top and select **Settings**, then the *SSH / GPG Keys* tab, and click the **Add Key** button: +![User menu](user_menu.png) +![Adding a public key](add_key.png) + +I can give the key a name and then paste in that public key, and then click the lower **Add Key** button to insert the new key. + +To verify that the SSH passthrough magic I [configured earlier](#prepare-git-user) is working, I can take a look at `git`'s `authorized_keys` file: +```shell{hl_lines=3} +; sudo tail -2 /home/git/.ssh/authorized_keys +# gitea public key +command="/usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-3",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,no-user-rc,restrict ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5ExSsQfr6pAFBEZ7yx0oljSnpnOixvp8DS26STcx2J user@example.com +``` + +Hey - there's my public key, being preceded by the customized command I defined earlier. There's one last thing I'd like to do before I get to populating my new server with content... + +### Configure Fail2ban +I'm already limiting this server's exposure by blocking inbound SSH (except for what's magically tunneled through Tailscale) at the Oracle Cloud firewall, but I still have to have TCP ports `80` and `443` open for the web interface. It would be nice if those web ports didn't get hammered with invalid login attempts. 
+ +[Fail2ban](https://www.fail2ban.org/wiki/index.php/Main_Page) can help with that by monitoring log files for repeated authentication failures and then creating firewall rules to block the offender. + +Installing Fail2ban is simple: +```shell +sudo apt update +sudo apt install fail2ban +``` + +Then I need to tell Fail2ban what to look for when detecting failed logins to Gitea. This can often be a tedious process of crawling through logs looking for example failure messages, but fortunately the [Gitea documentation](https://docs.gitea.io/en-us/fail2ban-setup/) tells me what I need to know. + +Specifically, I'll want to watch `log/gitea.log` for messages like the following: +``` +2018/04/26 18:15:54 [I] Failed authentication attempt for user from xxx.xxx.xxx.xxx +``` +``` +2020/10/15 16:08:44 ...s/context/context.go:204:HandleText() [E] invalid credentials from xxx.xxx.xxx.xxx +``` + +So let's create that filter: +```shell +sudo vi /etc/fail2ban/filter.d/gitea.conf +``` +`/etc/fail2ban/filter.d/gitea.conf`: +``` +[Definition] +failregex = .*(Failed authentication attempt|invalid credentials).* from +ignoreregex = +``` + +Next I create the jail, which tells Fail2ban what to do: +```shell +sudo vi /etc/fail2ban/jail.d/gitea.conf +``` +`/etc/fail2ban/jail.d/gitea.conf`: +``` +[gitea] +enabled = true +filter = gitea +logpath = /opt/gitea/data/gitea/log/gitea.log +maxretry = 5 +findtime = 3600 +bantime = 86400 +action = iptables-allports +``` + +This configures Fail2ban to watch the log file (`logpath`) inside the data volume mounted to the Gitea container for messages which match the pattern I just configured (`gitea`). If a system fails to log in 5 times (`maxretry`) within 1 hour (`findtime`, in seconds) then the offending IP will be banned for 1 day (`bantime`, in seconds). 
+ +Then I just need to enable and start Fail2ban: +```shell +sudo systemctl enable fail2ban +sudo systemctl start fail2ban +``` + +To verify that it's working, I can deliberately fail to log in to the web interface and watch `/var/log/fail2ban.log`: +```shell +; sudo tail -f /var/log/fail2ban.log +2022-07-17 21:52:26,978 fail2ban.filter [36042]: INFO [gitea] Found ${MY_HOME_IP}| - 2022-07-17 21:52:26 +``` + +Excellent, let's now move on to creating some content. +### Work with Gitea +#### Mirror content from GitHub +As an easy first sync, I'm going to simply link a new repository on this server to an existing one I have at GitHub, namely [this one](https://github.com/jbowdre/vrealize) which I'm using to track some of my vRealize work. I'll set this up as a one-way mirror so that it will automatically pull in any new upstream changes but new commits made through Gitea will stay in Gitea. And I'll do that by clicking the **+** button at the top right and selecting **New Migration**. + +![New migration menu](new_migration.png) + +Gitea includes support for easy migrations from several content sources: +![Migration sources](migration_sources.png) + +I pick the GitHub one and then plug in the details of the GitHub repo: +![Migrating from GitHub](migrate_github.png) + +And after just a few moments, all the content from my GitHub repo shows up in my new Gitea one: +![Mirrored repo](mirrored_repo.png) + + +You might have noticed that I unchecked the *Make Repository Private* option for this one, so feel free to browse the mirrored repo at https://git.bowdre.net/vPotato/vrealize if you'd like to check out Gitea for yourself. + +#### Create a new repo +The real point of this whole exercise was to sync my Obsidian vault to a Git server under my control, so it's time to create a place for that content to live.
I'll go to the **+** menu again but this time select **New Repository**, and then enter the required information: +![New repository](new_repository.png) + +Once it's created, the new-but-empty repository gives me instructions on how I can interact with it. Note that the SSH address uses the special `git.tadpole-jazz.ts.net` Tailscale domain name which is only accessible within my tailnet. + + +![Empty repository](empty_repo.png) + +Now I can follow the instructions to initialize my local Obsidian vault (stored at `~/obsidian-vault/`) as a git repository and perform my initial push to Gitea: +```shell +cd ~/obsidian-vault/ +git init +git add . +git commit -m "initial commit" +git remote add origin git@git.tadpole-jazz.ts.net:john/obsidian-vault.git +git push -u origin main +``` + +And if I refresh the page in my browser, I'll see all that content which has just been added: +![Populated repo](populated_repo.png) + +### Conclusion +So now I've got a lightweight, web-enabled, personal git server running on a (free!) cloud server under my control. It's working brilliantly in conjunction with the community-maintained [obsidian-git](https://github.com/denolehov/obsidian-git) plugin for keeping my notes synced across my various computers. On Android, I'm leveraging the free [GitJournal](https://play.google.com/store/apps/details?id=io.gitjournal.gitjournal) app as a simple git client for pulling the latest changes (as described [on another blog I found](https://orth.uk/obsidian-sync/#clone-the-repo-on-your-android-phone-)). 
+ + + diff --git a/content/post/gitea-self-hosted-git-server/initial_config.png b/content/post/gitea-self-hosted-git-server/initial_config.png new file mode 100644 index 0000000..7b2e989 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/initial_config.png differ diff --git a/content/post/gitea-self-hosted-git-server/migrate_github.png b/content/post/gitea-self-hosted-git-server/migrate_github.png new file mode 100644 index 0000000..3e5a198 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/migrate_github.png differ diff --git a/content/post/gitea-self-hosted-git-server/migration_sources.png b/content/post/gitea-self-hosted-git-server/migration_sources.png new file mode 100644 index 0000000..4f8652b Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/migration_sources.png differ diff --git a/content/post/gitea-self-hosted-git-server/mirrored_repo.png b/content/post/gitea-self-hosted-git-server/mirrored_repo.png new file mode 100644 index 0000000..0456519 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/mirrored_repo.png differ diff --git a/content/post/gitea-self-hosted-git-server/new_migration.png b/content/post/gitea-self-hosted-git-server/new_migration.png new file mode 100644 index 0000000..7adbbac Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/new_migration.png differ diff --git a/content/post/gitea-self-hosted-git-server/new_repository.png b/content/post/gitea-self-hosted-git-server/new_repository.png new file mode 100644 index 0000000..3fbe738 Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/new_repository.png differ diff --git a/content/post/gitea-self-hosted-git-server/populated_repo.png b/content/post/gitea-self-hosted-git-server/populated_repo.png new file mode 100644 index 0000000..819fa6a Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/populated_repo.png differ diff --git 
a/content/post/gitea-self-hosted-git-server/user_menu.png b/content/post/gitea-self-hosted-git-server/user_menu.png new file mode 100644 index 0000000..36872de Binary files /dev/null and b/content/post/gitea-self-hosted-git-server/user_menu.png differ diff --git a/content/post/hello-hugo/celebration.gif b/content/post/hello-hugo/celebration.gif new file mode 100644 index 0000000..3f3c3f0 Binary files /dev/null and b/content/post/hello-hugo/celebration.gif differ diff --git a/content/post/hello-hugo/hugo-logo-wide.png b/content/post/hello-hugo/hugo-logo-wide.png new file mode 100644 index 0000000..dacc828 Binary files /dev/null and b/content/post/hello-hugo/hugo-logo-wide.png differ diff --git a/content/post/hello-hugo/index.md b/content/post/hello-hugo/index.md new file mode 100644 index 0000000..8d7b89f --- /dev/null +++ b/content/post/hello-hugo/index.md @@ -0,0 +1,42 @@ +--- +title: "Hello Hugo" # Title of the blog post. +date: 2021-12-19 # Date of post creation. +lastmod: 2021-12-20 +description: "I migrated my blog from a Jekyll site hosted on Github Pages to a Hugo site stored in Gitlab and published via Netlify" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +featureImage: "/hugo-logo-wide.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "/hugo-logo-wide.png" # Sets thumbnail image appearing inside card on homepage. +shareImage: "/hugo-logo-wide.png" +# shareImage: "/images/path/share.png" # Designate a separate image for social media sharing. 
+codeMaxLines: 10 # Override global value for how many lines within a code block before auto-collapsing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +tags: + - meta + - hugo +comment: true # Disable comment if false. +--- +**Oops, I did it again.** + +It wasn't [all that long ago](/virtually-potato-migrated-to-github-pages) that I migrated this blog from Hashnode to a Jekyll site published via GitHub Pages. Well, a few weeks ago I learned a bit about another static site generator called [Hugo](https://gohugo.io/), and I just *had* to give it a try. And I came away from my little experiment quite impressed! + +While Jekyll is built on Ruby and requires you to install and manage a Ruby environment before being able to use it to generate a site, Hugo is built on Go and requires nothing more than the `hugo` binary. That makes it much easier for me to hop between devices. Getting started with Hugo is [pretty damn simple](https://gohugo.io/getting-started/quick-start/), and Hugo provides some very cool [built-in features](https://gohugo.io/about/features/) which Jekyll would need external plugins to provide. And there are of course [plenty of lovely themes](https://themes.gohugo.io/) to help your site look its best. + +Hugo's real claim to fame, though, is its speed. Building a site with Hugo is *much* faster than with Jekyll, and that makes it quicker to test changes locally before pushing them out onto the internet. + +Jekyll was a great way for me to get started on managing my own site with a SSG, but Hugo seems to me like a more modern approach. I decided to start working on migrating Virtually Potato over to Hugo. Hugo even made it easy to import my existing content with the `hugo import jekyll` command. + +After a few hours spent trying out different themes, I landed on the [Hugo Clarity theme](https://github.com/chipzoller/hugo-clarity) which is based on [VMware's Clarity Design](https://clarity.design/). 
This theme offers a user-selectable light/dark theme, lots of great enhancements for displaying code snippets, and a responsive mobile layout, and I just thought that incorporating some of VMware's style into this site felt somehow appropriate. It did take quite a bit of tweaking to get everything integrated and working the way I wanted it to (and to update the existing content to fit), but I learned a ton in the process so I consider that time well spent. + +Along the way I also wanted to try out [Netlify](https://www.netlify.com/) for building and serving the site online instead of the rather bare-bones GitHub Pages that I'd been using. Like GitHub Pages, you can configure Netlify to watch a repository (on GitHub, GitLab, or Bitbucket) and it will fire off a build whenever new stuff is committed. By default, that latest build will be automatically published to your site, but Netlify also provides much more control of this process. You can pause publishing, manually publish a certain deployment, quickly rollback in case of any issues, and also preview deployments before they get published to the live site. + +Putting Netlify in front of the repositories where my site content is stored also enabled a pretty seamless transition once I was ready to actually flip the switch on the new-and-improved Virtually Potato. I had actually been using Netlify to serve the Jekyll version of this site for a week or two. When it was time to change, I disabled the auto-publish feature to pin that version of the site and then reconfigured which repository Netlify was watching. That kicked off a new (unpublished) deploy of the new Hugo site and I was able to preview it to confirm that everything looked just as it had in my local environment. Once I was satisfied I just clicked a button to start publishing the Hugo-based deploy, and the new site was live, instantly - no messing with DNS records or worrying about certificates, that was all taken care of by Netlify. 
+ +**Anyway, here we are: the new Virtually Potato, powered by Hugo and Netlify!** + +![Woohoo!](celebration.gif) \ No newline at end of file diff --git a/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/8Jg4zgrgB.png b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/8Jg4zgrgB.png new file mode 100644 index 0000000..4a5b924 Binary files /dev/null and b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/8Jg4zgrgB.png differ diff --git a/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/Gu5I3LUep.png b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/Gu5I3LUep.png new file mode 100644 index 0000000..df0da69 Binary files /dev/null and b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/Gu5I3LUep.png differ diff --git a/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/U3LfmEJ_7.png b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/U3LfmEJ_7.png new file mode 100644 index 0000000..8747857 Binary files /dev/null and b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/U3LfmEJ_7.png differ diff --git a/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/aeIOr8w6k.png b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/aeIOr8w6k.png new file mode 100644 index 0000000..d6fa551 Binary files /dev/null and b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/aeIOr8w6k.png differ diff --git a/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/h7tl6facr.png b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/h7tl6facr.png new file mode 100644 index 0000000..ec1b74f Binary files /dev/null and b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/h7tl6facr.png differ diff --git 
a/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/index.md b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/index.md new file mode 100644 index 0000000..77ad62e --- /dev/null +++ b/content/post/safeguard-your-androids-battery-with-tasker-home-assistant/index.md @@ -0,0 +1,45 @@ +--- +series: Projects +date: "2020-11-14T08:34:30Z" +thumbnail: aeIOr8w6k.png +usePageBundles: true +tags: +- android +- tasker +- automation +- homeassistant +title: Safeguard your Android's battery with Tasker + Home Assistant +--- + +A few months ago, I started using the [AccuBattery app](https://play.google.com/store/apps/details?id=com.digibites.accubattery) to keep a closer eye on how I'd been charging my phones. The app has a handy feature that notifies you once the battery level reaches a certain threshold so you can pull the phone off the charger and extend the lithium battery's service life, and it even offers an estimate for what that impact might be. For instance, right now the app indicates that charging my Pixel 5 from 51% to 100% would cause 0.92 wear cycles, while stopping the charge at 80% would impose just 0.17 cycles. + +![AccuBattery screenshot](aeIOr8w6k.png) + +But that depends on me being near my phone and conscious so I can take action when the notification goes off. That's often a big assumption to make - and, frankly, I'm lazy. + +I'm fortunately also fairly crafty, so I came up with a way to combine my favorite Android automation app with my chosen home automation platform to take my laziness out of the picture. 
+ +### The Ingredients +- [Wemo Mini Smart Plug](https://amzn.to/32G75Nt) +- [Raspberry Pi 3](https://amzn.to/331ZHwb) with [Home Assistant](https://www.home-assistant.io/) installed +- [Tasker](https://play.google.com/store/apps/details?id=net.dinglisch.android.taskerm) +- [Home Assistant Plug-In for Tasker](https://play.google.com/store/apps/details?id=com.markadamson.taskerplugin.homeassistant) + +I'm not going to go through how to install Home Assistant on the Pi or how to configure it beyond what's strictly necessary for this particular recipe. The official [getting started documentation](https://www.home-assistant.io/getting-started/) is a great place to start. + +### The Recipe +1. Plug the Wemo into a wall outlet, and plug a phone charger into the Wemo. Add the Belkin Wemo integration in Home Assistant, and configure the device and entity. I named mine `switchy`. Make a note of the Entity ID: `switch.switchy`. We'll need that later. +![The new entity in HA](Gu5I3LUep.png) +2. Either point your phone's browser to your [Home Assistant instance's local URL](http://homeassistant.local:8123/), or use the [Home Assistant app](https://play.google.com/store/apps/details?id=io.homeassistant.companion.android) to access it. Tap your username at the bottom of the menu and scroll all the way down to the Long-Lived Access Tokens section. Tap to create a new token. It doesn't matter what you name it, but be sure to copy the token data once it is generated since you won't be able to display it again. +3. Install the [Home Assistant Plug-In for Tasker](https://play.google.com/store/apps/details?id=com.markadamson.taskerplugin.homeassistant). Open Tasker, create a new Task called 'ChargeOff', and set the action to `Plugin > Home Assistant Plug-in for Tasker > Call Service`. Tap the pencil icon to edit the configuration, and then tap the plus sign to add a new server. 
Give it whatever name you like, and then enter your Home Assistant's IP address for the Base URL, followed by the port number `8123`. For example, `http://192.168.1.99:8123`. Paste in the Long-Lived Access Token you generated earlier. Go on and hit the Test Server button to make sure you got it right. It'll wind up looking something like this: +![Configuring the HA connection in Tasker](8Jg4zgrgB.png) +For the Service field, you need to tell HA what you want it to do. We want it to turn off a switch so enter `switch.turn_off`. We'll use the Service Data field to tell it which switch, in JSON format: +```json +{"entity_id": "switch.switchy"} +``` +Tap Test Service to make sure it works - and verify that the switch does indeed turn off. +![Creating and testing the service](U3LfmEJ_7.png) +4. Hard part is over. Now we just need to set up a profile in Tasker to fire our new task. I named mine 'Charge Limiter'. I started with `State > Power > Battery Level` and set it to trigger between 81-100%, and also added `State > Power > Source: Any` so it will only be active while charging. I also only want this to trigger while my phone is charging at home, so I added `State > Net > Wifi Connected` and then specified my home SSID. Link this profile to the Task you created earlier, and never worry about overcharging your phone again. +![Tasker profile to kill power above 80%](h7tl6facr.png) + +You can use a similar Task to turn the switch back on at a set time - or you could configure that automation directly in Home Assistant. I added an action to turn on the switch to my Google Assistant bedtime routine and that works quite well for my needs. 
diff --git a/content/post/script-to-convert-posts-to-hugo-page-bundles/index.md b/content/post/script-to-convert-posts-to-hugo-page-bundles/index.md new file mode 100644 index 0000000..0b0e2e1 --- /dev/null +++ b/content/post/script-to-convert-posts-to-hugo-page-bundles/index.md @@ -0,0 +1,138 @@ +--- +title: "Script to Convert Posts to Hugo Page Bundles" # Title of the blog post. +date: 2021-12-21T11:18:58-06:00 # Date of post creation. +# lastmod: 2021-12-21T11:18:58-06:00 # Date when last modified +description: "A hacky script to convert traditional posts (with images stored separately) to a Hugo Page Bundle" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +# draft: true # Sets whether to render this page. Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "/images/posts-2021/12/file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "/images/path/share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +codeMaxLines: 30 +series: Scripts +tags: + - hugo + - meta + - shell +comment: true # Disable comment if false. +--- +In case you missed [the news](/hello-hugo), I recently migrated this blog from a site built with Jekyll to one built with Hugo. One of Hugo's cool features is the concept of [Page Bundles](https://gohugo.io/content-management/page-bundles/), which _bundle_ a page's resources together in one place instead of scattering them all over the place. + +Let me illustrate this real quick-like. 
Focusing only on the content-generating portions of a Hugo site directory might look something like this: + +``` +site +├── content +│   └── post +│   ├── first-post.md +│   ├── second-post.md +│   └── third-post.md +└── static + └── images + ├── logo.png + └── post + ├── first-post-image-1.png + ├── first-post-image-2.png + ├── first-post-image-3.png + ├── second-post-image-1.png + ├── second-post-image-2.png + ├── third-post-image-1.png + ├── third-post-image-2.png + ├── third-post-image-3.png + └── third-post-image-4.png +``` + +So the article contents go under `site/content/post/` in a file called `name-of-article.md`. Each article may embed image (or other file types), and those get stored in `site/static/images/post/` and referenced like `![Image for first post](/images/post/first-post-image-1.png)`. When Hugo builds a site, it processes the stuff under the `site/content/` folder to render the Markdown files into browser-friendly HTML pages but it _doesn't_ process anything in the `site/static/` folder; that's treated as static content and just gets dropped as-is into the resulting site. + +It's functional, but things can get pretty messy when you've got a bunch of image files and are struggling to keep track of which images go with which post. + +Like I mentioned earlier, Hugo's Page Bundles group a page's resources together in one place. Each post gets its own folder under `site/content/` and then all of the other files it needs to reference can get dropped in there too. 
With Page Bundles, the folder tree looks like this: + +``` +site +├── content +│   └── post +│   ├── first-post +│   │   ├── first-post-image-1.png +│   │   ├── first-post-image-2.png +│   │   ├── first-post-image-3.png +│   │   └── index.md +│   ├── second-post +│   │   ├── index.md +│   │   ├── second-post-image-1.png +│   │   └── second-post-image-2.png +│   └── third-post +│   ├── index.md +│   ├── third-post-image-1.png +│   ├── third-post-image-2.png +│   ├── third-post-image-3.png +│   └── third-post-image-4.png +└── static + └── images + └── logo.png +``` + +Images and other files are now referenced in the post directly like `![Image for post 1](/first-post-image-1.png)`, and this makes it a lot easier to keep track of which images go with which post. And since the files aren't considered to be static anymore, Page Bundles enables Hugo to perform certain [Image Processing tasks](https://gohugo.io/content-management/image-processing/) when the site gets built. + +Anyway, I wanted to start using Page Bundles but didn't want to have to manually go through all my posts to move the images and update the paths so I spent a few minutes cobbling together a quick script to help me out. It's pretty similar to the one I created to help [migrate images from Hashnode to my Jekyll site](/script-to-update-image-embed-links-in-markdown-files/) last time around - and, like that script, it's not pretty, polished, or flexible in the least, but it did the trick for me. + +This one needs to be run from one step above the site root (`../site/` in the example above), and it gets passed the relative path to a post (`site/content/posts/first-post.md`). From there, it will create a new folder with the same name (`site/content/posts/first-post/`) and move the post into there while renaming it to `index.md` (`site/content/posts/first-post/index.md`). + +It then looks through the newly-relocated post to find all the image embeds. 
It moves the image files into the post directory, and then updates the post to point to the new image locations. + +Next it updates the links for any thumbnail images mentioned in the front matter post metadata. In most of my past posts, I reused an image already embedded in the post as the thumbnail so those files would already be moved by the time the script gets to that point. For the few exceptions, it also needs to move those image files over as well. + +Lastly, it changes the `usePageBundles` flag from `false` to `true` so that Hugo knows what we've done. + +```bash +#!/bin/bash +# Hasty script to convert a given standard Hugo post (where the post content and +# images are stored separately) to a Page Bundle (where the content and images are +# stored together in the same directory). +# +# Run this from the directory directly above the site root, and provide the relative +# path to the existing post that needs to be converted. +# +# Usage: ./convert-to-pagebundle.sh vpotato/content/posts/hello-hugo.md + +inputPost="$1" # vpotato/content/posts/hello-hugo.md +postPath=$(dirname $inputPost) # vpotato/content/posts +postTitle=$(basename $inputPost .md) # hello-hugo +newPath="$postPath/$postTitle" # vpotato/content/posts/hello-hugo +newPost="$newPath/index.md" # vpotato/content/posts/hello-hugo/index.md + +siteBase=$(echo "$inputPost" | awk -F/ '{ print $1 }') # vpotato +mkdir -p "$newPath" # make 'hello-hugo' dir +mv "$inputPost" "$newPost" # move 'hello-hugo.md' to 'hello-hugo/index.md' + +imageLinks=($(grep -o -P '(?<=!\[)(?:[^\]]+)\]\(([^\)]+)' $newPost | grep -o -P '/images.*')) +# Ex: '/images/posts/image-name.png' +imageFiles=($(for file in ${imageLinks[@]}; do basename $file; done)) +# Ex: 'image-name.png' +imagePaths=($(for file in ${imageLinks[@]}; do echo "$siteBase/static$file"; done)) +# Ex: 'vpotato/static/images/posts/image-name.png' +for index in ${!imagePaths[@]}; do + mv ${imagePaths[index]} $newPath + # vpotato/static/images/posts/image-name.png 
--> vpotato/content/posts/hello-hugo/image-name.png +  sed -i "s^${imageLinks[index]}^${imageFiles[index]}^" $newPost +done + +thumbnailLink=$(grep -P '^thumbnail:' $newPost | grep -o -P 'images.*') +# images/posts/thumbnail-name.png +if [[ $thumbnailLink ]]; then +  thumbnailFile=$(basename $thumbnailLink) # thumbnail-name.png +  sed -i "s|thumbnail: $thumbnailLink|thumbnail: $thumbnailFile|" $newPost +  # relocate the thumbnail file if it hasn't already been moved +  if [[ ! -f "$newPath/$thumbnailFile" ]]; then +    mv "$siteBase/static/$thumbnailLink" "$newPath" +  fi +fi +# enable page bundles +sed -i "s|usePageBundles: false|usePageBundles: true|" $newPost +``` \ No newline at end of file diff --git a/content/post/script-to-convert-posts-to-hugo-page-bundles/thumbnail.png b/content/post/script-to-convert-posts-to-hugo-page-bundles/thumbnail.png new file mode 100644 index 0000000..7e7100c Binary files /dev/null and b/content/post/script-to-convert-posts-to-hugo-page-bundles/thumbnail.png differ diff --git a/content/post/script-to-update-image-embed-links-in-markdown-files/index.md b/content/post/script-to-update-image-embed-links-in-markdown-files/index.md new file mode 100644 index 0000000..5d6174e --- /dev/null +++ b/content/post/script-to-update-image-embed-links-in-markdown-files/index.md @@ -0,0 +1,64 @@ +--- +series: Scripts +date: "2021-07-19T16:03:30Z" +usePageBundles: true +tags: +- linux +- shell +- regex +- jekyll +- meta +title: Script to update image embed links in Markdown files +toc: false +--- + +I'm preparing to migrate this blog thingy from Hashnode (which has been great!) to a [GitHub Pages site with Jekyll](https://docs.github.com/en/pages/setting-up-a-github-pages-site-with-jekyll/creating-a-github-pages-site-with-jekyll) so that I can write posts locally and then just do a `git push` to publish them - and get some more practice using `git` in the process. 
Of course, I've written some admittedly-great content here and I don't want to abandon that. + +Hashnode helpfully automatically backs up my posts in Markdown format to a private GitHub repo so it was easy to clone those into a local working directory, but all the embedded images were still hosted on Hashnode: + +```markdown + +![Clever image title](https://cdn.hashnode.com/res/hashnode/image/upload/v1600098180227/lhTnVwCO3.png) + +``` + +I wanted to download those images to `./assets/images/posts-2020/` within my local Jekyll working directory, and then update the `*.md` files to reflect the correct local path... without doing it all manually. It took a bit of trial and error to get the regex working just right (and the result is neither pretty nor elegant), but here's what I came up with: + +```bash +#!/bin/bash +# Hasty script to process a blog post markdown file, capture the URL for embedded images, +# download the image locally, and modify the markdown file with the relative image path. +# +# Run it from the top level of a Jekyll blog directory for best results, and pass the +# filename of the blog post you'd like to process. 
+# +# Ex: ./imageMigration.sh 2021-07-19-Bulk-migrating-images-in-a-blog-post.md + +postfile="_posts/$1" + +imageUrls=($(grep -o -P '(?<=!\[)(?:[^\]]+)\]\(([^\)]+)' $postfile | grep -o -P 'http.*')) +imageNames=($(for name in ${imageUrls[@]}; do echo $name | grep -o -P '[^\/]+\.[[:alnum:]]+$'; done)) +imagePaths=($(for name in ${imageNames[@]}; do echo "assets/images/posts-2020/${name}"; done)) +echo -e "\nProcessing $postfile...\n" +for index in ${!imageUrls[@]}; do + echo -e "${imageUrls[index]}\n => ${imagePaths[index]}" + curl ${imageUrls[index]} --output ${imagePaths[index]} + sed -i "s|${imageUrls[index]}|${imagePaths[index]}|" $postfile +done +``` + +I could then run that against all of the Markdown posts under `./_posts/` with: + +```bash +for post in $(ls _posts/); do ~/scripts/imageMigration.sh $post; done +``` + +And the image embeds in the local copy of my posts now all look like this: + +```markdown + +![Clever image title](lhTnVwCO3.png) + +``` + +Brilliant! \ No newline at end of file diff --git a/content/post/secure-networking-made-simple-with-tailscale/Tailscale-AppIcon.png b/content/post/secure-networking-made-simple-with-tailscale/Tailscale-AppIcon.png new file mode 100644 index 0000000..0233dbe Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/Tailscale-AppIcon.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/Tailscale-Logo-Black.png b/content/post/secure-networking-made-simple-with-tailscale/Tailscale-Logo-Black.png new file mode 100644 index 0000000..632a9d4 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/Tailscale-Logo-Black.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/acl_menu.png b/content/post/secure-networking-made-simple-with-tailscale/acl_menu.png new file mode 100644 index 0000000..8fcc3ae Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/acl_menu.png 
differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/add_global_ns.png b/content/post/secure-networking-made-simple-with-tailscale/add_global_ns.png new file mode 100644 index 0000000..3215667 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/add_global_ns.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/admin_console.png b/content/post/secure-networking-made-simple-with-tailscale/admin_console.png new file mode 100644 index 0000000..1b51e5a Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/admin_console.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/dns_tab.png b/content/post/secure-networking-made-simple-with-tailscale/dns_tab.png new file mode 100644 index 0000000..51c9f08 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/dns_tab.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/edit_menu.png b/content/post/secure-networking-made-simple-with-tailscale/edit_menu.png new file mode 100644 index 0000000..6d75df2 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/edit_menu.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/enabled_routes.png b/content/post/secure-networking-made-simple-with-tailscale/enabled_routes.png new file mode 100644 index 0000000..53b2fc3 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/enabled_routes.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/index.md b/content/post/secure-networking-made-simple-with-tailscale/index.md new file mode 100644 index 0000000..21c706b --- /dev/null +++ b/content/post/secure-networking-made-simple-with-tailscale/index.md @@ -0,0 +1,377 @@ +--- +title: "Secure Networking Made Simple with Tailscale" # Title of the blog post. 
+date: 2022-01-01 # Date of post creation. +lastmod: 2022-07-10 +description: "Tailscale makes it easy to set up and manage a secure network by building a flexible control plane on top of a high-performance WireGuard VPN." # Description used for search engine. +featured: true # Sets if post is a featured post, making appear on the home page side bar. +# draft: true # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +featureImage: "Tailscale-Logo-Black.png" # Sets featured image on blog post. +featureImageAlt: 'Tailscale Logo' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "Tailscale-AppIcon.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Projects +tags: + - vpn + - wireguard + - homelab + - cloud + - linux + - networking + - security +comment: true # Disable comment if false. +--- +Not all that long ago, I shared about a [somewhat-complicated WireGuard VPN setup](/cloud-based-wireguard-vpn-remote-homelab-access/) that I had started using to replace my previous OpenVPN solution. I raved about WireGuard's speed, security, and flexible (if complex) Cryptokey Routing, but adding and managing peers with WireGuard is a fairly manual (and tedious) process. And while I thought I was pretty clever for using a WireGuard peer in GCP to maintain a secure tunnel into my home network without having to punch holes through my firewall, routing all my traffic through The Cloud wasn't really optimal[^egress_fees]. + +And then I discovered [Tailscale](https://tailscale.com/), which is built on the WireGuard protocol with an additional control plane on top. 
It delivers the same high security and high speed but dramatically simplifies the configuration and management of peer devices, and also adds in some other handy features like easy-to-configure [Access Control Lists (ACLs)](https://tailscale.com/kb/1018/acls/) to restrict traffic between peers and a [MagicDNS](https://tailscale.com/kb/1081/magicdns/) feature to automatically register DNS records for connected devices so you don't have to keep up with their IPs. + +There's already a great write-up (from the source!) on [How Tailscale Works](https://tailscale.com/blog/how-tailscale-works/), and it's really worth a read so I won't rehash it fully here. The tl;dr though is that Tailscale makes securely connecting remote systems incredibly easy, and it lets those systems connect with each other directly ("mesh") rather than needing traffic to go through a single VPN endpoint ("hub-and-spoke"). It uses a centralized coordination server to *coordinate* the complicated key exchanges needed for all members of a Tailscale network (a "[tailnet](https://tailscale.com/kb/1136/tailnet/)") to trust each other, and this removes the need for a human to manually edit configuration files on every existing device just to add a new one to the mix. Tailscale also leverages [magic :tada:](https://tailscale.com/blog/how-nat-traversal-works/) to allow Tailscale nodes to communicate with each other without having to punch holes in firewall configurations or forward ports or anything else tedious and messy. (And in case that the typical NAT traversal techniques don't work out, Tailscale created the Detoured Encrypted Routing Protocol (DERP[^derp]) to make sure Tailscale can still function seamlessly even on extremely restrictive networks that block UDP entirely or otherwise interfere with NAT traversal.) 
+ +{{% notice info "Not a VPN Service" %}} +It's a no-brainer solution for remote access, but it's important to note that Tailscale is not a VPN *service*; it won't allow you to internet anonymously or make it appear like you're connecting from a different country (unless you configure a Tailscale Exit Node hosted somewhere in The Cloud to do just that). +{{% /notice %}} + +Tailscale's software is [open-sourced](https://github.com/tailscale) so you *could* host your own Tailscale control plane and web front end, but much of the appeal of Tailscale is how easy it is to set up and use. To that end, I'm using the Tailscale-hosted option. Tailscale offers a very generous free Personal tier which supports a single admin user, 20 connected devices, 1 subnet router, plus all of the bells and whistles, and the company also sells [Team, Business, and Enterprise plans](https://tailscale.com/pricing/) if you need more users, devices, subnet routers, or additional capabilities[^personal_pro]. + +Tailscale provides surprisingly-awesome documentation, and the [Getting Started with Tailscale](https://tailscale.com/kb/1017/install/) article does a great job of showing how easy it is to get up and running with just three steps: +1. Sign up for an account +2. Add a machine to your network +3. Add another machine to your network (repeat until satisfied) + +This post will start there but then also expand some of the additional features and capabilities that have me so excited about Tailscale. + +[^egress_fees]: Plus the GCP egress charges started to slowly stack up to a few ones of dollars each month. +[^derp]: May I just say that I *love* this acronym? +[^personal_pro]: There's also a reasonably-priced Personal Pro option which comes with 100 devices, 2 routers, and custom auth periods for $48/year. I'm using that since it's less than I was going to spend on WireGuard egress through GCP and I want to support the project in a small way. 
+ +### Getting started +The first step in getting up and running with Tailscale is to sign up at [https://login.tailscale.com/start](https://login.tailscale.com/start). You'll need to use an existing Google, Microsoft, or GitHub account to sign up, which also lets you leverage the 2FA and other security protections already enabled on those accounts. + +Once you have a Tailscale account, you're ready to install the Tailscale client. The [download page](https://tailscale.com/download) outlines how to install it on various platforms, and also provides a handy-dandy one-liner to install it on Linux: + +```bash +curl -fsSL https://tailscale.com/install.sh | sh +``` + +After the install completes, it will tell you exactly what you need to do next: + +``` +Installation complete! Log in to start using Tailscale by running: + +sudo tailscale up +``` + +There are also Tailscale apps available for [iOS](https://tailscale.com/download/ios) and [Android](https://tailscale.com/download/android) - and the Android app works brilliantly on Chromebooks too! + +#### Basic `tailscale up` +Running `sudo tailscale up` then reveals the next step: + +```bash +❯ sudo tailscale up + +To authenticate, visit: + + https://login.tailscale.com/a/1872939939df +``` + +I can copy that address into a browser and I'll get prompted to log in to my Tailscale account. And that's it. Tailscale is installed, configured to run as a service, and connected to my Tailscale account. This also creates my tailnet. + +That was pretty easy, right? But what about if I can't easily get to a web browser from the terminal session on a certain device? No worries, `tailscale up` has a flag for that: + +```bash +sudo tailscale up --qr +``` + +That will convert the URL to a QR code that I can scan from my phone. 
+
+#### Advanced `tailscale up`
+There are a few additional flags that can be useful under certain situations:
+
+- `--advertise-exit-node` to tell the tailnet that this could be used as an exit node for internet traffic
+```bash
+sudo tailscale up --advertise-exit-node
+```
+- `--advertise-routes` to let the node perform subnet routing functions to provide connectivity to specified local subnets
+```bash
+sudo tailscale up --advertise-routes "192.168.1.0/24,172.16.0.0/16"
+```
+- `--advertise-tags`[^tags] to associate the node with certain tags for ACL purposes (like `tag:home` to identify stuff in my home network and `tag:cloud` to label external cloud-hosted resources)
+```bash
+sudo tailscale up --advertise-tags "tag:cloud"
+```
+- `--hostname` to manually specify a hostname to use within the tailnet
+```bash
+sudo tailscale up --hostname "tailnode"
+```
+- `--shields-up` to block incoming traffic
+```bash
+sudo tailscale up --shields-up
+```
+
+These flags can also be combined with each other:
+```bash
+sudo tailscale up --hostname "tailnode" --advertise-exit-node --qr
+```
+
+[^tags]: Before being able to assign tags at the command line, you must first define tag owners who can manage the tag. On a personal account, you've only got one user to worry with but you still have to set this up first. I'll go over this in a bit but here's [the documentation](https://tailscale.com/kb/1068/acl-tags/#defining-a-tag) if you want to skip ahead.
+
+#### Sidebar: Tailscale on VyOS
+Getting Tailscale on [my VyOS virtual router](/vmware-home-lab-on-intel-nuc-9/#vyos) was unfortunately a little more involved than [leveraging the built-in WireGuard capability](/cloud-based-wireguard-vpn-remote-homelab-access/#configure-vyos-router-as-wireguard-peer). I found the [vyos-tailscale](https://github.com/DMarby/vyos-tailscale) project to help with building a customized VyOS installation ISO with the `tailscaled` daemon added in.
I was then able to copy the ISO over to my VyOS instance and install it as if it were a [standard upgrade](https://docs.vyos.io/en/latest/installation/update.html). I could then bring up the interface, advertise my home networks, and make it available as an exit node with: +```bash +sudo tailscale up --advertise-exit-node --advertise-routes "192.168.1.0/24,172.16.0.0/16" +``` + +#### Other `tailscale` commands +Once there are a few members, I can use the `tailscale status` command to see a quick overview of the tailnet: +```bash +❯ tailscale status +100.115.115.39 deb01 john@ linux - +100.118.115.69 ipam john@ linux - +100.116.90.109 johns-iphone john@ iOS - +100.116.31.85 matrix john@ linux - +100.114.140.112 pixel6pro john@ android - +100.94.127.1 pixelbook john@ android - +100.75.110.50 snikket john@ linux - +100.96.24.81 vyos john@ linux - +100.124.116.125 win01 john@ windows - +``` + +Without doing any other configuration beyond just installing Tailscale and connecting it to my account, I can now easily connect from any of these devices to any of the other devices using the listed Tailscale IP[^magicdns]. Entering `ssh 100.116.31.85` will connect me to my Matrix server. 
+ +`tailscale ping` lets me check the latency between two Tailscale nodes at the Tailscale layer; the first couple of pings will likely be delivered through a nearby DERP server until the NAT traversal magic is able to kick in: + +```bash +❯ tailscale ping snikket +pong from snikket (100.75.110.50) via DERP(nyc) in 34ms +pong from snikket (100.75.110.50) via DERP(nyc) in 35ms +pong from snikket (100.75.110.50) via DERP(nyc) in 35ms +pong from snikket (100.75.110.50) via [PUBLIC_IP]:41641 in 23ms +``` + +The `tailscale netcheck` command will give me some details about my local Tailscale node, like whether it's able to pass UDP traffic, which DERP server is the closest, and the latency to all Tailscale DERP servers: + +```bash +❯ tailscale netcheck + +Report: + * UDP: true + * IPv4: yes, [LOCAL_PUBLIC_IP]:52661 + * IPv6: no + * MappingVariesByDestIP: false + * HairPinning: false + * PortMapping: + * Nearest DERP: Chicago + * DERP latency: + - ord: 23.4ms (Chicago) + - dfw: 26.8ms (Dallas) + - nyc: 28.6ms (New York City) + - sea: 71.5ms (Seattle) + - sfo: 77.8ms (San Francisco) + - lhr: 102.2ms (London) + - fra: 114.8ms (Frankfurt) + - sao: 133.1ms (São Paulo) + - tok: 154.9ms (Tokyo) + - syd: 215.3ms (Sydney) + - sin: 243.7ms (Singapore) + - blr: 244.6ms (Bangalore) +``` + +[^magicdns]: I could also connect using the Tailscale hostname, if [MagicDNS](https://tailscale.com/kb/1081/magicdns/) is enabled - but I'm getting ahead of myself. + +### Tailscale management +Now that the Tailscale client is installed on my devices and I've verified that they can talk to each other, it might be a good time to *log in* at [`login.tailscale.com`](https://login.tailscale.com/) to take a look at the Tailscale admin console. +![Tailscale admin console](admin_console.png) + +#### Subnets and Exit Nodes +See how the `vyos` node has little labels on it about "Subnets (!)" and "Exit Node (!)"? 
The exclamation marks are there because the node is *advertising* subnets and its exit node eligibility, but those haven't actually been turned on yet. To enable the `vyos` node to function as a subnet router (for the `172.16.0.0/16` and `192.168.1.0/24` networks listed beneath its Tailscale IP) and as an exit node (for internet-bound traffic from other Tailscale nodes), I need to click on the little three-dot menu icon at the right edge of the row and select the "Edit route settings..." option.
+![The menu contains some other useful options too - we'll get to those!](edit_menu.png)
+
+![Edit route settings](route_settings.png)
+
+Now I can approve the subnet routes (individually or all at once) and allow the node to route traffic to the internet as well[^exit_node].
+![Enabled the routes](enabled_routes.png)
+
+Cool! But now that's giving me another warning...
+
+[^exit_node]: Once subnets are allowed, they're made available to all members of the tailnet so that traffic destined for those networks can be routed accordingly. Clients will need to opt-in to using the Exit Node though; I typically only do that when I'm on a wireless network I don't control and want to make sure that no one can eavesdrop on my internet traffic, but I like to have that option available for when I need it.
+
+#### Key expiry
+By default, Tailscale [expires each node's encryption keys every 180 days](https://tailscale.com/kb/1028/key-expiry/). This improves security (particularly over vanilla WireGuard, which doesn't require any key rotation) but each node will need to reauthenticate (via `tailscale up`) in order to get a new key. It may not make sense to do that for systems acting as subnet routers or exit nodes since they would stop passing all Tailscale traffic once the key expires.
That would also hurt for my cloud servers which are *only* accessible via Tailscale; if I can't log in through SSH (since it's blocked at the firewall) then I can't reauthenticate Tailscale to regain access. For those systems, I can click that three-dot menu again and select the "Disable key expiry" option. I tend to do this for my "always on" tailnet members and just enforce the key expiry for my "client" type devices which could potentially be physically lost or stolen. + +![Machine list showing enabled Subnet Router and Exit Node and disabled Key Expiry](no_expiry.png) + +#### Configuring DNS +It's great that all my Tailscale machines can talk to each other directly by their respective Tailscale IP addresses, but who wants to keep up with IPs? I sure don't. Let's do some DNS. I'll start out by clicking on the [DNS](https://login.tailscale.com/admin/dns) tab in the admin console. +![The DNS options](dns_tab.png) + +I need to add a Global Nameserver before I can enable MagicDNS so I'll click on the appropriate button to enter in the *Tailscale IP*[^dns_ip] of my home DNS server (which is using [NextDNS](https://nextdns.io/) as the upstream resolver). +![Adding a global name server](add_global_ns.png) + +I'll also enable the toggle to "Override local DNS" to make sure all queries from connected clients are going through this server (and thus extend the NextDNS protection to all clients without having to configure them individually). +![Overriding local DNS configuration](override_local_dns.png) + +I can also define search domains to be used for unqualified DNS queries by adding another name server with the same IP address, enabling the "Restrict to search domain" option, and entering the desired domain: +![Entering a search domain](restrict_search_domain.png) + +This will let me resolve hostnames when connected remotely to my lab without having to type the domain suffix (ex, `vcsa` versus `vcsa.lab.bowdre.net`). 
+ +And, finally, I can click the "Enable MagicDNS" button to turn on the magic. This adds a new nameserver with a private Tailscale IP which will resolve Tailscale hostnames to their internal IP addresses. + +![MagicDNS Enabled!](magicdns.png) + + +Now I can log in to my Matrix server by simply typing `ssh matrix`. Woohoo! + +[^dns_ip]: Using the Tailscale IP will allow queries to go straight to the DNS server without having to go through the VyOS router first. Configuring my clients to use a tailnet node for DNS queries also has the added benefit of sending that traffic through the encrypted tunnels instead of across the internet in the clear. I get secure DNS without having to configure secure DNS! + +### Access control +Right now, all of my Tailscale nodes can access all of my other Tailscale nodes. That's certainly very convenient, but I'd like to break things up a bit. I can use access control policies to define which devices should be able to talk to which other devices, and I can use [ACL tags](https://tailscale.com/kb/1068/acl-tags/) to logically group resources together to make this easier. + +#### Tags +I'm going to use three tags in my tailnet: +1. `tag:home` to identify servers in my home network which will have access to all other servers. +2. `tag:cloud` to identify my cloud servers which will only have access to other cloud servers. +3. `tag:client` to identify client-type devices which will be able to access all nodes in the tailnet. + +Before I can actually apply these tags to any of my machines, I first need to define `tagOwners` for each tag which will determine which users (in my organization of one) will be able to use the tags. This is done by editing the policy file available on the [Access Controls](https://login.tailscale.com/admin/acls) tab of the admin console. 
+
+This ACL file uses a format called [HuJSON](https://github.com/tailscale/hujson), which is basically JSON but with support for inline comments and with a bit of leniency when it comes to trailing commas. That makes a config file that is easy for both humans and computers to parse.
+
+I'm going to start by creating a group called `admins` and add myself to that group. This isn't strictly necessary since I am the only user in the organization, but I feel like it's a nice practice anyway. Then I'll add the `tagOwners` section to map each tag to its owner, the new group I just created:
+
+```json
+{
+  "groups": {
+    "group:admins": ["john@example.com"],
+  },
+  "tagOwners": {
+    "tag:home": ["group:admins"],
+    "tag:cloud": ["group:admins"],
+    "tag:client": ["group:admins"]
+  }
+}
+```
+
+Now I have two options for applying tags to devices. I can either do it from the admin console, or by passing the `--advertise-tags` flag to the `tailscale up` CLI command. I touched on the CLI approach earlier so I'll go with the GUI approach this time. It's simple - I just go back to the [Machines](https://login.tailscale.com/admin/machines) tab, click on the three-dot menu button for a machine, and select the "Edit ACL tags..." option.
+![Edit ACL tags](acl_menu.png)
+
+I can then pick the tag (or tags!) I want to apply:
+![Selecting the tags](selecting_tags.png)
+
+The applied tags have now replaced the owner information which was previously associated with each machine:
+![Tagged machines](tagged_machines.png)
+
+#### ACLs
+By default, Tailscale implements an implicit "Allow All" ACL. As soon as you start modifying the ACL, though, that switches to an implicit "Deny All". So I'll add new rules to explicitly state what communication should be permitted and everything else will be blocked.
+
+Each ACL rule consists of four named parts:
+1. `action` - what to do with the traffic, the only valid option is `accept`.
+2.
`users` - a list of traffic sources, can be specific users, Tailscale IPs, hostnames, subnets, groups, or tags. +3. `proto` - (optional) protocol for the traffic which should be permitted. +4. `ports` - a list of destinations (and optional ports). + +So I'll add this to the top of my policy file: +```json +{ + "acls": [ + { + // home servers can access other servers + "action": "accept", + "users": ["tag:home"], + "ports": [ + "tag:home:*", + "tag:cloud:*" + ] + }, + { + // clients can access everything + "action": "accept", + "users": ["tag:client"], + "ports": ["*:*"] + } + ] +} +``` + +This policy becomes active as soon as I click the Save button at the bottom of the page, and I notice a problem very shortly. Do you see it? + +Earlier I configured Tailscale to force all nodes to use my home DNS server for resolving all queries, and I just set an ACL which prevents my cloud servers from talking to my home servers... which includes the DNS server. I can think of two ways to address this: +1. Re-register the servers by passing the `--accept-dns=false` flag to `tailscale up` so they'll ignore the DNS configured in the admin console. +2. Add a new ACL rule to allow DNS traffic to reach the DNS server from the cloud. + +Option 2 sounds better to me so that's what I'm going to do. Instead of putting an IP address directly into the ACL rule I'd rather use a hostname, and unfortunately the Tailscale host names aren't available within ACL rule declarations. 
But I can define a host alias in the policy to map a friendly name to the IP: +```json +{ + "hosts": { + "win01": "100.124.116.125" + } +} +``` + +And I can then create a new rule for `"users": ["tag:cloud"]` to add an exception for `win01:53`: +```json +{ + "acls": [ + { + // cloud servers can only access other cloud servers plus my internal DNS server + "action": "accept", + "users": ["tag:cloud"], + "ports": [ + "win01:53" + ] + } + ] +} +``` + +And that gets DNS working again for my cloud servers while still serving the results from my NextDNS configuration. Here's the complete policy configuration: + +```json +{ + "acls": [ + { + // home servers can access other servers + "action": "accept", + "users": ["tag:home"], + "ports": [ + "tag:home:*", + "tag:cloud:*" + ] + }, + { + // cloud servers can only access my internal DNS server + "action": "accept", + "users": ["tag:cloud"], + "ports": [ + "win01:53" + ] + }, + { + // clients can access everything + "action": "accept", + "users": ["tag:client"], + "ports": ["*:*"] + } + ], + "hosts": { + "win01": "100.124.116.125" + }, + "groups": { + "group:admins": ["john@example.com"], + }, + "tagOwners": { + "tag:home": ["group:admins"], + "tag:cloud": ["group:admins"], + "tag:client": ["group:admins"] + } +} +``` + +### Wrapping up +This post has really only scratched the surface on the cool capabilities provided by Tailscale. I didn't even get into its options for [enabling HTTPS with valid certificates](https://tailscale.com/kb/1153/enabling-https/), [custom DERP servers](https://tailscale.com/kb/1118/custom-derp-servers/), [sharing tailnet nodes with other users](https://tailscale.com/kb/1084/sharing/), or [file transfer using Taildrop](https://tailscale.com/kb/1106/taildrop/). 
The [Next Steps](https://tailscale.com/kb/1017/install/#next-steps) section of the official Getting Started doc has some other cool ideas for Tailscale-powered use cases, and there are a ton more listed in the [Solutions](https://tailscale.com/kb/solutions/) and [Guides](https://tailscale.com/kb/guides/) categories as well. + +It's amazing to me that a VPN solution can be this simple to set up and manage while still offering such incredible flexibility. **Tailscale is proof that secure networking doesn't have to be hard.** \ No newline at end of file diff --git a/content/post/secure-networking-made-simple-with-tailscale/magicdns.png b/content/post/secure-networking-made-simple-with-tailscale/magicdns.png new file mode 100644 index 0000000..600a4c0 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/magicdns.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/no_expiry.png b/content/post/secure-networking-made-simple-with-tailscale/no_expiry.png new file mode 100644 index 0000000..d5d779d Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/no_expiry.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/override_local_dns.png b/content/post/secure-networking-made-simple-with-tailscale/override_local_dns.png new file mode 100644 index 0000000..cb6b692 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/override_local_dns.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/restrict_search_domain.png b/content/post/secure-networking-made-simple-with-tailscale/restrict_search_domain.png new file mode 100644 index 0000000..d80c8ca Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/restrict_search_domain.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/route_settings.png 
b/content/post/secure-networking-made-simple-with-tailscale/route_settings.png new file mode 100644 index 0000000..e18d753 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/route_settings.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/selecting_tags.png b/content/post/secure-networking-made-simple-with-tailscale/selecting_tags.png new file mode 100644 index 0000000..b2bdf5e Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/selecting_tags.png differ diff --git a/content/post/secure-networking-made-simple-with-tailscale/tagged_machines.png b/content/post/secure-networking-made-simple-with-tailscale/tagged_machines.png new file mode 100644 index 0000000..2457df3 Binary files /dev/null and b/content/post/secure-networking-made-simple-with-tailscale/tagged_machines.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/0-h1flLZs.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/0-h1flLZs.png new file mode 100644 index 0000000..1dc3741 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/0-h1flLZs.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/20210804_p10k_prompt.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/20210804_p10k_prompt.png new file mode 100644 index 0000000..bea3048 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/20210804_p10k_prompt.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/2LTaCEdWH.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/2LTaCEdWH.png new file mode 100644 index 0000000..77f5772 Binary 
files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/2LTaCEdWH.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/8q-WT0AyC.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/8q-WT0AyC.png new file mode 100644 index 0000000..066ea46 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/8q-WT0AyC.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/ACUKsohq6.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/ACUKsohq6.png new file mode 100644 index 0000000..3725676 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/ACUKsohq6.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/K1ScSuWcg.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/K1ScSuWcg.png new file mode 100644 index 0000000..96d835f Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/K1ScSuWcg.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/MkGu29HKl.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/MkGu29HKl.png new file mode 100644 index 0000000..1f34477 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/MkGu29HKl.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/QRP4iyLnu.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/QRP4iyLnu.png new file mode 100644 index 0000000..f1e1bee 
Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/QRP4iyLnu.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/U5E556eXf.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/U5E556eXf.png new file mode 100644 index 0000000..fd04def Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/U5E556eXf.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/XtmaR9Z0J.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/XtmaR9Z0J.png new file mode 100644 index 0000000..d08062e Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/XtmaR9Z0J.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/YaFNJJG_c.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/YaFNJJG_c.png new file mode 100644 index 0000000..0b779cc Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/YaFNJJG_c.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/a0uqHkJiC.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/a0uqHkJiC.png new file mode 100644 index 0000000..a6eaa1e Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/a0uqHkJiC.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/index.md b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/index.md new file mode 100644 index 0000000..b2cf338 --- 
/dev/null +++ b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/index.md @@ -0,0 +1,184 @@ +--- +series: Projects +date: "2020-10-27T08:34:30Z" +lastmod: "2021-05-20" +thumbnail: XtmaR9Z0J.png +usePageBundles: true +tags: +- chromeos +- linux +- crostini +- docker +- shell +- containers +title: Setting up Linux on a new Lenovo Chromebook Duet (bonus arm64 complications!) +featured: false +--- + +I've [written in the past](/3d-modeling-and-printing-on-chrome-os) about the Linux setup I've been using on my Pixel Slate. My Slate's keyboard stopped working over the weekend, though, and there don't seem to be any replacements (either Google or Brydge) to be found. And then I saw that [Walmart had the 64GB Lenovo Chromebook Duet temporarily marked down](https://twitter.com/johndotbowdre/status/1320733614426988544) to a mere $200 - just slightly more than the Slate's *keyboard* originally cost. So I jumped on that deal, and the little Chromeblet showed up today. + +![Aww, it's so cute!](kULHPeDuc.jpeg) + +I'll be putting the Duet through the paces in the coming days to see if/how it can replace my now-tablet-only Slate, but first things first: I need Linux. And this may be a little bit different than the setup on the Slate since the Duet's Mediatek processor uses the aarch64/arm64 architecture instead of amd64. (And while I'm writing these steps specific to the Duet, the same steps should work on basically any arm64 Chromebook.) + +So journey with me as I get this little guy set up! + +### Installing Linux +This part is dead simple. 
Just head into **Settings > Linux (Beta)** and hit the **Turn on** button: +![It doesn't take much to get Linux turned on](oLso9Wyzj.png) + +Click **Next**, review the options for username and initial disk size (which can be easily increased later so there's no real need to change it right now), and then select **Install**: +![Selecting username and storage allocation](ACUKsohq6.png) + +It takes just a few minutes to download and initialize the `termina` VM and then create the default `penguin` container: +![Installing...](2LTaCEdWH.png) + +You're ready to roll once the Terminal opens and gives you a prompt: +![Hello, Penguin!](0-h1flLZs.png) + +Your first action should be to go ahead and install any patches: +```shell +sudo apt update +sudo apt upgrade +``` + +### Zsh, Oh My Zsh, and powerlevel10k theme +I've been really getting into this shell setup recently so let's go on and make things comfortable before we move on too much further. Getting `zsh` is straight forward: +```shell +sudo apt install zsh +``` +Go ahead and launch `zsh` (by typing '`zsh`') and go through the initial setup wizard to configure preferences for things like history, completion, and other settings. I leave history on the defaults, enable the default completion options, switch the command-line editor to `vi`-style, and enable both `autocd` and `appendhistory`. Once you're back at the (new) `penguin%` prompt we can move on to installing the [Oh My Zsh plugin framework](https://github.com/ohmyzsh/ohmyzsh). + +Just grab the installer script like so: +```shell +wget https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh +``` +Review it if you'd like (and you should! *Always* review code before running it!!), and then execute it: +```shell +sh install.sh +``` +When asked if you'd like to change your default shell to `zsh` now, **say no**. 
This is because it will prompt for your password, but you probably don't have a password set on your brand-new Linux (Beta) account and that just makes things complicated. We'll clear this up later, but for now just check out that slick new prompt: +![Oh my!](8q-WT0AyC.png) + +Oh My Zsh is pretty handy because you can easily enable [additional plugins](https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins) to make your prompt behave exactly the way you want it to. Let's spruce it up even more with the [powerlevel10k theme](https://github.com/romkatv/powerlevel10k)! +```shell +git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k +``` +Now we just need to edit `~/.zshrc` to point to the new theme: +```shell +sed -i s/^ZSH_THEME=.\*$/ZSH_THEME='"powerlevel10k\/powerlevel10k"'/ ~/.zshrc +``` +We'll need to launch another instance of `zsh` for the theme change to take effect so first let's go ahead and manually set `zsh` as our default shell. We can use `sudo` to get around the whole "don't have a password set" inconvenience: +```shell +sudo chsh -s /bin/zsh [username] +``` +Now close out the terminal and open it again, and you should be met by the powerlevel10k configurator which will walk you through getting things set up: +![powerlevel10k configurator](K1ScSuWcg.png) + +This theme is crazy-configurable, but fortunately the configurator wizard does a great job of helping you choose the options that work best for you. +I pick the Classic prompt style, Unicode character set, Dark prompt color, 24-hour time, Angled separators, Sharp prompt heads, Flat prompt tails, 2-line prompt height, Dotted prompt connection, Right prompt frame, Sparse prompt spacing, Fluent prompt flow, Enabled transient prompt, Verbose instant prompt, and (finally) Yes to apply the changes. +![New P10k prompt](20210804_p10k_prompt.png) +Looking good! 
+ +### Visual Studio Code +I'll need to do some light development work so VS Code is next on the hit list. You can grab the installer [here](https://code.visualstudio.com/Download#) or just copy/paste the following to stay in the Terminal. Definitely be sure to get the arm64 version! +```shell +curl -L https://aka.ms/linux-arm64-deb > code_arm64.deb +sudo apt install ./code_arm64.deb +``` +VS Code should automatically appear in the Chromebook's Launcher, or you can use it to open a file directly with `code [filename]`: +![VS Code editing my .zshrc file](XtmaR9Z0J.png) +Nice! + +### Android platform tools (adb and fastboot) +I sometimes don't want to wait for my Pixel to get updated naturally, so I love using `adb sideload` to manually update my phones. Here's what it takes to set that up. Installing adb is as simple as `sudo apt install adb`. To use it, enable the USB Debugging Developer Option on your phone, and then connect the phone to the Chromebook. You'll get a prompt to connect the phone to Linux: +![Connecting a phone to Linux](MkGu29HKl.png) + +Once you connect the phone to Linux, check the phone to approve the debugging connection. You can then issue `adb devices` to verify the phone is connected: +![Verifying the ADB connection](a0uqHkJiC.png) + +*I've since realized that the platform-tools (adb/fastboot) available in the repos are much older than what are required for flashing a factory image or sideloading an OTA image to a modern Pixel phone. This'll do fine for installing APKs either to your Chromebook or your phone, but I had to pull out my trusty Pixelbook to flash GrapheneOS to my Pixel 4a.* + +### Microsoft PowerShell and VMware PowerCLI +*[Updated 5/20/2021 with Microsoft's newer instructions]* +I'm working on setting up a [VMware homelab on an Intel NUC 9](https://twitter.com/johndotbowdre/status/1317558182936563714) so being able to automate things with PowerCLI will be handy. 
+ +PowerShell for ARM is still in an early stage so while [it is supported](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7.2#support-for-arm-processors) it must be installed manually. Microsoft has instructions for installing PowerShell from binary archives [here](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7.2#linux), and I grabbed the latest `-linux-arm64.tar.gz` release I could find [here](https://github.com/PowerShell/PowerShell/releases). +```shell +curl -L -o /tmp/powershell.tar.gz https://github.com/PowerShell/PowerShell/releases/download/v7.2.0-preview.5/powershell-7.2.0-preview.5-linux-arm64.tar.gz +sudo mkdir -p /opt/microsoft/powershell/7 +sudo tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7 +sudo chmod +x /opt/microsoft/powershell/7/pwsh +sudo ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh +``` +You can then just run `pwsh`: +![Powershell, in Linux, on Chrome OS](QRP4iyLnu.png) +That was the hard part. To install PowerCLI into your new Powershell environment, just run `Install-Module -Name VMware.PowerCLI` at the `PS >` prompt, and accept the warning about installing a module from an untrusted repository. + +I'm planning to use PowerCLI against my homelab without trusted SSL certificates so (note to self) I need to run `Set-PowerCLIConfiguration -InvalidCertificateAction Ignore` before I try to connect. +![PowerCLI connected to my vCenter](YaFNJJG_c.png) + +Woot! + +### Docker +The Linux (Beta) environment consists of a hardened virtual machine (named `termina`) running an LXC Debian container (named `penguin`). Know what would be even more fun? Let's run some other containers inside our container! 
+ +The docker installation has a few prerequisites: +```shell +sudo apt install \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg-agent \ + software-properties-common +``` +Then we need to grab the Docker repo key: +```shell +curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - +``` +And then we can add the repo: +```shell +sudo add-apt-repository \ + "deb [arch=arm64] https://download.docker.com/linux/debian \ + $(lsb_release -cs) \ + stable" +``` +And finally update the package cache and install `docker` and its friends: +```shell +sudo apt update +sudo apt install docker-ce docker-ce-cli containerd.io +``` +![I put a container in your container](k2uiYi5e8.png) +Xzibit would be proud! + +### 3D printing utilities +Just like [last time](/3d-modeling-and-printing-on-chrome-os), I'll want to be sure I can do light 3D part design and slicing on this Chromebook. Once again, I can install FreeCAD with `sudo apt install freecad`, and this time I didn't have to implement any workarounds for graphical issues: +![FreeCAD](q1inyuUOb.png) + +Unfortunately, though, I haven't found a slicer application compiled with support for aarch64/arm64. There's a *much* older version of Cura available in the default Debian repos but it crashes upon launch. Neither Cura nor PrusaSlicer (or the Slic3r upstream) offer arm64 releases. + +So while I can use the Duet for designing 3D models, I won't be able to actually prepare those models for printing without using another device. I'll need to keep looking for another solution here. (If you know of an option I've missed, please let me know!) + +### Jupyter Notebook +I came across [a Reddit post](https://www.reddit.com/r/Crostini/comments/jnbqv3/successfully_running_jupyter_notebook_on_samsung/) today describing how to install `conda` and get a Jupyter Notebook running on arm64 so I had to give it a try. It actually wasn't that bad! 
+ +The key is to grab the appropriate version of [conda Miniforge](https://github.com/conda-forge/miniforge), make it executable, and run the installer: +```shell +wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-aarch64.sh +chmod +x Miniforge3-Linux-aarch64.sh +./Miniforge3-Linux-aarch64.sh +``` +Exit the terminal and relaunch it, and then install Jupyter: +```shell +conda install -c conda-forge notebook +``` + +You can then launch the notebook with `jupyter notebook` and it will automatically open up in a Chrome OS browser tab: + +![To Jupyter and beyond!](U5E556eXf.png) + +Cool! Now I just need to learn what I'm doing with Jupyter - but at least I don't have an excuse about "my laptop won't run it". + + +### Wrap-up +I'm sure I'll be installing a few more utilities in the coming days but this covers most of my immediate must-have Linux needs. I'm eager to see how this little Chromeblet does now that I'm settled in. diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/k2uiYi5e8.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/k2uiYi5e8.png new file mode 100644 index 0000000..3989be3 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/k2uiYi5e8.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/kULHPeDuc.jpeg b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/kULHPeDuc.jpeg new file mode 100644 index 0000000..94207bf Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/kULHPeDuc.jpeg differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/oLso9Wyzj.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/oLso9Wyzj.png new 
file mode 100644 index 0000000..b520379 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/oLso9Wyzj.png differ diff --git a/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/q1inyuUOb.png b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/q1inyuUOb.png new file mode 100644 index 0000000..2f582e7 Binary files /dev/null and b/content/post/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/q1inyuUOb.png differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/9_Ze3zyBk.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/9_Ze3zyBk.jpeg new file mode 100644 index 0000000..d82114d Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/9_Ze3zyBk.jpeg differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/BAf7knBk5.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/BAf7knBk5.jpeg new file mode 100644 index 0000000..6b72db3 Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/BAf7knBk5.jpeg differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/CBziPHD8A.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/CBziPHD8A.jpeg new file mode 100644 index 0000000..76834d7 Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/CBziPHD8A.jpeg differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/P-x5qEg_9.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/P-x5qEg_9.jpeg new file mode 100644 index 0000000..2a02310 Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/P-x5qEg_9.jpeg differ diff --git 
a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/gVj7d_2Nu.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/gVj7d_2Nu.jpeg new file mode 100644 index 0000000..e5b3d88 Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/gVj7d_2Nu.jpeg differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/index.md b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/index.md new file mode 100644 index 0000000..ebefe97 --- /dev/null +++ b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/index.md @@ -0,0 +1,61 @@ +--- +date: "2020-11-06T08:34:30Z" +thumbnail: P-x5qEg_9.jpeg +usePageBundles: true +tags: +- chromeos +title: 'Showdown: Lenovo Chromebook Duet vs. Google Pixel Slate' +--- + +Okay, okay, this isn't actually going to be a comparison review between the two wildly-mismatched-but-also-kind-of-similar [Chromeblets](https://www.reddit.com/r/chromeos/comments/bp1nwo/branding/), but rather a (hopefully) brief summary of my experience moving from an $800 Pixel Slate + $200 Google keyboard to a Lenovo Chromebook Duet I picked up on sale for just $200. + +![A Tale of Two Chromeblets](P-x5qEg_9.jpeg) + +### Background +Up until last week, I'd been using the Slate as my primary personal computing device for the previous 20 months or so, mainly in laptop mode (as opposed to tablet mode). I do a lot of casual web browsing, and I spend a significant portion of my free time helping other users on Google's product support forums as a part of the [Google Product Experts program](https://productexperts.withgoogle.com/what-it-is). I also work a lot with the [Chrome OS Linux (Beta) environment](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications), but I avoid Android apps as much as I can. And I also used the Slate for a bit of Stadia gaming when I wasn't near a Chromecast. 
+ +So the laptop experience is generally more important to me than the tablet one. I need to be able to work with a large number of browser tabs, but I don't typically need to do any heavy processing directly on the computer. + +I was pretty happy with the Slate, but its expensive keyboard stopped working recently and replacements aren't really available anywhere. Remember, laptop mode is key for my use case so the Pixel Slate became instantly unusable to me. + +### Size +When you put these machines side by side, the first difference that jumps out is the size disparity. The 12.3" Pixel Slate is positively massive next to the 10.1" Lenovo Duet. +![Big 'un and little 'un](gVj7d_2Nu.jpeg) + +The Duet is physically smaller so the display itself is of course smaller. I had a brief moment of panic when I first logged in and the setup wizard completely filled the screen. Dialing Chrome OS's display scaling down to 80% strikes a good balance for me between fonts being legible while still displaying enough content to be worthwhile. It can get a bit tight when you've got windows docked side-by-side but I'm getting by okay. + +Of course, the smaller size of the Duet also makes it work better as a tablet in my mind. It's comfortable enough to hold with one hand while you interact with the other, whereas the Slate always felt a little too big for that to me. +![One-handing the Duet](qne9SybLi.jpeg) + +### Keyboard +A far more impactful size difference is the keyboards though. The Duet keyboard gets a bit cramped, particularly over toward the right side (you know, those pesky braces and semicolons that are *never* needed when coding): +![The Duet's keyboard is MUCH smaller](CBziPHD8A.jpeg) + +Getting used to typing on this significantly smaller keyboard has been the biggest adjustment so far. 
The pad on my pinky finger is wider than the last few keys at the right edge of the keyboard so I've struggled with accurately hitting the correct `[` or `]`, and also with smacking Return (and inevitably sending a malformed chat message) when trying to insert an apostrophe. I feel like I'm slowly getting the hang of it, but like I said, it's been an adjustment. + +### Cover +![Cover up!](yiCW6XZbF.jpeg) +The Pixel Slate's keyboard + folio cover is a single (floppy) piece. The keyboard connects to contacts on the bottom edge of the Slate, and magnets hold it in place. The rear cover then folds and sticks to the back of the Slate with magnets to prop up the tablet in different angles. The magnet setup means you can smoothly transition it through varying levels of tilt, which is pretty nice. But being a single piece means the keyboard might get in the way if you're trying to use it as just a propped-up tablet. And the extra folding in the back takes up a bit of space so the Slate may not work well as a laptop on your actual lap. + +![Duet's fabric cover](9_Ze3zyBk.jpeg) + +The Duet's rear cover has a fabric finish kind of similar to the cases Google offers for their phones, and it provides a great texture for holding the tablet. It sticks to the back of the Duet through the magic of magnets, and the lower half of it folds out to create a really sturdy kickstand. And it's completely separate from the keyboard which is great for when you're using the Duet as a tablet (either handheld or propped up for watching a movie or gaming with Stadia). + +![Duet kickstand](nWRu2TB8i.jpeg) + +And this little kickstand can go *low*, much lower than the Slate. This makes it perfect for my late-night Stadia sessions while sitting in bed. I definitely prefer this approach compared to what Google did with the Pixel Slate. + +![The Duet handily wins this limbo competition](BAf7knBk5.jpeg) + +### Performance +The Duet does struggle a bit here. 
It's basically got a [smartphone processor](https://www.notebookcheck.net/Mediatek-Helio-P60T-Processor-Benchmarks-and-Specs.470711.0.html) and half the RAM of the Slate. Switching between windows and tabs sometimes takes an extra moment or two to catch up (particularly if said tab has been silently suspended in the background). Similarly, working with Linux apps is just a bit slower than you'd like it to be. Still, I've spent a bit more than a week now with the Duet as my go-to computer and it's never really been slow enough to bother me. + +That arm64 processor does make finding compatible Linux packages a little more difficult than it's been on amd64 architectures but a [little bit of digging](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications) will get past that limitation in most cases. + +The upside of that smartphone processor is that the battery life is *insane*. After about seven hours of light usage today I'm sitting at 63% - with an estimated nine hours remaining. This thing keeps going and going, even while Stadia-ing for hours. Being able to play Far Cry 5 without being tethered to a wall is so nice. + +### Fingerprint sensor +The Duet doesn't have one, and that makes me sad. + +### Conclusion +The Lenovo Chromebook Duet is an incredible little Chromeblet for the price. It clearly can't compete with the significantly-more-expensive Google Pixel Slate *on paper*, but I'm finding its size to be fantastic for the sort of on-the-go computing I do a lot of. It works better as a lap-top laptop, works better as a tablet in my hand, works better for playing Stadia in bed, and just feels more portable. It's a little sluggish at times and the squished keyboard takes some getting used to but overall I'm pretty happy with this move. 
diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/nWRu2TB8i.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/nWRu2TB8i.jpeg new file mode 100644 index 0000000..2f3ea1c Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/nWRu2TB8i.jpeg differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/qne9SybLi.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/qne9SybLi.jpeg new file mode 100644 index 0000000..03773a4 Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/qne9SybLi.jpeg differ diff --git a/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/yiCW6XZbF.jpeg b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/yiCW6XZbF.jpeg new file mode 100644 index 0000000..876c414 Binary files /dev/null and b/content/post/showdown-lenovo-chromebook-duet-vs-google-pixel-slate/yiCW6XZbF.jpeg differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/circles.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/circles.png new file mode 100644 index 0000000..7354900 Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/circles.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/creating_invitation.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/creating_invitation.png new file mode 100644 index 0000000..90fb0ce Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/creating_invitation.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/index.md b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/index.md new file mode 100644 index 0000000..4742bdf --- /dev/null +++ 
b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/index.md @@ -0,0 +1,385 @@ +--- +title: "Snikket Private XMPP Chat on Oracle Cloud Free Tier" # Title of the blog post. +date: 2021-12-23 # Date of post creation. +lastmod: 2023-03-30 # Date when last modified +description: "Notes on installing a Snikket XMPP chat instance alongside a Matrix instance on an Oracle Cloud free tier server" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "snikket.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Projects +tags: + - linux + - cloud + - docker + - containers + - chat +comment: true # Disable comment if false. +--- +**Non-technical users deserve private communications, too.** + +I shared a [few months back](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/) about the steps I took to deploy my own [Matrix](https://matrix.org/) homeserver instance, and I've happily been using the [Element](https://element.io/) client for secure end-to-end encrypted chats with a small group of my technically-inclined friends. Being able to have private conversations without having to trust a single larger provider (unlike Signal or WhatsApp) is pretty great. 
Of course, many Matrix users just create accounts directly on the matrix.org homeserver which kind of hurts the "decentralized" aspect of the network, and relatively few bother with hosting their own homeserver. There are also multiple options for homeserver software and dozens of Matrix clients each with their own set of features and limitations. As a result, an individual's experience with Matrix could vary wildly depending on what combination of software they are using. And it can be difficult to get non-technical users (like my family) on board with a new communication platform when iMessage works just fine[^1]. + +I recently came across the [Snikket project](https://snikket.org/), which [aims](https://snikket.org/about/goals/) to make decentralized end-to-end encrypted personal messaging simple and accessible for *everyone*, with an emphasis on providing a consistent experience across the network. Snikket does this by maintaining a matched set of server and client[^2] software with feature and design parity, making it incredibly easy to deploy and manage the server, and simplifying user registration with invite links. In contrast to Matrix, Snikket does not operate an open server on which users can self-register but instead requires users to be invited to a hosted instance. The idea is that a server would be used by small groups of family and friends where every user knows (and trusts!) the server operator while also ensuring the complete decentralization of the network[^3]. + +How simple is the server install? +{{< tweet user="johndotbowdre" id="1461356940466933768" >}} +Seriously, their [4-step quick-start guide](https://snikket.org/service/quickstart/) is so good that I didn't feel the need to do a blog post about my experience. I've now been casually using Snikket for a bit over a month and remain very impressed both by the software and the project itself, and have even deployed a new Snikket instance for my family to use. 
My parents were actually able to join the chat without any issues, which is a testament to how easy it is from a user perspective too. + +A few days ago I migrated my original Snikket instance from Google Cloud (GCP) to the same Oracle Cloud Infrastructure (OCI) virtual server that's hosting my Matrix homeserver so I thought I might share some notes first on the installation process. At the end, I'll share the tweaks which were needed to get Snikket to run happily alongside Matrix. + +[^1]: `John laughed at a message.` +[^2]: Snikket currently has clients for [Android](https://play.google.com/store/apps/details?id=org.snikket.android) and [iOS](https://apps.apple.com/us/app/snikket/id1545164189) with plans to add a web client in the future. +[^3]: That said, Snikket is built on the [XMPP messaging standard](https://xmpp.org/) which means it can inter-operate with other XMPP-based chat networks, servers, and clients - though of course the best experience will be had between Snikket servers and clients. + +### Infrastructure setup +You can refer to my notes from last time for details on how I [created the Ubuntu 20.04 VM](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#instance-creation) and [configured the firewall rules](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#firewall-configuration) both at the cloud infrastructure level as well as within the host using `iptables`. 
Snikket does need a few additional [firewall ports](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/firewall.md) beyond what was needed for my Matrix setup: + +| Port(s) | Transport | Purpose | +| --- | --- | --- | +| `80, 443` | TCP | Web interface and group file sharing | +| `3478-3479` | TCP/UDP | Audio/Video data proxy negotiation and discovery ([STUN/TURN](https://www.twilio.com/docs/stun-turn/faq)) | +| `5349-5350` | TCP/UDP | Audio/Video data proxy negotiation and discovery (STUN/TURN over TLS) | +| `5000` | TCP | File transfer proxy | +| `5222` | TCP | Connections from clients | +| `5269` | TCP | Connections from other servers | +| `60000-60100`[^4] | UDP | Audio/Video data proxy (TURN data) | + +As a gentle reminder, Oracle's `iptables` configuration inserts a `REJECT all` rule at the bottom of each chain. I needed to make sure that each of my `ALLOW` rules get inserted above that point. So I used `iptables -L INPUT --line-numbers` to identify which line held the `REJECT` rule, and then used `iptables -I INPUT [LINE_NUMBER] -m state --state NEW -p [PROTOCOL] --dport [PORT] -j ACCEPT` to insert the new rules above that point. 
+```bash +sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 80 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 443 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p tcp -m multiport --dports 3478,3479 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 5000 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 5222 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 5269 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p udp -m multiport --dports 3478,3479 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p udp -m multiport --dports 5349,5350 -j ACCEPT +sudo iptables -I INPUT 9 -m state --state NEW -p udp -m multiport --dports 60000:60100 -j ACCEPT +``` + +Then to verify the rules are in the right order: +```bash +$ sudo iptables -L INPUT --line-numbers -n +Chain INPUT (policy ACCEPT) +num target prot opt source destination +1 ts-input all -- 0.0.0.0/0 0.0.0.0/0 +2 ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED +3 ACCEPT icmp -- 0.0.0.0/0 0.0.0.0/0 +4 ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 +5 ACCEPT udp -- 0.0.0.0/0 0.0.0.0/0 udp spt:123 +6 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:22 +7 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:443 +8 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:80 +9 ACCEPT udp -- 0.0.0.0/0 0.0.0.0/0 state NEW multiport dports 5349,5350 +10 ACCEPT udp -- 0.0.0.0/0 0.0.0.0/0 state NEW multiport dports 60000:60100 +11 ACCEPT udp -- 0.0.0.0/0 0.0.0.0/0 state NEW multiport dports 3478,3479 +12 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:5269 +13 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:5222 +14 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:5000 +15 ACCEPT tcp -- 0.0.0.0/0 0.0.0.0/0 state NEW 
multiport dports 3478,3479 +16 REJECT all -- 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-prohibited +``` + +Before moving on, it's important to save them so the rules will persist across reboots! +```bash +$ sudo netfilter-persistent save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save +``` + +I also needed to create three DNS records with my domain registrar: +``` +# Domain TTL Class Type Target +chat.vpota.to 300 IN A 132.145.174.39 +groups.vpota.to 300 IN CNAME chat.vpota.to +share.vpota.to 300 IN CNAME chat.vpota.to +``` + +[^4]: By default Snikket can use any UDP port in the range `49152-65535` for TURN call data but restricting it to 100 ports [should be sufficient](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/firewall.md#how-many-ports-does-the-turn-service-need) for most small servers. + +### Install `docker` and `docker-compose` +Snikket is distributed as a set of docker containers which makes it super easy to get up and running on basically any Linux system. 
But, of course, you'll first need to [install `docker`](https://docs.docker.com/engine/install/ubuntu/) + +```bash +# Update package index +sudo apt update +# Install prereqs +sudo apt install ca-certificates curl gnupg lsb-release +# Add docker's GPG key +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg +# Add the docker repo +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +# Refresh the package index with the new repo added +sudo apt update +# Install docker +sudo apt install docker-ce docker-ce-cli containerd.io +``` + +And install `docker-compose` also to simplify the container management: + +```bash +# Download the docker-compose binary +sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose +# Make it executable +sudo chmod +x /usr/local/bin/docker-compose +``` + +Now we're ready to... + +### Install Snikket +This starts with just making a place for Snikket to live: + +```bash +sudo mkdir /etc/snikket +cd /etc/snikket +``` + +And then grabbing the Snikket `docker-compose` file: + +```bash +sudo curl -o docker-compose.yml https://snikket.org/service/resources/docker-compose.beta.yml +``` + +And then creating a very minimal configuration file: + +```bash +sudo vi snikket.conf +``` + +A basic config only needs two parameters: + +| Parameter | Description | +| --- | --- | +| `SNIKKET_DOMAIN` | The fully-qualified domain name that clients will connect to | +| `SNIKKET_ADMIN_EMAIL` | An admin contact for the server | + +That's it. + +In my case, I'm going to add two additional parameters to restrict the UDP TURN port range that I set in my firewalls above. 
+
+So here's my config:
+
+```
+SNIKKET_DOMAIN=chat.vpota.to
+SNIKKET_ADMIN_EMAIL=ops@example.com
+
+# Limit UDP port range
+SNIKKET_TWEAK_TURNSERVER_MIN_PORT=60000
+SNIKKET_TWEAK_TURNSERVER_MAX_PORT=60100
+```
+
+### Start it up!
+With everything in place, I can start up the Snikket server:
+
+```bash
+sudo docker-compose up -d
+```
+
+This will take a moment or two to pull down all the required container images, start them, and automatically generate the SSL certificates. Very soon, though, I can point my browser to `https://chat.vpota.to` and see a lovely login page - complete with an automagically-valid-and-trusted certificate:
+![Snikket login page](snikket_login_page.png)
+
+Of course, I don't yet have a way to log in, and like I mentioned earlier Snikket doesn't offer open user registration. Every user (even me, the admin!) has to be invited. Fortunately I can generate my first invite directly from the command line:
+
+```bash
+sudo docker exec snikket create-invite --admin --group default
+```
+
+That command will return a customized invite link which I can copy and paste into my browser.
+![Snikket invite page](snikket_invite_page.png)
+
+If I've got a mobile device handy, I can go ahead and install the client there to get started; the app will even automatically generate a secure password[^6] so that I (and my users) don't have to worry about it. Otherwise, clicking the **register an account manually** link at the bottom of the screen lets me create a username and password directly.
+
+With shiny new credentials in hand, I can log in at the web portal to manage my account or access the admin panel.
+
+[^6]: It's also easy for the administrator to generate password reset links for users who need a new password.
+
+![Welcome home, John!](welcome_home_john.png)
+
+### Invite more users
+Excellent, I've got a private chat server but no one to chat privately with[^7]. Time to fix that, eh?
I *could* use that `docker exec snikket create-invite` command line again to create another invite link, but I think I'll do that through the admin panel instead. +![Snikket admin panel](snikket_admin_panel.png) + +Before I get into the invite process, I'm going to take a brief detour to discuss *circles*. For those of you who didn't make a comfortable (though short-lived) home on Google+[^8], Snikket uses the term circle to refer to social circles within a local community. Each server gets a circle created by default, and new users will be automatically added to that circle. Users within the same circle will appear in each other's contact list and will also be added into a group chat together. + +It might make sense to use circles to group users based on how they know each other. There could be a circle for family, a circle for people who work(ed) together, a circle for the members of a club, and a circle for The Gang that gets together every couple of weeks for board games. + +The **Manage circles** button lets you, well, manage circles: create them, add/remove users to them, delete them, whatever. I just created a new circle called "Because Internets". + +![Circles!](circles.png) + +That yellow link icon in the **Actions** column will generate a one-time use link that could be used to invite an individual user to create an account on the server and join the selected circle. + +Instead, I'll go back to the admin area and click on the **Manage invitations** button, which will give me a bit more control over how the invite works. +![Creating an invitation](creating_invitation.png) + +As you can see, this page includes a toggle to select the invitation type: +- **Individual** invitations are single-use links so should be used for inviting one user at a time. +- **Group** invitation links can be used multiple times by multiple users. 
+
+Whichever type is selected, I also need to select the time period for which the invitation link will be valid as well as which circle anyone who accepts the invitation will be added to. And then I can generate and share the new invite link as needed.
+![New invite link](new_invite_link.png)
+
+[^7]: The ultimate in privacy?
+[^8]: Too soon? Yep. Still too soon.
+
+### Advanced configuration
+Okay, so that covers everything that's needed for a standard Snikket installation in OCI. The firewall configuration in particular would have been much simpler in GCP (where I could have used `ufw`) but I still think it's overall pretty straightforward. But what about my case where I wanted to put Snikket on the same server as my Matrix instance? Or the steps I needed to move Snikket from the GCP server onto the OCI server?
+
+Let's start with adjusting the Caddy reverse proxy to accommodate Snikket since that will be needed before I can start Snikket on the new server.
+
+#### Caddy reverse proxy
+Remember [Caddy](https://caddyserver.com/) from [last time](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#reverse-proxy-setup)? It's a super-handy easy-to-configure web server, and it greatly simplified the reverse proxy configuration needed for my Matrix instance.
+
+One of the really cool things about Caddy is that it automatically generates SSL certificates for any domain name you tell it to serve (as long as the domain ownership can be validated through the [ACME](https://en.wikipedia.org/wiki/Automated_Certificate_Management_Environment) challenge process, of course). But Snikket already handles its own certificates so I'll need to make sure that Caddy doesn't get in the way of those challenges.
+
+Fortunately, the [Snikket reverse proxy documentation](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/reverse_proxy.md#basic) was recently updated with a sample config for making this happen.
Matrix and Snikket really only overlap on ports `80` and `443` so those are the only ports I'll need to handle, which lets me go for the "Basic" configuration instead of the "Advanced" one. I can just adapt the sample config from the documentation and add that to my existing `/etc/caddy/Caddyfile` alongside the config for Matrix: + +``` +http://chat.vpota.to, +http://groups.chat.vpota.to, +http://share.chat.vpota.to { + reverse_proxy localhost:5080 +} + +chat.vpota.to, +groups.chat.vpota.to, +share.chat.vpota.to { + reverse_proxy https://localhost:5443 { + transport http { + tls_insecure_skip_verify + } + } +} + +matrix.bowdre.net { + reverse_proxy /_matrix/* http://localhost:8008 + reverse_proxy /_synapse/client/* http://localhost:8008 +} + +bowdre.net { + route { + respond /.well-known/matrix/server `{"m.server": "matrix.bowdre.net:443"}` + redir https://virtuallypotato.com + } +} +``` + +So Caddy will be listening on port `80` for traffic to `http://chat.vpota.to`, `http://groups.chat.vpota.to`, and `http://share.chat.vpota.to`, and will proxy that HTTP traffic to the Snikket instance on port `5080`. Snikket will automatically redirect HTTP traffic to HTTPS except in the case of the required ACME challenges so that the certs can get renewed. It will also listen on port `443` for traffic to the same hostnames and will pass that into Snikket on port `5443` *without verifying certs* between the backside of the proxy and the front side of Snikket. This is needed since there isn't an easy way to get Caddy to trust the certificates used internally by Snikket[^10]. + +And then any traffic to `matrix.bowdre.net` or `bowdre.net` still gets handled as described in that other post. + +Did you notice that Snikket will need to get reconfigured to listen on `5080` and `5443` now? We'll get to that in just a minute. First, let's get the data onto the new server. 
+
+[^10]: Remember that both Caddy and Snikket are managing their own fully-valid certificates in this scenario, but they don't necessarily know that about each other.
+
+#### Migrating a Snikket instance
+Since Snikket is completely containerized, moving between hosts is a simple matter of transferring the configuration and data.
+
+The Snikket team has actually put together a couple of scripts to assist with [backing up](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/backup.sh) and [restoring](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/restore.sh) an instance. I just adapted the last line of each to do what I needed:
+
+```bash
+sudo docker run --rm --volumes-from=snikket \
+  -v "/home/john/snikket-backup/":/backup debian:buster-slim \
+  tar czf /backup/snikket-"$(date +%F-%H%M)".tar.gz /snikket
+```
+
+That will drop a compressed backup of the `snikket_data` volume into the specified directory, `/home/john/snikket-backup/`. While I'm at it, I'll also go ahead and copy the `docker-compose.yml` and `snikket.conf` files from `/etc/snikket/`:
+
+```bash
+$ sudo cp -a /etc/snikket/* /home/john/snikket-backup/
+$ ls -l /home/john/snikket-backup/
+total 1728
+-rw-r--r-- 1 root root 993 Dec 19 17:47 docker-compose.yml
+-rw-r--r-- 1 root root 1761046 Dec 19 17:46 snikket-2021-12-19-1745.tar.gz
+-rw-r--r-- 1 root root 299 Dec 19 17:47 snikket.conf
+```
+
+And I can then zip that up for easy transfer:
+```bash
+tar czvf /home/john/snikket-backup.tar.gz /home/john/snikket-backup/
+```
+
+This would be a great time to go ahead and stop this original Snikket instance. After all, nothing that happens after the backup was exported is going to carry over anyway.
+
+```bash
+sudo docker-compose down
+```
+{{% notice tip "Update DNS" %}}
+This is also a great time to update the `A` record for `chat.vpota.to` so that it points to the new server.
It will need a little bit of time for the change to trickle out, and the updated record really needs to be in place before starting Snikket on the new server so that there aren't any certificate problems. +{{% /notice %}} + + +Now I just need to transfer the archive from one server to the other. I've got [Tailscale](https://tailscale.com/)[^11] running on my various cloud servers so that they can talk to each other through a secure WireGuard tunnel (remember [WireGuard](/cloud-based-wireguard-vpn-remote-homelab-access/)?) without having to open any firewall ports between them, and that means I can just use `scp` to transfer the file without any fuss. I can even leverage Tailscale's [Magic DNS](https://tailscale.com/kb/1081/magicdns/) feature to avoid worrying with any IPs, just the hostname registered in Tailscale (`chat-oci`): + +```bash +scp /home/john/snikket-backup.tar.gz chat-oci:/home/john/ +``` + +Next, I SSH in to the new server and unzip the archive: + +```bash +$ ssh snikket-oci-server +$ tar xf snikket-backup.tar.gz +$ cd snikket-backup +$ ls -l +total 1728 +-rw-r--r-- 1 root root 993 Dec 19 17:47 docker-compose.yml +-rw-r--r-- 1 root root 1761046 Dec 19 17:46 snikket-2021-12-19-1745.tar.gz +-rw-r--r-- 1 root root 299 Dec 19 17:47 snikket.conf +``` + +Before I can restore the content of the `snikket-data` volume on the new server, I'll need to first go ahead and set up Snikket again. I've already got `docker` and `docker-compose` installed from when I installed Matrix so I'll skip to creating the Snikket directory and copying in the `docker-compose.yml` and `snikket.conf` files. 
+ +```bash +sudo mkdir /etc/snikket +sudo cp docker-compose.yml /etc/snikket/ +sudo cp snikket.conf /etc/snikket/ +cd /etc/snikket +``` + +Before I fire this up on the new host, I need to edit the `snikket.conf` to tell Snikket to use those different ports defined in the reverse proxy configuration using [a couple of `SNIKKET_TWEAK_*` lines](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/reverse_proxy.md#snikket): + +``` +SNIKKET_DOMAIN=chat.vpota.to +SNIKKET_ADMIN_EMAIL=ops@example.com + +SNIKKET_TWEAK_HTTP_PORT=5080 +SNIKKET_TWEAK_HTTPS_PORT=5443 +SNIKKET_TWEAK_TURNSERVER_MIN_PORT=60000 +SNIKKET_TWEAK_TURNSERVER_MAX_PORT=60100 +``` + +Alright, let's start up the Snikket server: +```bash +sudo docker-compose up -d +``` + +After a moment or two, I can point a browser to `https://chat.vpota.to` and see the login screen (with a valid SSL certificate!) but I won't actually be able to log in. As far as Snikket is concerned, this is a brand new setup. + +Now I can borrow the last line from the [`restore.sh` script](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/restore.sh) to bring in my data: + +```bash +sudo docker run --rm --volumes-from=snikket \ + --mount type=bind,source="/home/john/snikket-backup/snikket-2021-12-19-1745.tar.gz",destination=/backup.tar.gz \ + debian:buster-slim \ + bash -c "rm -rf /snikket/*; tar xvf /backup.tar.gz -C /" +``` + +If I refresh the login page I can now log back in with my account and verify that everything is just the way I left it back on that other server: +![Welcome (back) home, John!](welcome_home_john.png) + +And I can open the Snikket client on my phone and get back to chatting - this migration was a success! + +[^11]: More on how I use Tailscale [here](/secure-networking-made-simple-with-tailscale/)! 
diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/new_invite_link.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/new_invite_link.png new file mode 100644 index 0000000..905d86d Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/new_invite_link.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket.png new file mode 100644 index 0000000..79e482a Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_admin_panel.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_admin_panel.png new file mode 100644 index 0000000..e077c93 Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_admin_panel.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_invite_page.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_invite_page.png new file mode 100644 index 0000000..540f0a5 Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_invite_page.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_login_page.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_login_page.png new file mode 100644 index 0000000..79dd704 Binary files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/snikket_login_page.png differ diff --git a/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/welcome_home_john.png b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/welcome_home_john.png new file mode 100644 index 0000000..f2e71b7 Binary 
files /dev/null and b/content/post/snikket-private-xmpp-chat-on-oracle-cloud-free-tier/welcome_home_john.png differ diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/create_auth_key.png b/content/post/tailscale-golink-private-shortlinks-tailnet/create_auth_key.png new file mode 100644 index 0000000..6427b9d Binary files /dev/null and b/content/post/tailscale-golink-private-shortlinks-tailnet/create_auth_key.png differ diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/create_new_link.png b/content/post/tailscale-golink-private-shortlinks-tailnet/create_new_link.png new file mode 100644 index 0000000..44d7d82 Binary files /dev/null and b/content/post/tailscale-golink-private-shortlinks-tailnet/create_new_link.png differ diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/empty_go_page.png b/content/post/tailscale-golink-private-shortlinks-tailnet/empty_go_page.png new file mode 100644 index 0000000..33feff0 Binary files /dev/null and b/content/post/tailscale-golink-private-shortlinks-tailnet/empty_go_page.png differ diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/golinks.png b/content/post/tailscale-golink-private-shortlinks-tailnet/golinks.png new file mode 100644 index 0000000..b062503 Binary files /dev/null and b/content/post/tailscale-golink-private-shortlinks-tailnet/golinks.png differ diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/index.md b/content/post/tailscale-golink-private-shortlinks-tailnet/index.md new file mode 100644 index 0000000..e723cdd --- /dev/null +++ b/content/post/tailscale-golink-private-shortlinks-tailnet/index.md @@ -0,0 +1,155 @@ +--- +title: "Tailscale golink: Private Shortlinks for your Tailnet" # Title of the blog post. +date: 2023-02-12 +lastmod: 2023-02-13 +description: "How to deploy Tailscale's golink service in a Docker container." +featured: false # Sets if post is a featured post, making appear on the home page side bar. 
+draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "golinks.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Projects # Projects, Scripts, vRA8, K8s on vSphere +tags: + - docker + - vpn + - tailscale + - wireguard + - containers +comment: true # Disable comment if false. +--- +I've shared in the past about how I use [custom search engines in Chrome](/abusing-chromes-custom-search-engines-for-fun-and-profit/) as quick web shortcuts. And I may have mentioned [my love for Tailscale](/tags/tailscale/) a time or two as well. Well I recently learned of a way to combine these two passions: [Tailscale golink](https://github.com/tailscale/golink). The [golink announcement post on the Tailscale blog](https://tailscale.com/blog/golink/) offers a great overview of the service: +> Using golink, you can create and share simple go/name links for commonly accessed websites, so that anyone in your network can access them no matter the device they’re on — without requiring browser extensions or fiddling with DNS settings. And because golink integrates with Tailscale, links are private to users in your tailnet without any separate user management, logins, or security policies. + +And these go links don't have to be simply static shortcuts either; they can also conditionally insert text into the target URL. 
That lets the shortcuts work similarly to my custom search engines in Chrome, but they are available on *any* device in my tailnet rather than just those that run Chrome. The shortcuts even work from command-line utilities like `curl`, provided that you pass a flag like `-L` to follow redirects.
+![Moon weather report](moon_wx.png)
+
+Sounds great - but how do you actually make golink available on your tailnet? Well, here's what I did to deploy the [golink Docker image](https://github.com/tailscale/golink/pkgs/container/golink) on a [Photon OS VM I set up running on my Quartz64 running ESXi-ARM](/esxi-arm-on-quartz64/#workload-creation).
+
+### Tailnet prep
+There are three things I'll need to do in the Tailscale admin portal before moving on:
+#### Create an ACL tag
+I assign ACL tags to devices in my tailnet based on their location and/or purpose, and I'm then able to use those in a policy to restrict access between certain devices. To that end, I'm going to create a new `tag:golink` tag for this purpose. Creating a new tag in Tailscale is really just going to the [Access Controls page of the admin console](https://login.tailscale.com/admin/acls) and editing the policy to specify a `tagOwner` who is permitted to assign the tag:
+```text {hl_lines=[11]}
+  "groups": {
+    "group:admins": ["john@example.com"],
+  },
+  "tagOwners": {
+    "tag:home": ["group:admins"],
+    "tag:cloud": ["group:admins"],
+    "tag:client": ["group:admins"],
+    "tag:dns": ["group:admins"],
+    "tag:rsync": ["group:admins"],
+    "tag:funnel": ["group:admins"],
+    "tag:golink": ["group:admins"],
+  },
+```
+
+#### Configure ACL access
+This step is really only necessary since I've altered the default Tailscale ACL and prevent my nodes from communicating with each other unless specifically permitted.
I want to make sure that everything on my tailnet can access golink: + +```text +"acls": [ + { + // make golink accessible to everything + "action": "accept", + "users": ["*"], + "ports": [ + "tag:golink:80", + ], + }, + ... + ], +``` + +#### Create an auth key +The last prerequisite task is to create a new authentication key that the golink container can use to log in to Tailscale since I won't be running `tailscale` interactively. This can easily be done from the [Settings page](https://login.tailscale.com/admin/settings/keys). I'll go ahead and set the key to expire in 1 day (since I'm going to use it in just a moment), make sure that the Ephemeral option is _disabled_ (since I don't want the new node to lose its authorization once it disconnects), and associate it with my new `tag:golink` tag. + +![Creating a new auth key](create_auth_key.png) + +Applying that tag does two things for me: (1) it makes it easy to manage access with the ACL policy file edited above, and (2) it automatically sets it so that the node's token won't automatically expire. Once it's auth'd and connected to my tailnet, it'll stay there. + +After clicking the **Generate key** button, the key will be displayed. This is the only time it will be visible so be sure to copy it somewhere safe! + + +### Docker setup +The [golink repo](https://github.com/tailscale/golink) offers this command for running the container: +```shell +docker run -it --rm ghcr.io/tailscale/golink:main +``` + +The doc also indicates that I can pass the auth key to the golink service via the `TS_AUTHKEY` environment variable, and that all the configuration will be stored in `/home/nonroot` (which will be owned by uid/gid `65532`). I'll take this knowledge and use it to craft a `docker-compose.yml` to simplify container management. 
+
+```shell
+mkdir -p golink/data
+cd golink
+chown 65532:65532 data
+vi docker-compose.yaml
+```
+
+```yaml
+# golink docker-compose.yaml
+version: '3'
+services:
+  golink:
+    container_name: golink
+    restart: unless-stopped
+    image: ghcr.io/tailscale/golink:main
+    volumes:
+      - './data:/home/nonroot'
+    environment:
+      - TS_AUTHKEY=MY_TS_AUTHKEY
+```
+
+I can then start the container with `sudo docker-compose up -d`, and check the Tailscale admin console to see that the new machine was registered successfully:
+![Newly registered machine](registered_machine.png)
+
+And I can point a web browser to `go/` and see the (currently-empty) landing page:
+![Empty go page](empty_go_page.png)
+
+{{% notice tip "Security cleanup!" %}}
+The `TS_AUTHKEY` is only needed for this initial authentication; now that the container is connected to my Tailnet I can remove that line from the `docker-compose.yaml` file to avoid having a sensitive credential hanging around. Future (re)starts of the container will use the token stored in the golink database.
+{{% /notice %}}
+
+### Get go'ing
+Getting started with golink is pretty simple - just enter a shortname and a destination:
+![Creating a new link](create_new_link.png)
+
+So now when I enter `go/vcenter` it will automatically take me to the vCenter in my homelab. That's handy... but we can do better. You see, golink also supports Go template syntax, which allows it to behave a bit like those custom search engines I mentioned earlier.
+
+I can go to `go/.detail/LINK_NAME` to edit the link, so I hit up `go/.detail/vcenter` and add a bit to the target URL:
+```
+https://vcsa.lab.bowdre.net/ui/{{with .Path}}app/search?query={{.}}&searchType=simple{{end}}
+```
+
+Now if I just enter `go/vcenter` I will go to the vSphere UI, while if I enter something like `go/vcenter/vm_name` I will instead be taken directly to the corresponding search results.
+ +Some of my other golinks: + +| Shortlink | Destination URL | Description | +| --- | --- | --- | +| `code` | `https://github.com/search?type=code&q=user:jbowdre{{with .Path}}+{{.}}{{end}}` | searches my code on Github | +| `ipam` | `https://ipam.lab.bowdre.net/{{with .Path}}tools/search/{{.}}{{end}}` | searches my lab phpIPAM instance | +| `pdb` | `https://www.protondb.com/{{with .Path}}search?q={{.}}{{end}}` | searches [protondb](https://www.protondb.com/), super-handy for checking game compatibility when [Tailscale is installed on a Steam Deck](https://tailscale.com/blog/steam-deck/) | +| `tailnet` | `https://login.tailscale.com/admin/machines?q={{.Path}}` | searches my Tailscale admin panel for a machine name | +| `vpot8` | `https://www.virtuallypotato.com/{{with .Path}}search?query={{.}}{{end}}` | searches this here site | +| `sho` | `https://www.shodan.io/{{with .Path}}search?query={{.}}{{end}}` | searches Shodan for interesting internet-connected systems | +| `tools` | `https://neeva.com/spaces/m_Bhx8tPfYQbOmaW1UHz-3a_xg3h2amlogo2GzgD` | shortcut to my [Tech Toolkit space](https://neeva.com/spaces/m_Bhx8tPfYQbOmaW1UHz-3a_xg3h2amlogo2GzgD) on Neeva | +| `randpass` | `https://www.random.org/passwords/?num=1\u0026len=24\u0026format=plain\u0026rnd=new` | generates a random 24-character string suitable for use as a password (`curl`-friendly) | +| `wx` | `https://wttr.in/{{ .Path }}` | local weather report based on geolocation or weather for a designated city (`curl`-friendly) | + +#### Back up and restore +You can browse to `go/.export` to see a JSON-formatted listing of all configured shortcuts - or, if you're clever, you could do something like `curl http://go/.export -o links.json` to download a copy. + +To restore, just pass `--snapshot /path/to/links.json` when starting golink. 
What I usually do is copy the file into the `data` folder that I'm mounting as a Docker volume, and then just run: +```shell +sudo docker exec golink /golink --sqlitedb /home/nonroot/golink.db --snapshot /home/nonroot/links.json +``` + +### Conclusion +This little golink utility has been pretty handy on my Tailnet so far. It seems so simple, but I'm really impressed by how well it works. If you happen to try it out, I'd love to hear how you're putting it to use. \ No newline at end of file diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/moon_wx.png b/content/post/tailscale-golink-private-shortlinks-tailnet/moon_wx.png new file mode 100644 index 0000000..760dbe1 Binary files /dev/null and b/content/post/tailscale-golink-private-shortlinks-tailnet/moon_wx.png differ diff --git a/content/post/tailscale-golink-private-shortlinks-tailnet/registered_machine.png b/content/post/tailscale-golink-private-shortlinks-tailnet/registered_machine.png new file mode 100644 index 0000000..d7b5dad Binary files /dev/null and b/content/post/tailscale-golink-private-shortlinks-tailnet/registered_machine.png differ diff --git a/content/post/using-vs-code-to-explore-giant-log-bundles/IL29_Shlg.png b/content/post/using-vs-code-to-explore-giant-log-bundles/IL29_Shlg.png new file mode 100644 index 0000000..585bff4 Binary files /dev/null and b/content/post/using-vs-code-to-explore-giant-log-bundles/IL29_Shlg.png differ diff --git a/content/post/using-vs-code-to-explore-giant-log-bundles/PPZu_UOGO.png b/content/post/using-vs-code-to-explore-giant-log-bundles/PPZu_UOGO.png new file mode 100644 index 0000000..f112306 Binary files /dev/null and b/content/post/using-vs-code-to-explore-giant-log-bundles/PPZu_UOGO.png differ diff --git a/content/post/using-vs-code-to-explore-giant-log-bundles/SBKtJ8K1p.png b/content/post/using-vs-code-to-explore-giant-log-bundles/SBKtJ8K1p.png new file mode 100644 index 0000000..a592c10 Binary files /dev/null and 
b/content/post/using-vs-code-to-explore-giant-log-bundles/SBKtJ8K1p.png differ diff --git a/content/post/using-vs-code-to-explore-giant-log-bundles/index.md b/content/post/using-vs-code-to-explore-giant-log-bundles/index.md new file mode 100644 index 0000000..2c7d3c9 --- /dev/null +++ b/content/post/using-vs-code-to-explore-giant-log-bundles/index.md @@ -0,0 +1,32 @@ +--- +series: Tips +date: "2021-02-18T08:34:30Z" +thumbnail: PPZu_UOGO.png +usePageBundles: true +tags: +- logs +- vmware +title: Using VS Code to explore giant log bundles +toc: false +--- + +I recently ran into a peculiar issue after upgrading my vRealize Automation homelab to the new 8.3 release, and the error message displayed in the UI didn't give me a whole lot of information to work with: +![Unfortunately my 'Essential Googling The Error Message' O'RLY book was no help with making the bad words go away](IL29_Shlg.png) + +I connected to the vRA appliance to try to find the relevant log excerpt, but [doing so isn't all that straightforward](https://www.stevenbright.com/2020/01/vmware-vrealize-automation-8-0-logs/#:~:text=Access%20Logs%20from%20the%20CLI) given the containerized nature of the services. +So instead I used the `vracli log-bundle` command to generate a bundle of all relevant logs, and I then transferred the resulting (2.2GB!) `log-bundle.tar` to my workstation for further investigation. I expanded the tar and ran `tree -P '*.log'` to get a quick idea of what I've got to deal with: +![That's a lot of logs!](wAa9KjBHO.png) +Ugh. Even if I knew which logs I wanted to look at (and I don't) it would take ages to dig through all of this. There's got to be a better way. + +And there is! 
Visual Studio Code lets you open an entire directory tree in the editor: +![Directory opened in VS Code](SBKtJ8K1p.png) + +You can then "Find in Files" with `Ctrl`+`Shift`+`F`, and VS Code will *very* quickly search through all the files to find what you're looking for: +![Searching all files](PPZu_UOGO.png) + +You can also click the "Open in editor" link at the top of the search results to open the matching snippets in a single view: +![All the matching strings together](kJ_l7gPD2.png) + +Adjusting the number at the far top right of that view will dynamically tweak how many context lines are included with each line containing the search term. + +In this case, the logs didn't actually tell me what was going wrong - but I felt much better for having explored them! Maybe this little trick will help you track down what's ailing you. \ No newline at end of file diff --git a/content/post/using-vs-code-to-explore-giant-log-bundles/kJ_l7gPD2.png b/content/post/using-vs-code-to-explore-giant-log-bundles/kJ_l7gPD2.png new file mode 100644 index 0000000..7460851 Binary files /dev/null and b/content/post/using-vs-code-to-explore-giant-log-bundles/kJ_l7gPD2.png differ diff --git a/content/post/using-vs-code-to-explore-giant-log-bundles/wAa9KjBHO.png b/content/post/using-vs-code-to-explore-giant-log-bundles/wAa9KjBHO.png new file mode 100644 index 0000000..bae4483 Binary files /dev/null and b/content/post/using-vs-code-to-explore-giant-log-bundles/wAa9KjBHO.png differ diff --git a/content/simplex.md b/content/simplex.md new file mode 100644 index 0000000..1dfc46e --- /dev/null +++ b/content/simplex.md @@ -0,0 +1,32 @@ ++++ +comments = false +toc = false +usePageBundles = false +showDate = false +showShare = false +showReadTime = false +timeless = true ++++ +*You can **[contact me on SimpleX 
Chat](https://simplex.chat/contact/#/?v=1-2&smp=smp%3A%2F%2FkYx5LmVD9FMM8hJN4BQqL4WmeUNZn8ipXsX2UkBoiHE%3D%40smp.vpota.to%2FFLy56WLZ79Xda3gW0BjUWDotP6uaparF%23%2F%3Fv%3D1-2%26dh%3DMCowBQYDK2VuAyEAZTkRAbrxefYZbb5Qypb9BXfuN0X0tzSPEv682DkNcn0%253D)** by clicking that link or scanning the QR code below.* + +![](/images/simplex-invite.png) + +[SimpleX Chat](https://simplex.chat/) is a secure messaging solution with a strong emphasis on user privacy. It's (naturally) end-to-end encrypted, doesn't require (or collect) *any* information about you in order to sign up, doesn't use any persistent user identifiers (not even a randomly-generated one), is fully decentralized, and is *not* affiliated with any cryptocurrency project/scam. + +Incoming messages are routed through a pool of servers so that your conversations don't all follow the same path - and no server knows anything about conversations that aren't routed through it. Servers only hold your messages long enough to ensure they get to you, and those messages exist only in the encrypted database on your device once they've been delivered. (Fortunately, SimpleX makes it easy to back up that database and restore it on a new device so you don't lose any messages or contacts.) + +The app is also packed with other features like disappearing messages, encrypted file transfers, encrypted voice messages, encrypted audio and video calls, decentralized private groups, and a cool incognito mode which connects new conversations to a randomly-generated profile instead of your primary one. There's even a [CLI client](https://github.com/simplex-chat/simplex-chat/blob/stable/docs/CLI.md)! + +## Servers +You can easily host your own [simplexmq server](https://github.com/simplex-chat/simplexmq) for handling your inbound message queue, and I've done just that; in fact, I've deployed three! 
And, as one of my closest internet friends, *you're welcome to use them as well.* + +Just add these in the SimpleX app at **Settings > Network & servers > SMP servers > + Add server...**. Enable the option to use them for new connections, and they'll be added to the pool used for incoming messages in new conversations. If you want to use them immediately for existing conversations, go into each conversation's options menu and use the **Switch receiving address** option. You can also *disable* the option to use the default servers for new conversations if you only want messages to be routed through specific servers, but that does increase the likelihood of concurrent conversations being routed the same way. More servers, more path options, less metadata in any one place. + +![](/images/smp-vpota-to.png) +`smp://kYx5LmVD9FMM8hJN4BQqL4WmeUNZn8ipXsX2UkBoiHE=@smp.vpota.to` + +![](/images/smp1-vpota-to.png) +`smp://TbUrGydawdVKID0Lvix14UkaN-WarFgqXx4kaEG8Trw=@smp1.vpota.to` + +![](/images/smp2-vpota-to.png) +`smp://tNfQisxTQ9MhKpFDTbx9RnjgWigtxF1a26jroy5-rR4=@smp2.vpota.to` diff --git a/layouts/.gitkeep b/layouts/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/static/.gitignore b/static/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/static/css/logo.css b/static/css/logo.css new file mode 100644 index 0000000..56006ef --- /dev/null +++ b/static/css/logo.css @@ -0,0 +1,37 @@ +.page__logo { + padding: 0; + margin: 0; + font-weight: inherit; + color: var(--off-fg); +} + +.page__logo:before { + content: none; +} + +.page__logo-inner { + display: block; + background: var(--logo); + opacity: 0.80; + padding: 0.25rem; +} + +a.page__logo-inner:link, a.page__logo-inner:visited { + color: inherit; + text-decoration: inherit; +} + +a.page__logo-inner:hover, +a.page__logo-inner:active { + opacity: 1; +} + +.page__logo-inner:before { + content: "["; + color: var(--bg); +} + +.page__logo-inner:after { + content: "] $"; + color: var(--bg); +} diff 
--git a/static/css/palettes/runtimeterror.css b/static/css/palettes/runtimeterror.css new file mode 100644 index 0000000..4558c4e --- /dev/null +++ b/static/css/palettes/runtimeterror.css @@ -0,0 +1,21 @@ +/* base16 runtimeterror + */ + +:root { + --base00: #181818; /* background */ + --base01: #282828; /* alt background */ + --base02: #383838; /* in-text backgrounds */ + --base03: #585858; /* muted text */ + --base04: #959494; /* alt foreground */ + --base05: #d8d8d8; /* foreground */ + --base06: #e8e8e8; + --base07: #f8f8f8; + --base08: #ab4642; + --base09: #dc9656; + --base0A: #f7ca88; /* highlights */ + --base0B: #772a28; /* primary accent */ + --base0C: #661514; /* active links */ + --base0D: #c45a5a; /* links */ + --base0E: #ba8baf; + --base0F: #a16946; +} diff --git a/static/images/broken-computer.svg b/static/images/broken-computer.svg new file mode 100644 index 0000000..cbe1c5f --- /dev/null +++ b/static/images/broken-computer.svg @@ -0,0 +1,13 @@ + + + + + + \ No newline at end of file diff --git a/static/images/simplex-invite.png b/static/images/simplex-invite.png new file mode 100644 index 0000000..a07745e Binary files /dev/null and b/static/images/simplex-invite.png differ diff --git a/static/images/smp-vpota-to.png b/static/images/smp-vpota-to.png new file mode 100644 index 0000000..9a21455 Binary files /dev/null and b/static/images/smp-vpota-to.png differ diff --git a/static/images/smp1-vpota-to.png b/static/images/smp1-vpota-to.png new file mode 100644 index 0000000..db66545 Binary files /dev/null and b/static/images/smp1-vpota-to.png differ diff --git a/static/images/smp2-vpota-to.png b/static/images/smp2-vpota-to.png new file mode 100644 index 0000000..8ba52c4 Binary files /dev/null and b/static/images/smp2-vpota-to.png differ diff --git a/themes/hugo-cloak-email b/themes/hugo-cloak-email new file mode 160000 index 0000000..19e0284 --- /dev/null +++ b/themes/hugo-cloak-email @@ -0,0 +1 @@ +Subproject commit 19e0284810127d1d56e2d813a311b0bd73bba96c 
diff --git a/themes/hugo-notice b/themes/hugo-notice new file mode 160000 index 0000000..cce7afd --- /dev/null +++ b/themes/hugo-notice @@ -0,0 +1 @@ +Subproject commit cce7afd430d49fe01ad6e2c006c3518fddc8ad77 diff --git a/themes/risotto b/themes/risotto new file mode 160000 index 0000000..21fdc87 --- /dev/null +++ b/themes/risotto @@ -0,0 +1 @@ +Subproject commit 21fdc87b56e662133c9bba6ee96940ed8f5be6a6