mirror of https://github.com/jbowdre/runtimeterror.git
synced 2024-11-22 06:52:18 +00:00

Merge branch 'main' into drafts

This commit is contained in: commit 3b8dd67877

76 changed files with 3091 additions and 1512 deletions
.gitignore (vendored, 8 changes)

@@ -1,2 +1,8 @@
 .hugo_build.lock
-resources/
+/node_modules/
+/package-lock.json
+/package.json
+/public/
+/resources/
+/.env
+
assets/css/code-copy-button.css (new file, 72 lines)

@@ -0,0 +1,72 @@
/* adapted from https://digitaldrummerj.me/hugo-add-copy-code-snippet-button/ */

.highlight {
  position: relative;
  z-index: 0;
  padding: 0;
  margin: 40px 0 10px 0;
  border-radius: 4px;
}

.copy-code-button {
  position: absolute;
  z-index: -1;
  right: 0px;
  top: -26px;
  font-size: 13px;
  font-weight: 700;
  line-height: 14px;
  letter-spacing: 0.5px;
  width: 65px;
  color: var(--fg);
  background-color: var(--bg);
  border: 1.25px solid var(--off-bg);
  border-top-left-radius: 4px;
  border-top-right-radius: 4px;
  border-bottom-right-radius: 0px;
  border-bottom-left-radius: 0px;
  white-space: nowrap;
  padding: 6px 6px 7px 6px;
  margin: 0 0 0 1px;
  cursor: pointer;
  opacity: 0.6;
}

.copy-code-button:hover,
.copy-code-button:focus,
.copy-code-button:active,
.copy-code-button:active:hover {
  color: var(--off-bg);
  background-color: var(--off-fg);
  opacity: 0.8;
}

.copyable-text-area {
  position: absolute;
  height: 0;
  z-index: -1;
  opacity: .01;
}

.torchlight [data-lang]:before {
  position: absolute;
  z-index: -1;
  top: -26px;
  left: 0px;
  content: attr(data-lang);
  font-size: 13px;
  font-weight: 700;
  color: var(--fg);
  background-color: var(--bg);
  border-top-left-radius: 4px;
  border-top-right-radius: 4px;
  border-bottom-left-radius: 0;
  border-bottom-right-radius: 0;
  padding: 6px 6px 7px 6px;
  line-height: 14px;
  opacity: 0.6;
  letter-spacing: 0.5px;
  border: 1.25px solid var(--off-bg);
  margin: 0 0 0 1px;
}
assets/css/torchlight.css (new file, 147 lines)

@@ -0,0 +1,147 @@
/**********************************************
 * Basic styling for Torchlight code blocks.  *
 **********************************************/

/*
  Margin and rounding are personal preferences,
  overflow-x-auto is recommended.
*/
pre {
  border-radius: 0.25rem;
  margin-top: 1rem;
  margin-bottom: 1rem;
  overflow-x: auto;
  padding: 0;
}

/*
  Add some vertical padding and expand the width
  to fill its container. The horizontal padding
  comes at the line level so that background
  colors extend edge to edge.
*/
pre.torchlight {
  display: block;
  padding-top: 1rem;
  padding-bottom: 1rem;
}

/*
  Horizontal line padding to match the vertical
  padding from the code block above.
*/
pre.torchlight .line {
  padding-left: 1rem;
  padding-right: 1rem;
}

/*
  Push the code away from the line numbers and
  summary caret indicators.
*/
pre.torchlight .line-number,
pre.torchlight .summary-caret {
  margin-right: 1rem;
}

/**********************************************
 * Focus styling                              *
 **********************************************/

/*
  Blur and dim the lines that don't have the `.line-focus` class,
  but are within a code block that contains any focus lines.
*/
.torchlight.has-focus-lines .line:not(.line-focus) {
  transition: filter 0.35s, opacity 0.35s;
  filter: blur(.095rem);
  opacity: .65;
}

/*
  When the code block is hovered, bring all the lines into focus.
*/
.torchlight.has-focus-lines:hover .line:not(.line-focus) {
  filter: blur(0px);
  opacity: 1;
}

/**********************************************
 * Collapse styling                           *
 **********************************************/

.torchlight summary:focus {
  outline: none;
}

/* Hide the default markers, as we provide our own */
.torchlight details > summary::marker,
.torchlight details > summary::-webkit-details-marker {
  display: none;
}

.torchlight details .summary-caret::after {
  pointer-events: none;
}

/* Add spaces to keep everything aligned */
.torchlight .summary-caret-empty::after,
.torchlight details .summary-caret-middle::after,
.torchlight details .summary-caret-end::after {
  content: " ";
}

/* Show a minus sign when the block is open. */
.torchlight details[open] .summary-caret-start::after {
  content: "-";
}

/* And a plus sign when the block is closed. */
.torchlight details:not([open]) .summary-caret-start::after {
  content: "+";
}

/* Hide the [...] indicator when open. */
.torchlight details[open] .summary-hide-when-open {
  display: none;
}

/* Show the [...] indicator when closed. */
.torchlight details:not([open]) .summary-hide-when-open {
  display: initial;
}

/**********************************************
 * Additional styling                         *
 **********************************************/

/* Fix for disjointed horizontal scrollbars */
.highlight div {
  overflow-x: visible;
}

/*
  Insert prompt indicators on interactive shells.
*/
.cmd::before {
  color: var(--base07);
  content: "$ ";
}

.cmd_root::before {
  color: var(--base08);
  content: "# ";
}

.cmd_pwsh::before {
  color: var(--base07);
  content: "PS> ";
}

/*
  Don't copy shell outputs
*/
.nocopy {
  -webkit-user-select: none;
  user-select: none;
}
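The `.cmd`, `.cmd_root`, and `.nocopy` classes above are attached to individual lines through Torchlight's `[tl! ...]` comment annotations, the same syntax that shows up throughout the content changes further down. A minimal sketch (the `uptime`/`id` commands and the output line are illustrative, not part of this commit):

```shell
uptime # [tl! .cmd]
 14:02:37 up 3 days, 1 user, load average: 0.05, 0.03, 0.01 # [tl! .nocopy]
id # [tl! .cmd_root]
```

Torchlight strips the annotation comments at render time and applies the named classes to the corresponding `.line` elements, which is what draws the `$ `/`# ` prompt indicators and keeps output lines out of text selection.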
assets/js/code-copy-button.js (new file, 56 lines)

@@ -0,0 +1,56 @@
// adapted from https://digitaldrummerj.me/hugo-add-copy-code-snippet-button/

function createCopyButton(highlightDiv) {
  const button = document.createElement("button");
  button.className = "copy-code-button";
  button.type = "button";
  button.innerText = "Copy";
  button.addEventListener("click", () => copyCodeToClipboard(button, highlightDiv));
  highlightDiv.insertBefore(button, highlightDiv.firstChild);
  const wrapper = document.createElement("div");
  wrapper.className = "highlight-wrapper";
  highlightDiv.parentNode.insertBefore(wrapper, highlightDiv);
  wrapper.appendChild(highlightDiv);
}

document.querySelectorAll(".highlight").forEach((highlightDiv) => createCopyButton(highlightDiv));

async function copyCodeToClipboard(button, highlightDiv) {
  // capture all code lines in the selected block which aren't classed `nocopy` or `line-remove`
  let codeToCopy = highlightDiv.querySelectorAll(":last-child > .torchlight > code > .line:not(.nocopy, .line-remove)");
  // now remove the first-child of each line with class `line-number`
  codeToCopy = Array.from(codeToCopy).reduce((accumulator, line) => {
    if (line.firstChild.className != "line-number") {
      return accumulator + line.innerText + "\n";
    } else {
      return accumulator + Array.from(line.children)
        .filter((child) => child.className != "line-number")
        .reduce((accumulator, child) => accumulator + child.innerText, "") + "\n";
    }
  }, "");
  try {
    // clipboard-write permission is required by Chromium-based browsers
    const result = await navigator.permissions.query({ name: "clipboard-write" });
    if (result.state == "granted" || result.state == "prompt") {
      await navigator.clipboard.writeText(codeToCopy);
      button.innerText = "Copied!";
    } else {
      button.innerText = "Error!";
    }
  } catch (_) {
    button.innerText = "Error!";
  } finally {
    // reset the button label after a couple of seconds
    button.blur();
    setTimeout(function () {
      button.innerText = "Copy";
    }, 2000);
  }
}
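Paired with the `:not(.nocopy, .line-remove)` selector above, lines annotated `.nocopy` never reach the clipboard. With a hypothetical block like this (the `apt` commands are illustrative), clicking Copy yields only the two commands, without prompts, line numbers, or output:

```shell
sudo apt update # [tl! .cmd:1]
sudo apt install cowsay
Setting up cowsay ... # [tl! .nocopy]
```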
build.sh (new executable file, 7 lines)

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Quick script to run local builds
source .env
hugo --minify --environment local -D
npx torchlight
python3 -m http.server --directory public 1313
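The `.env` file that `build.sh` sources is excluded by the new `/.env` ignore rule, so its contents aren't part of the commit. A minimal sketch, assuming it only needs to carry the Torchlight API token (`TORCHLIGHT_TOKEN` is the variable the Torchlight CLI reads; the value here is a placeholder):

```shell
# .env (gitignored) - assumed contents, not from this commit
export TORCHLIGHT_TOKEN=torch_xxxxxxxxxxxxxxxx
```

With that in place, `./build.sh` renders a draft-inclusive local build to `public/`, runs the highlighter over it, and serves the result at `http://localhost:1313/`.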
@@ -2,7 +2,7 @@ baseURL = "https://runtimeterror.dev"
 theme = "risotto"
 title = "runtimeterror"
 copyright = "© 2018-2023 [runtimeterror](https://runtimeterror.dev)"
-paginate = 3
+paginate = 10
 languageCode = "en"
 DefaultContentLanguage = "en"
 enableInlineShortcodes = true

@@ -6,18 +6,6 @@
 [goldmark.extensions]
 typographer = false

-[highlight]
-anchorLineNos = true
-codeFences = true
-guessSyntax = true
-hl_Lines = ''
-lineAnchors = ''
-lineNos = true
-lineNoStart = 1
-lineNumbersInTable = false
-noClasses = true
-tabwidth = 2
-
 # Table of contents
 # Add toc = true to content front matter to enable
 [tableOfContents]

@@ -15,6 +15,8 @@ utterancesRepo = "jbowdre/site-comments"
 utterancesIssueTerm = "og:title"
 utterancesTheme = "gruvbox-dark"

+analytics = true
+
 [theme]
 palette = "runtimeterror"

@@ -94,6 +96,7 @@ taglines = [
 "the bug stops here",
 "the system is down",
 "there's no place like $HOME",
+"time jumped backwards, rotating",
 "tonight we test in prod",
 "unable to open display",
 "undefined reference to function",

@@ -133,3 +136,23 @@ url = "https://runtimeterror.dev/simplex"
 icon = "fa-solid fa-envelope"
 title = "Email"
 url = "mailto:ops@runtimeterror.dev"
+
+[[powerLinks]]
+title = "hugo"
+url = "https://gohugo.io"
+
+[[powerLinks]]
+title = "netlify"
+url = "https://www.netlify.com"
+
+[[powerLinks]]
+title = "risotto"
+url = "https://github.com/joeroe/risotto"
+
+[[powerLinks]]
+title = "torchlight"
+url = "https://torchlight.dev"
+
+[[powerLinks]]
+title = "cabin"
+url = "https://withcabin.com/privacy/runtimeterror.dev"
config/local/hugo.toml (new file, 1 line)

@@ -0,0 +1 @@
baseURL = "http://localhost:1313/"

config/local/params.toml (new file, 2 lines)

@@ -0,0 +1,2 @@
comments = false
analytics = false

config/preview/hugo.toml (new file, 1 line)

@@ -0,0 +1 @@
baseURL = "https://preview--runtimeterrordev.netlify.app"

config/preview/params.toml (new file, 2 lines)

@@ -0,0 +1,2 @@
comments = false
analytics = false
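These per-environment files take effect when Hugo is invoked with the matching `--environment` flag, overlaying the default config; that's how `build.sh` above ends up with the localhost `baseURL` and analytics/comments disabled. A sketch of the corresponding invocations (only the `local` one appears in this commit; the `preview` line is an assumption based on these files):

```shell
hugo --minify --environment local -D  # local: baseURL http://localhost:1313/, analytics/comments off
hugo --minify --environment preview   # preview: Netlify branch-deploy baseURL, analytics/comments off
```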
@ -13,30 +13,30 @@ title: 3D Modeling and Printing on Chrome OS
|
||||||
|
|
||||||
I've got an Ender 3 Pro 3D printer, a Raspberry Pi 4, and a Pixel Slate. I can't interface directly with the printer over USB from the Slate (plus having to be physically connected to things is like so lame) so I installed [Octoprint on the Raspberry Pi](https://github.com/guysoft/OctoPi) and connected that to the printer's USB interface. This gave me a pretty web interface for controlling the printer - but it's only accessible over the local network. I also installed [The Spaghetti Detective](https://www.thespaghettidetective.com/) to allow secure remote control of the printer, with the added bonus of using AI magic and a cheap camera to detect and abort failing prints.
|
I've got an Ender 3 Pro 3D printer, a Raspberry Pi 4, and a Pixel Slate. I can't interface directly with the printer over USB from the Slate (plus having to be physically connected to things is like so lame) so I installed [Octoprint on the Raspberry Pi](https://github.com/guysoft/OctoPi) and connected that to the printer's USB interface. This gave me a pretty web interface for controlling the printer - but it's only accessible over the local network. I also installed [The Spaghetti Detective](https://www.thespaghettidetective.com/) to allow secure remote control of the printer, with the added bonus of using AI magic and a cheap camera to detect and abort failing prints.
|
||||||
|
|
||||||
That's a pretty sweet setup, but I still needed a way to convert STL 3D models into GCODE files which the printer can actually understand. And what if I want to create my own designs?
|
That's a pretty sweet setup, but I still needed a way to convert STL 3D models into GCODE files which the printer can actually understand. And what if I want to create my own designs?
|
||||||
|
|
||||||
Enter "Crostini," Chrome OS's [Linux (Beta) feature](https://chromium.googlesource.com/chromiumos/docs/+/master/containers_and_vms.md). It consists of a hardened Linux VM named `termina` which runs (by default) a Debian Buster LXD container named `penguin` (though you can spin up just about any container for which you can find an [image](https://us.images.linuxcontainers.org/)) and some fancy plumbing to let Chrome OS and Linux interact in specific clearly-defined ways. It's a brilliant balance between offering the flexibility of Linux while preserving Chrome OS's industry-leading security posture.
|
Enter "Crostini," Chrome OS's [Linux (Beta) feature](https://chromium.googlesource.com/chromiumos/docs/+/master/containers_and_vms.md). It consists of a hardened Linux VM named `termina` which runs (by default) a Debian Buster LXD container named `penguin` (though you can spin up just about any container for which you can find an [image](https://us.images.linuxcontainers.org/)) and some fancy plumbing to let Chrome OS and Linux interact in specific clearly-defined ways. It's a brilliant balance between offering the flexibility of Linux while preserving Chrome OS's industry-leading security posture.
|
||||||
|
|
||||||
|
|
||||||
![Neofetch in the Crostini terminal](lhTnVwCO3.png)
|
![Neofetch in the Crostini terminal](lhTnVwCO3.png)
|
||||||
|
|
||||||
There are plenty of great guides (like [this one](https://www.computerworld.com/article/3314739/linux-apps-on-chrome-os-an-easy-to-follow-guide.html)) on how to get started with Linux on Chrome OS so I won't rehash those steps here.
|
There are plenty of great guides (like [this one](https://www.computerworld.com/article/3314739/linux-apps-on-chrome-os-an-easy-to-follow-guide.html)) on how to get started with Linux on Chrome OS so I won't rehash those steps here.
|
||||||
|
|
||||||
One additional step you will probably want to take is make sure that your Chromebook is configured to enable hyperthreading, as it may have [hyperthreading disabled by default](https://support.google.com/chromebook/answer/9340236). Just plug `chrome://flags/#scheduler-configuration` into Chrome's address bar, set it to `Enables Hyper-Threading on relevant CPUs`, and then click the button to restart your Chromebook. You'll thank me later.
|
One additional step you will probably want to take is make sure that your Chromebook is configured to enable hyperthreading, as it may have [hyperthreading disabled by default](https://support.google.com/chromebook/answer/9340236). Just plug `chrome://flags/#scheduler-configuration` into Chrome's address bar, set it to `Enables Hyper-Threading on relevant CPUs`, and then click the button to restart your Chromebook. You'll thank me later.
|
||||||
![Enabling hyperthreading](LHax6lAwh.png)
|
![Enabling hyperthreading](LHax6lAwh.png)
|
||||||
|
|
||||||
### The Software
|
### The Software
|
||||||
I settled on using [FreeCAD](https://www.freecadweb.org/) for parametric modeling and [Ultimaker Cura](https://ultimaker.com/software/ultimaker-cura) for my GCODE slicer, but unfortunately getting them working cleanly wasn't entirely straightforward.
|
I settled on using [FreeCAD](https://www.freecadweb.org/) for parametric modeling and [Ultimaker Cura](https://ultimaker.com/software/ultimaker-cura) for my GCODE slicer, but unfortunately getting them working cleanly wasn't entirely straightforward.
|
||||||
|
|
||||||
#### FreeCAD
|
#### FreeCAD
|
||||||
Installing FreeCAD is as easy as:
|
Installing FreeCAD is as easy as:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt update
|
sudo apt update # [tl! .cmd:2]
|
||||||
$ sudo apt install freecad
|
sudo apt install freecad
|
||||||
```
|
```
|
||||||
But launching `/usr/bin/freecad` caused me some weird graphical defects which rendered the application unusable. I found that I needed to pass the `LIBGL_DRI3_DISABLE=1` environment variable to eliminate these glitches:
|
But launching `/usr/bin/freecad` caused me some weird graphical defects which rendered the application unusable. I found that I needed to pass the `LIBGL_DRI3_DISABLE=1` environment variable to eliminate these glitches:
|
||||||
```shell
|
```shell
|
||||||
$ env 'LIBGL_DRI3_DISABLE=1' /usr/bin/freecad &
|
env 'LIBGL_DRI3_DISABLE=1' /usr/bin/freecad & # [tl! .cmd]
|
||||||
```
|
```
|
||||||
To avoid having to type that every time I wished to launch the app, I inserted this line at the bottom of my `~/.bashrc` file:
|
To avoid having to type that every time I wished to launch the app, I inserted this line at the bottom of my `~/.bashrc` file:
|
||||||
```shell
|
```shell
|
||||||
|
@ -44,7 +44,10 @@ alias freecad="env 'LIBGL_DRI3_DISABLE=1' /usr/bin/freecad &"
|
||||||
```
|
```
|
||||||
To be able to start FreeCAD from the Chrome OS launcher with that environment variable intact, edit it into the `Exec` line of the `/usr/share/applications/freecad.desktop` file:
|
To be able to start FreeCAD from the Chrome OS launcher with that environment variable intact, edit it into the `Exec` line of the `/usr/share/applications/freecad.desktop` file:
|
||||||
```shell
|
```shell
|
||||||
$ sudo vi /usr/share/applications/freecad.desktop
|
sudo vi /usr/share/applications/freecad.desktop # [tl! .cmd]
|
||||||
|
```
|
||||||
|
|
||||||
|
```ini
|
||||||
[Desktop Entry]
|
[Desktop Entry]
|
||||||
Version=1.0
|
Version=1.0
|
||||||
Name=FreeCAD
|
Name=FreeCAD
|
||||||
|
@ -53,7 +56,7 @@ Comment=Feature based Parametric Modeler
|
||||||
Comment[de]=Feature-basierter parametrischer Modellierer
|
Comment[de]=Feature-basierter parametrischer Modellierer
|
||||||
GenericName=CAD Application
|
GenericName=CAD Application
|
||||||
GenericName[de]=CAD-Anwendung
|
GenericName[de]=CAD-Anwendung
|
||||||
Exec=env LIBGL_DRI3_DISABLE=1 /usr/bin/freecad %F
|
Exec=env LIBGL_DRI3_DISABLE=1 /usr/bin/freecad %F # [tl! focus]
|
||||||
Path=/usr/lib/freecad
|
Path=/usr/lib/freecad
|
||||||
Terminal=false
|
Terminal=false
|
||||||
Type=Application
|
Type=Application
|
||||||
|
@ -64,32 +67,32 @@ GenericName[de_DE]=Feature-basierter parametrischer Modellierer
|
||||||
Comment[de_DE]=Feature-basierter parametrischer Modellierer
|
Comment[de_DE]=Feature-basierter parametrischer Modellierer
|
||||||
MimeType=application/x-extension-fcstd
|
MimeType=application/x-extension-fcstd
|
||||||
```
|
```
|
||||||
That's it! Get on with your 3D-modeling bad self.
|
That's it! Get on with your 3D-modeling bad self.
|
||||||
![FreeCAD](qDTXt1jp3.png)
|
![FreeCAD](qDTXt1jp3.png)
|
||||||
Now that you've got a model, be sure to [export it as an STL mesh](https://wiki.freecadweb.org/Export_to_STL_or_OBJ) so you can import it into your slicer.
|
Now that you've got a model, be sure to [export it as an STL mesh](https://wiki.freecadweb.org/Export_to_STL_or_OBJ) so you can import it into your slicer.
|
||||||
|
|
||||||
#### Ultimaker Cura
|
#### Ultimaker Cura
|
||||||
Cura isn't available from the default repos so you'll need to download the AppImage from https://github.com/Ultimaker/Cura/releases/tag/4.7.1. You can do this in Chrome and then use the built-in File app to move the file into your 'My Files > Linux Files' directory. Feel free to put it in a subfolder if you want to keep things organized - I stash all my AppImages in `~/Applications/`.
|
Cura isn't available from the default repos so you'll need to download the AppImage from https://github.com/Ultimaker/Cura/releases/tag/4.7.1. You can do this in Chrome and then use the built-in File app to move the file into your 'My Files > Linux Files' directory. Feel free to put it in a subfolder if you want to keep things organized - I stash all my AppImages in `~/Applications/`.
|
||||||
|
|
||||||
To be able to actually execute the AppImage you'll need to adjust the permissions with 'chmod +x':
|
To be able to actually execute the AppImage you'll need to adjust the permissions with 'chmod +x':
|
||||||
```shell
|
```shell
|
||||||
$ chmod +x ~/Applications/Ultimaker_Cura-4.7.1.AppImage
|
chmod +x ~/Applications/Ultimaker_Cura-4.7.1.AppImage # [tl! .cmd]
|
||||||
```
|
```
|
||||||
You can then start up the app by calling the file directly:
|
You can then start up the app by calling the file directly:
|
||||||
```shell
|
```shell
|
||||||
$ ~/Applications/Ultimaker_Cura-4.7.1.AppImage &
|
~/Applications/Ultimaker_Cura-4.7.1.AppImage & # [tl! .cmd]
|
||||||
```
|
```
|
||||||
AppImages don't automatically appear in the Chrome OS launcher so you'll need to create its `.desktop` file. You can do this manually if you want, but I found it a lot easier to leverage `menulibre`:
|
AppImages don't automatically appear in the Chrome OS launcher so you'll need to create its `.desktop` file. You can do this manually if you want, but I found it a lot easier to leverage `menulibre`:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt update && sudo apt install menulibre
|
sudo apt update && sudo apt install menulibre # [tl! .cmd:2]
|
||||||
$ menulibre
|
menulibre
|
||||||
```
|
```
|
||||||
Just plug in the relevant details (you can grab the appropriate icon [here](https://github.com/Ultimaker/Cura/blob/master/icons/cura-128.png)), hit the filing cabinet Save icon, and you should then be able to search for Cura from the Chrome OS launcher.
|
Just plug in the relevant details (you can grab the appropriate icon [here](https://github.com/Ultimaker/Cura/blob/master/icons/cura-128.png)), hit the filing cabinet Save icon, and you should then be able to search for Cura from the Chrome OS launcher.
|
||||||
![Using menulibre to create the launcher shortcut](VTISYOKHO.png)
|
![Using menulibre to create the launcher shortcut](VTISYOKHO.png)
|
||||||
|
|
||||||
![Ultimaker Cura](f8nRJcyI6.png)
|
![Ultimaker Cura](f8nRJcyI6.png)
|
||||||
|
|
||||||
From there, just import the STL mesh, configure the appropriate settings, slice, and save the resulting GCODE. You can then just upload the GCODE straight to The Spaghetti Detective and kick off the print.
|
From there, just import the STL mesh, configure the appropriate settings, slice, and save the resulting GCODE. You can then just upload the GCODE straight to The Spaghetti Detective and kick off the print.
|
||||||
|
|
||||||
![Successful print, designed and sliced on Chrome OS!](2g57odtq2.jpeg)
|
![Successful print, designed and sliced on Chrome OS!](2g57odtq2.jpeg)
|
||||||
|
|
||||||
|
|
|
(deleted file, 45 lines)

@@ -1,45 +0,0 @@
---
title: "Accessing a Tanzu Community Edition Kubernetes Cluster from a new device" # Title of the blog post.
date: 2022-02-01T10:58:57-06:00 # Date of post creation.
# lastmod: 2022-02-01T10:58:57-06:00 # Date when last modified
description: "The Tanzu Community Edition documentation does a great job of explaining how to authenticate to a newly-deployed cluster at the tail end of the installation steps, but how do you log in from another system?" # Description used for search engine.
featured: false # Sets if post is a featured post, making appear on the home page side bar.
draft: true # Sets whether to render this page. Draft of true will not be rendered.
toc: false # Controls if a table of contents should be generated for first-level links automatically.
usePageBundles: true
# menu: main
# featureImage: "file.png" # Sets featured image on blog post.
# featureImageAlt: 'Description of image' # Alternative text for featured image.
# featureImageCap: 'This is the featured image.' # Caption (optional).
# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage.
# shareImage: "share.png" # Designate a separate image for social media sharing.
codeLineNumbers: false # Override global value for showing of line numbers within code block.
series: Tips
tags:
  - vmware
  - kubernetes
  - tanzu
comment: true # Disable comment if false.
---
When I [recently set up my Tanzu Community Edition environment](/tanzu-community-edition-k8s-homelab/), I did so from a Linux VM since I knew that my Chromebook Linux environment wouldn't support the `kind` bootstrap cluster used for the deployment. But now I'd like to be able to connect to the cluster directly using the `tanzu` and `kubectl` CLI tools. How do I get the appropriate cluster configuration over to my Chromebook?

The Tanzu CLI actually makes that pretty easy. I just run these commands on my Linux VM to export the `kubeconfig` of my management (`tce-mgmt`) and workload (`tce-work`) clusters to a pair of files:
```shell
tanzu management-cluster kubeconfig get --admin --export-file tce-mgmt-kubeconfig.yaml
tanzu cluster kubeconfig get tce-work --admin --export-file tce-work-kubeconfig.yaml
```

I could then use `scp` to pull the files from the VM into my local Linux environment. I then needed to [install `kubectl`](/tanzu-community-edition-k8s-homelab/#kubectl-binary) and the [`tanzu` CLI](/tanzu-community-edition-k8s-homelab/#tanzu-cli) (making sure to also [enable shell auto-completion](/enable-tanzu-cli-auto-completion-bash-zsh/) along the way!), and I could import the configurations locally:

```shell
❯ tanzu login --kubeconfig tce-mgmt-kubeconfig.yaml --context tce-mgmt-admin@tce-mgmt --name tce-mgmt
✔  successfully logged in to management cluster using the kubeconfig tce-mgmt

❯ tanzu login --kubeconfig tce-work-kubeconfig.yaml --context tce-work-admin@tce-work --name tce-work
✔  successfully logged in to management cluster using the kubeconfig tce-work
```
@ -11,7 +11,7 @@ tags:
|
||||||
title: Adding VM Notes and Custom Attributes with vRA8
|
title: Adding VM Notes and Custom Attributes with vRA8
|
||||||
---
|
---
|
||||||
|
|
||||||
*In [past posts](/series/vra8), I started by [creating a basic deployment infrastructure](/vra8-custom-provisioning-part-one) in Cloud Assembly and using tags to group those resources. I then [wrote an integration](/integrating-phpipam-with-vrealize-automation-8) to let vRA8 use phpIPAM for static address assignments. I [implemented a vRO workflow](/vra8-custom-provisioning-part-two) for generating unique VM names which fit an organization's established naming standard, and then [extended the workflow](/vra8-custom-provisioning-part-three) to avoid any naming conflicts in Active Directory and DNS. And, finally, I [created an intelligent provisioning request form in Service Broker](/vra8-custom-provisioning-part-four) to make it easy for users to get the servers they need. That's got the core functionality pretty well sorted, so moving forward I'll be detailing additions that enable new capabilities and enhance the experience.*
|
*In [past posts](/series/vra8), I started by [creating a basic deployment infrastructure](/vra8-custom-provisioning-part-one) in Cloud Assembly and using tags to group those resources. I then [wrote an integration](/integrating-phpipam-with-vrealize-automation-8) to let vRA8 use phpIPAM for static address assignments. I [implemented a vRO workflow](/vra8-custom-provisioning-part-two) for generating unique VM names which fit an organization's established naming standard, and then [extended the workflow](/vra8-custom-provisioning-part-three) to avoid any naming conflicts in Active Directory and DNS. And, finally, I [created an intelligent provisioning request form in Service Broker](/vra8-custom-provisioning-part-four) to make it easy for users to get the servers they need. That's got the core functionality pretty well sorted, so moving forward I'll be detailing additions that enable new capabilities and enhance the experience.*
|
||||||
|
|
||||||
In this post, I'll describe how to get certain details from the Service Broker request form and into the VM's properties in vCenter. The obvious application of this is adding descriptive notes so I can remember what purpose a VM serves, but I will also be using [Custom Attributes](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-73606C4C-763C-4E27-A1DA-032E4C46219D.html) to store the server's Point of Contact information and a record of which ticketing system request resulted in the server's creation.
|
In this post, I'll describe how to get certain details from the Service Broker request form and into the VM's properties in vCenter. The obvious application of this is adding descriptive notes so I can remember what purpose a VM serves, but I will also be using [Custom Attributes](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-73606C4C-763C-4E27-A1DA-032E4C46219D.html) to store the server's Point of Contact information and a record of which ticketing system request resulted in the server's creation.
|
||||||
|
|
||||||
|
@ -19,7 +19,7 @@ In this post, I'll describe how to get certain details from the Service Broker r
|
||||||
I'll start this by adding a few new inputs to the cloud template in Cloud Assembly.
|
I'll start this by adding a few new inputs to the cloud template in Cloud Assembly.
|
||||||
![New inputs in Cloud Assembly](F3Wkd3VT.png)
|
![New inputs in Cloud Assembly](F3Wkd3VT.png)
|
||||||
|
|
||||||
I'm using a basic regex on the `poc_email` field to make sure that the user's input is *probably* a valid email address in the format `[some string]@[some string].[some string]`.
|
I'm using a basic regex on the `poc_email` field to make sure that the user's input is *probably* a valid email address in the format `[some string]@[some string].[some string]`.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
inputs:
|
inputs:
|
||||||
|
@ -36,8 +36,8 @@ inputs:
|
||||||
poc_email:
|
poc_email:
|
||||||
type: string
|
type: string
|
||||||
title: Point of Contact Email
|
title: Point of Contact Email
|
||||||
default: jack.shephard@virtuallypotato.com
|
default: username@example.com
|
||||||
pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$'
|
pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$' # [tl! highlight]
|
||||||
ticket:
|
ticket:
|
||||||
type: string
|
type: string
|
||||||
title: Ticket/Request Number
|
title: Ticket/Request Number
|
||||||
|
@ -46,9 +46,10 @@ inputs:
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll also need to add these to the `resources` section of the template so that they will get passed along with the deployment properties.
|
I'll also need to add these to the `resources` section of the template so that they will get passed along with the deployment properties.
|
||||||
|
|
||||||
![New resource properties](N7YllJkxS.png)
|
![New resource properties](N7YllJkxS.png)
|
||||||
|
|
||||||
I'm actually going to combine the `poc_name` and `poc_email` fields into a single `poc` string.
|
I'm actually going to combine the `poc_name` and `poc_email` fields into a single `poc` string.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
resources:
|
resources:
|
||||||
|
@ -56,7 +57,7 @@ resources:
|
||||||
type: Cloud.vSphere.Machine
|
type: Cloud.vSphere.Machine
|
||||||
properties:
|
properties:
|
||||||
<...>
|
<...>
|
||||||
poc: '${input.poc_name + " (" + input.poc_email + ")"}'
|
poc: '${input.poc_name + " (" + input.poc_email + ")"}' # [tl! highlight]
|
||||||
ticket: '${input.ticket}'
|
ticket: '${input.ticket}'
|
||||||
description: '${input.description}'
|
description: '${input.description}'
|
||||||
<...>
|
<...>
|
||||||
|
@ -73,14 +74,15 @@ I can then go to Service Broker and drag the new fields onto the Custom Form can
|
||||||
Okay, so I've got the information I want to pass on to vCenter. Now I need to whip up a new workflow in vRO that will actually do that (after [telling vRO how to connect to the vCenter](/vra8-custom-provisioning-part-two#interlude-connecting-vro-to-vcenter), of course). I'll want to call this after the VM has been provisioned, so I'll cleverly call the workflow "VM Post-Provisioning".
|
Okay, so I've got the information I want to pass on to vCenter. Now I need to whip up a new workflow in vRO that will actually do that (after [telling vRO how to connect to the vCenter](/vra8-custom-provisioning-part-two#interlude-connecting-vro-to-vcenter), of course). I'll want to call this after the VM has been provisioned, so I'll cleverly call the workflow "VM Post-Provisioning".
|
||||||
![Naming the new workflow](X9JhgWx8x.png)
|
![Naming the new workflow](X9JhgWx8x.png)
|
||||||
|
|
||||||
The workflow will have a single input from vRA, `inputProperties` of type `Properties`.
|
The workflow will have a single input from vRA, `inputProperties` of type `Properties`.
|
||||||
![Workflow input](zHrp6GPcP.png)
|
![Workflow input](zHrp6GPcP.png)
|
||||||
|
|
||||||
The first thing this workflow needs to do is parse `inputProperties (Properties)` to get the name of the VM, and it will then use that information to query vCenter and grab the corresponding VM object. So I'll add a scriptable task item to the workflow canvas and call it `Get VM Object`. It will take `inputProperties (Properties)` as its sole input, and output a new variable called `vm` of type `VC:VirtualMachine`.
|
The first thing this workflow needs to do is parse `inputProperties (Properties)` to get the name of the VM, and it will then use that information to query vCenter and grab the corresponding VM object. So I'll add a scriptable task item to the workflow canvas and call it `Get VM Object`. It will take `inputProperties (Properties)` as its sole input, and output a new variable called `vm` of type `VC:VirtualMachine`.
|
||||||
![Get VM Object action](5ATk99aPW.png)
|
![Get VM Object action](5ATk99aPW.png)
|
||||||
|
|
||||||
The script for this task is fairly straightforward:
|
The script for this task is fairly straightforward:
|
||||||
```js
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// JavaScript: Get VM Object
|
// JavaScript: Get VM Object
|
||||||
// Inputs: inputProperties (Properties)
|
// Inputs: inputProperties (Properties)
|
||||||
// Outputs: vm (VC:VirtualMachine)
|
// Outputs: vm (VC:VirtualMachine)
|
||||||
|
@ -99,7 +101,8 @@ The first part of the script creates a new VM config spec, inserts the descripti
|
||||||
|
|
||||||
The second part uses a built-in action to set the `Point of Contact` and `Ticket` custom attributes accordingly.
|
The second part uses a built-in action to set the `Point of Contact` and `Ticket` custom attributes accordingly.
|
||||||
|
|
||||||
```js
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Javascript: Set Notes
|
// Javascript: Set Notes
|
||||||
// Inputs: vm (VC:VirtualMachine), inputProperties (Properties)
|
// Inputs: vm (VC:VirtualMachine), inputProperties (Properties)
|
||||||
// Outputs: None
|
// Outputs: None
|
||||||
|
@ -112,7 +115,7 @@ var spec = new VcVirtualMachineConfigSpec()
|
||||||
spec.annotation = notes
|
spec.annotation = notes
|
||||||
vm.reconfigVM_Task(spec)
|
vm.reconfigVM_Task(spec)
|
||||||
|
|
||||||
System.getModule("com.vmware.library.vc.customattribute").setOrCreateCustomField(vm,"Point of Contact", poc)
|
System.getModule("com.vmware.library.vc.customattribute").setOrCreateCustomField(vm,"Point of Contact", poc) // [tl! highlight:2]
|
||||||
System.getModule("com.vmware.library.vc.customattribute").setOrCreateCustomField(vm,"Ticket", ticket)
|
System.getModule("com.vmware.library.vc.customattribute").setOrCreateCustomField(vm,"Ticket", ticket)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -34,7 +34,7 @@ Once the VM is created, I power it on and hop into the web console. The default
|
||||||
### Configure Networking
|
### Configure Networking
|
||||||
My next step was to configure a static IP address by creating `/etc/systemd/network/10-static-en.network` and entering the following contents:
|
My next step was to configure a static IP address by creating `/etc/systemd/network/10-static-en.network` and entering the following contents:
|
||||||
|
|
||||||
```conf
|
```ini
|
||||||
[Match]
|
[Match]
|
||||||
Name=eth0
|
Name=eth0
|
||||||
|
|
||||||
|
@ -48,12 +48,12 @@ By the way, that `192.168.1.5` address is my Windows DC/DNS server that I use fo
|
||||||
|
|
||||||
I also disabled DHCP by setting `DHCP=no` in `/etc/systemd/network/99-dhcp-en.network`:
|
I also disabled DHCP by setting `DHCP=no` in `/etc/systemd/network/99-dhcp-en.network`:
|
||||||
|
|
||||||
```conf
|
```ini
|
||||||
[Match]
|
[Match]
|
||||||
Name=e*
|
Name=e*
|
||||||
|
|
||||||
[Network]
|
[Network]
|
||||||
DHCP=no
|
DHCP=no # [tl! highlight]
|
||||||
IPv6AcceptRA=no
|
IPv6AcceptRA=no
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -71,26 +71,26 @@ Now that I'm in, I run `tdnf update` to make sure the VM is fully up to date.
|
||||||
Photon OS ships with Docker preinstalled, but I need to install `docker-compose` on my own to simplify container deployment. Per the [install instructions](https://docs.docker.com/compose/install/#install-compose), I run:
|
Photon OS ships with Docker preinstalled, but I need to install `docker-compose` on my own to simplify container deployment. Per the [install instructions](https://docs.docker.com/compose/install/#install-compose), I run:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose # [tl! .cmd_root:1]
|
||||||
chmod +x /usr/local/bin/docker-compose
|
chmod +x /usr/local/bin/docker-compose
|
||||||
```
|
```
|
||||||
|
|
||||||
And then verify that it works:
|
And then verify that it works:
|
||||||
```shell
|
```shell
|
||||||
root@adguard [ ~]# docker-compose --version
|
docker-compose --version # [tl! .cmd_root]
|
||||||
docker-compose version 1.29.2, build 5becea4c
|
docker-compose version 1.29.2, build 5becea4c # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll also want to enable and start Docker:
|
I'll also want to enable and start Docker:
|
||||||
```shell
|
```shell
|
||||||
systemctl enable docker
|
systemctl enable docker # [tl! .cmd_root:1]
|
||||||
systemctl start docker
|
systemctl start docker
|
||||||
```
|
```
|
||||||
|
|
||||||
### Disable DNSStubListener
|
### Disable DNSStubListener
|
||||||
By default, the `resolved` daemon is listening on `127.0.0.53:53` and will prevent docker from binding to that port. Fortunately it's [pretty easy](https://github.com/pi-hole/docker-pi-hole#installing-on-ubuntu) to disable the `DNSStubListener` and free up the port:
|
By default, the `resolved` daemon is listening on `127.0.0.53:53` and will prevent docker from binding to that port. Fortunately it's [pretty easy](https://github.com/pi-hole/docker-pi-hole#installing-on-ubuntu) to disable the `DNSStubListener` and free up the port:
|
||||||
```shell
|
```shell
|
||||||
sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf
|
sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf # [tl! .cmd_root:2]
|
||||||
rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
|
rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
|
||||||
systemctl restart systemd-resolved
|
systemctl restart systemd-resolved
|
||||||
```
|
```
|
||||||
|
@ -100,13 +100,14 @@ Okay, now for the fun part.
|
||||||
|
|
||||||
I create a directory for AdGuard to live in, and then create a `docker-compose.yaml` therein:
|
I create a directory for AdGuard to live in, and then create a `docker-compose.yaml` therein:
|
||||||
```shell
|
```shell
|
||||||
mkdir ~/adguard
|
mkdir ~/adguard # [tl! .cmd_root:2]
|
||||||
cd ~/adguard
|
cd ~/adguard
|
||||||
vi docker-compose.yaml
|
vi docker-compose.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
And I define the container:
|
And I define the container:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
version: "3"
|
version: "3"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
@ -134,17 +135,17 @@ services:
|
||||||
Then I can fire it up with `docker-compose up --detach`:
|
Then I can fire it up with `docker-compose up --detach`:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
root@adguard [ ~/adguard ]# docker-compose up --detach
|
docker-compose up --detach # [tl! .cmd_root focus:start]
|
||||||
Creating network "adguard_default" with the default driver
|
Creating network "adguard_default" with the default driver # [tl! .nocopy:start]
|
||||||
Pulling adguard (adguard/adguardhome:latest)...
|
Pulling adguard (adguard/adguardhome:latest)...
|
||||||
latest: Pulling from adguard/adguardhome
|
latest: Pulling from adguard/adguardhome # [tl! focus:end]
|
||||||
339de151aab4: Pull complete
|
339de151aab4: Pull complete
|
||||||
4db4be09618a: Pull complete
|
4db4be09618a: Pull complete
|
||||||
7e918e810e4e: Pull complete
|
7e918e810e4e: Pull complete
|
||||||
bfad96428d01: Pull complete
|
bfad96428d01: Pull complete
|
||||||
Digest: sha256:de7d791b814560663fe95f9812fca2d6dd9d6507e4b1b29926cc7b4a08a676ad
|
Digest: sha256:de7d791b814560663fe95f9812fca2d6dd9d6507e4b1b29926cc7b4a08a676ad # [tl! focus:3]
|
||||||
Status: Downloaded newer image for adguard/adguardhome:latest
|
Status: Downloaded newer image for adguard/adguardhome:latest
|
||||||
Creating adguard ... done
|
Creating adguard ... done # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -30,6 +30,7 @@ When I cobbled together this script I was primarily targeting the Enterprise Lin
|
||||||
{{% /notice %}}
|
{{% /notice %}}
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# This will attempt to automatically detect the LVM logical volume where / is mounted and then
|
# This will attempt to automatically detect the LVM logical volume where / is mounted and then
|
||||||
# expand the underlying physical partition, LVM physical volume, LVM volume group, LVM logical
|
# expand the underlying physical partition, LVM physical volume, LVM volume group, LVM logical
|
||||||
|
|
|
@ -41,23 +41,28 @@ When I originally wrote this post back in September 2018, the containerized BitW
|
||||||
2. Log in to the GCE instance and run `sudo apt-get update` followed by `sudo apt-get install ddclient`. Part of the install process prompts you to configure things... just accept the defaults and move on.
|
2. Log in to the GCE instance and run `sudo apt-get update` followed by `sudo apt-get install ddclient`. Part of the install process prompts you to configure things... just accept the defaults and move on.
|
||||||
3. Edit the `ddclient` config file to look like this, substituting the username, password, and FDQN from Google Domains:
|
3. Edit the `ddclient` config file to look like this, substituting the username, password, and FDQN from Google Domains:
|
||||||
```shell
|
```shell
|
||||||
$ sudo vi /etc/ddclient.conf
|
sudo vim /etc/ddclient.conf # [tl! .cmd]
|
||||||
# Configuration file for ddclient generated by debconf
|
```
|
||||||
#
|
|
||||||
# /etc/ddclient.conf
|
|
||||||
|
|
||||||
protocol=googledomains,
|
```ini
|
||||||
ssl=yes,
|
# torchlight! {"lineNumbers": true}
|
||||||
syslog=yes,
|
# Configuration file for ddclient generated by debconf
|
||||||
use=web,
|
#
|
||||||
server=domains.google.com,
|
# /etc/ddclient.conf
|
||||||
login='[USERNAME]',
|
|
||||||
password='[PASSWORD]',
|
protocol=googledomains,
|
||||||
[FQDN]
|
ssl=yes,
|
||||||
|
syslog=yes,
|
||||||
|
use=web,
|
||||||
|
server=domains.google.com,
|
||||||
|
login='[USERNAME]', # [tl! highlight:3]
|
||||||
|
password='[PASSWORD]',
|
||||||
|
[FQDN]
|
||||||
```
|
```
|
||||||
4. `sudo vi /etc/default/ddclient` and make sure that `run_daemon="true"`:
|
4. `sudo vi /etc/default/ddclient` and make sure that `run_daemon="true"`:
|
||||||
|
|
||||||
```shell
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# Configuration for ddclient scripts
|
# Configuration for ddclient scripts
|
||||||
# generated from debconf on Sat Sep 8 21:58:02 UTC 2018
|
# generated from debconf on Sat Sep 8 21:58:02 UTC 2018
|
||||||
#
|
#
|
||||||
|
@ -71,7 +76,7 @@ run_dhclient="false"
|
||||||
# established. This might be useful, if you are using dial-on-demand.
|
# established. This might be useful, if you are using dial-on-demand.
|
||||||
run_ipup="false"
|
run_ipup="false"
|
||||||
|
|
||||||
# Set to "true" if ddclient should run in daemon mode
|
# Set to "true" if ddclient should run in daemon mode [tl! focus:3]
|
||||||
# If this is changed to true, run_ipup and run_dhclient must be set to false.
|
# If this is changed to true, run_ipup and run_dhclient must be set to false.
|
||||||
run_daemon="true"
|
run_daemon="true"
|
||||||
|
|
||||||
|
@ -81,8 +86,8 @@ daemon_interval="300"
|
||||||
```
|
```
|
||||||
5. Restart the `ddclient` service - twice for good measure (daemon mode only gets activated on the second go *because reasons*):
|
5. Restart the `ddclient` service - twice for good measure (daemon mode only gets activated on the second go *because reasons*):
|
||||||
```shell
|
```shell
|
||||||
$ sudo systemctl restart ddclient
|
sudo systemctl restart ddclient # [tl! .cmd:2]
|
||||||
$ sudo systemctl restart ddclient
|
sudo systemctl restart ddclient
|
||||||
```
|
```
|
||||||
6. After a few moments, refresh the Google Domains page to verify that your instance's external IP address is showing up on the new DDNS record.
|
6. After a few moments, refresh the Google Domains page to verify that your instance's external IP address is showing up on the new DDNS record.
|
||||||
|
|
||||||
|
@ -90,11 +95,11 @@ $ sudo systemctl restart ddclient
|
||||||
*Steps taken from [here](https://docs.docker.com/install/linux/docker-ce/debian/).*
|
*Steps taken from [here](https://docs.docker.com/install/linux/docker-ce/debian/).*
|
||||||
1. Update `apt` package index:
|
1. Update `apt` package index:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt-get update
|
sudo apt-get update # [tl! .cmd]
|
||||||
```
|
```
|
||||||
2. Install package management prereqs:
|
2. Install package management prereqs:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt-get install \
|
sudo apt-get install \ # [tl! .cmd]
|
||||||
apt-transport-https \
|
apt-transport-https \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
curl \
|
||||||
|
@ -103,46 +108,46 @@ $ sudo apt-get install \
|
||||||
```
|
```
|
||||||
3. Add Docker GPG key:
|
3. Add Docker GPG key:
|
||||||
```shell
|
```shell
|
||||||
$ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
|
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - # [tl! .cmd]
|
||||||
```
|
```
|
||||||
4. Add the Docker repo:
|
4. Add the Docker repo:
|
||||||
```shell
|
```shell
|
||||||
$ sudo add-apt-repository \
|
sudo add-apt-repository \ # [tl! .cmd]
|
||||||
"deb [arch=amd64] https://download.docker.com/linux/debian \
|
"deb [arch=amd64] https://download.docker.com/linux/debian \
|
||||||
$(lsb_release -cs) \
|
$(lsb_release -cs) \
|
||||||
stable"
|
stable"
|
||||||
```
|
```
|
||||||
5. Update apt index again:
|
5. Update apt index again:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt-get update
|
sudo apt-get update # [tl! .cmd]
|
||||||
```
|
```
|
||||||
6. Install Docker:
|
6. Install Docker:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt-get install docker-ce
|
sudo apt-get install docker-ce # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Install Certbot and generate SSL cert
|
### Install Certbot and generate SSL cert
|
||||||
*Steps taken from [here](https://certbot.eff.org/instructions?ws=other&os=debianbuster).*
|
*Steps taken from [here](https://certbot.eff.org/instructions?ws=other&os=debianbuster).*
|
||||||
1. Install Certbot:
|
1. Install Certbot:
|
||||||
```shell
|
```shell
|
||||||
$ sudo apt-get install certbot
|
sudo apt-get install certbot # [tl! .cmd]
|
||||||
```
|
```
|
||||||
2. Generate certificate:
|
2. Generate certificate:
|
||||||
```shell
|
```shell
|
||||||
$ sudo certbot certonly --standalone -d [FQDN]
|
sudo certbot certonly --standalone -d ${FQDN} # [tl! .cmd]
|
||||||
```
|
```
|
||||||
3. Create a directory to store the new certificates and copy them there:
|
3. Create a directory to store the new certificates and copy them there:
|
||||||
```shell
|
```shell
|
||||||
$ sudo mkdir -p /ssl/keys/
|
sudo mkdir -p /ssl/keys/ # [tl! .cmd:3]
|
||||||
$ sudo cp -p /etc/letsencrypt/live/[FQDN]/fullchain.pem /ssl/keys/
|
sudo cp -p /etc/letsencrypt/live/${FQDN}/fullchain.pem /ssl/keys/
|
||||||
$ sudo cp -p /etc/letsencrypt/live/[FQDN]/privkey.pem /ssl/keys/
|
sudo cp -p /etc/letsencrypt/live/${FQDN}/privkey.pem /ssl/keys/
|
||||||
```
|
```
|
||||||
|
|
||||||
### Set up vaultwarden
|
### Set up vaultwarden
|
||||||
*Using the container image available [here](https://github.com/dani-garcia/vaultwarden).*
|
*Using the container image available [here](https://github.com/dani-garcia/vaultwarden).*
|
||||||
1. Let's just get it up and running first:
|
1. Let's just get it up and running first:
|
||||||
```shell
|
```shell
|
||||||
$ sudo docker run -d --name vaultwarden \
|
sudo docker run -d --name vaultwarden \ # [tl! .cmd]
|
||||||
-e ROCKET_TLS={certs='"/ssl/fullchain.pem", key="/ssl/privkey.pem"}' \
|
-e ROCKET_TLS={certs='"/ssl/fullchain.pem", key="/ssl/privkey.pem"}' \
|
||||||
-e ROCKET_PORT='8000' \
|
-e ROCKET_PORT='8000' \
|
||||||
-v /ssl/keys/:/ssl/ \
|
-v /ssl/keys/:/ssl/ \
|
||||||
|
@ -154,9 +159,9 @@ $ sudo docker run -d --name vaultwarden \
|
||||||
2. At this point you should be able to point your web browser at `https://[FQDN]` and see the BitWarden login screen. Click on the Create button and set up a new account. Log in, look around, add some passwords, etc. Everything should basically work just fine.
|
2. At this point you should be able to point your web browser at `https://[FQDN]` and see the BitWarden login screen. Click on the Create button and set up a new account. Log in, look around, add some passwords, etc. Everything should basically work just fine.
|
||||||
3. Unless you want to host passwords for all of the Internet you'll probably want to disable signups at some point by adding the `env` option `SIGNUPS_ALLOWED=false`. And you'll need to set `DOMAIN=https://[FQDN]` if you want to use U2F authentication:
|
3. Unless you want to host passwords for all of the Internet you'll probably want to disable signups at some point by adding the `env` option `SIGNUPS_ALLOWED=false`. And you'll need to set `DOMAIN=https://[FQDN]` if you want to use U2F authentication:
|
||||||
```shell
|
```shell
|
||||||
$ sudo docker stop vaultwarden
|
sudo docker stop vaultwarden # [tl! .cmd:2]
|
||||||
$ sudo docker rm vaultwarden
|
sudo docker rm vaultwarden
|
||||||
$ sudo docker run -d --name vaultwarden \
|
sudo docker run -d --name vaultwarden \
|
||||||
-e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \
|
-e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \
|
||||||
-e ROCKET_PORT='8000' \
|
-e ROCKET_PORT='8000' \
|
||||||
-e SIGNUPS_ALLOWED=false \
|
-e SIGNUPS_ALLOWED=false \
|
||||||
|
@ -170,62 +175,78 @@ $ sudo docker run -d --name vaultwarden \
|
||||||
|
|
||||||
### Install vaultwarden as a service
|
### Install vaultwarden as a service
|
||||||
*So we don't have to keep manually firing this thing off.*
|
*So we don't have to keep manually firing this thing off.*
|
||||||
1. Create a script to stop, remove, update, and (re)start the `vaultwarden` container:
|
1. Create a script at `/usr/local/bin/start-vaultwarden.sh` to stop, remove, update, and (re)start the `vaultwarden` container:
|
||||||
```shell
|
```shell
|
||||||
$ sudo vi /usr/local/bin/start-vaultwarden.sh
|
sudo vim /usr/local/bin/start-vaultwarden.sh # [tl! .cmd]
|
||||||
#!/bin/bash
|
```
|
||||||
|
|
||||||
docker stop vaultwarden
|
```shell
|
||||||
docker rm vaultwarden
|
# torchlight! {"lineNumbers": true}
|
||||||
docker pull vaultwarden/server
|
#!/bin/bash
|
||||||
|
|
||||||
docker run -d --name vaultwarden \
|
docker stop vaultwarden
|
||||||
-e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \
|
docker rm vaultwarden
|
||||||
-e ROCKET_PORT='8000' \
|
docker pull vaultwarden/server
|
||||||
-e SIGNUPS_ALLOWED=false \
|
|
||||||
-e DOMAIN=https://[FQDN] \
|
docker run -d --name vaultwarden \
|
||||||
-v /ssl/keys/:/ssl/ \
|
-e ROCKET_TLS={certs='"/ssl/fullchain.pem",key="/ssl/privkey.pem"'} \
|
||||||
-v /bw-data/:/data/ \
|
-e ROCKET_PORT='8000' \
|
||||||
-v /icon_cache/ \
|
-e SIGNUPS_ALLOWED=false \
|
||||||
-p 0.0.0.0:443:8000 \
|
-e DOMAIN=https://${FQDN} \
|
||||||
vaultwarden/server:latest
|
-v /ssl/keys/:/ssl/ \
|
||||||
$ sudo chmod 744 /usr/local/bin/start-vaultwarden.sh
|
-v /bw-data/:/data/ \
|
||||||
|
-v /icon_cache/ \
|
||||||
|
-p 0.0.0.0:443:8000 \
|
||||||
|
vaultwarden/server:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
```shell
|
||||||
|
sudo chmod 744 /usr/local/bin/start-vaultwarden.sh # [tl! .cmd]
|
||||||
```
|
```
|
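It shouldn't hurt to fire the script off once by hand first to confirm it behaves as expected (it will stop and remove the container from earlier, pull the latest image, and start a fresh one):

```shell
sudo /usr/local/bin/start-vaultwarden.sh # [tl! .cmd]
```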
||||||
2. And add it as a `systemd` service:
|
2. And add it as a `systemd` service:
|
||||||
```shell
|
```shell
|
||||||
$ sudo vi /etc/systemd/system/vaultwarden.service
|
sudo vim /etc/systemd/system/vaultwarden.service # [tl! .cmd]
|
||||||
[Unit]
|
```
|
||||||
Description=BitWarden container
|
|
||||||
Requires=docker.service
|
|
||||||
After=docker.service
|
|
||||||
|
|
||||||
[Service]
|
```ini
|
||||||
Restart=always
|
[Unit]
|
||||||
ExecStart=/usr/local/bin/start-vaultwarden.sh
|
Description=BitWarden container
|
||||||
ExecStop=/usr/bin/docker stop vaultwarden
|
Requires=docker.service
|
||||||
|
After=docker.service
|
||||||
|
|
||||||
[Install]
|
[Service]
|
||||||
WantedBy=default.target
|
Restart=always
|
||||||
$ sudo chmod 644 /etc/systemd/system/vaultwarden.service
|
ExecStart=/usr/local/bin/start-vaultwarden.sh # [tl! highlight]
|
||||||
|
ExecStop=/usr/bin/docker stop vaultwarden
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
|
```
|
||||||
|
|
||||||
|
```shell
|
||||||
|
sudo chmod 644 /etc/systemd/system/vaultwarden.service # [tl! .cmd]
|
||||||
```
|
```
|
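Note that the status output in the next step shows the unit as `enabled`; to get that same start-at-boot behavior, reload `systemd` and enable the unit first:

```shell
sudo systemctl daemon-reload # [tl! .cmd:1]
sudo systemctl enable vaultwarden
```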
||||||
3. Try it out:
|
3. Try it out:
|
||||||
```shell
|
```shell
|
||||||
$ sudo systemctl start vaultwarden
|
sudo systemctl start vaultwarden # [tl! .cmd]
|
||||||
$ sudo systemctl status vaultwarden
|
```
|
||||||
● vaultwarden.service - BitWarden container
|
|
||||||
Loaded: loaded (/etc/systemd/system/vaultwarden.service; enabled; vendor preset: enabled)
|
|
||||||
Active: deactivating (stop) since Sun 2018-09-09 03:43:20 UTC; 1s ago
|
|
||||||
Process: 13104 ExecStart=/usr/local/bin/start-vaultwarden.sh (code=exited, status=0/SUCCESS)
|
|
||||||
Main PID: 13104 (code=exited, status=0/SUCCESS); Control PID: 13229 (docker)
|
|
||||||
Tasks: 5 (limit: 4915)
|
|
||||||
Memory: 9.7M
|
|
||||||
CPU: 375ms
|
|
||||||
CGroup: /system.slice/vaultwarden.service
|
|
||||||
└─control
|
|
||||||
└─13229 /usr/bin/docker stop vaultwarden
|
|
||||||
|
|
||||||
Sep 09 03:43:20 vaultwarden start-vaultwarden.sh[13104]: Status: Image is up to date for vaultwarden/server:latest
|
```shell
|
||||||
Sep 09 03:43:20 vaultwarden start-vaultwarden.sh[13104]: ace64ca5294eee7e21be764ea1af9e328e944658b4335ce8721b99a33061d645
|
sudo systemctl status vaultwarden # [tl! .cmd focus:start]
|
||||||
|
● vaultwarden.service - BitWarden container # [tl! .nocopy:start]
|
||||||
|
Loaded: loaded (/etc/systemd/system/vaultwarden.service; enabled; vendor preset: enabled)
|
||||||
|
Active: deactivating (stop) since Sun 2018-09-09 03:43:20 UTC; 1s ago
|
||||||
|
Process: 13104 ExecStart=/usr/local/bin/start-vaultwarden.sh (code=exited, status=0/SUCCESS) # [tl! focus:end]
|
||||||
|
Main PID: 13104 (code=exited, status=0/SUCCESS); Control PID: 13229 (docker)
|
||||||
|
Tasks: 5 (limit: 4915)
|
||||||
|
Memory: 9.7M
|
||||||
|
CPU: 375ms
|
||||||
|
CGroup: /system.slice/vaultwarden.service
|
||||||
|
└─control
|
||||||
|
└─13229 /usr/bin/docker stop vaultwarden
|
||||||
|
|
||||||
|
Sep 09 03:43:20 vaultwarden start-vaultwarden.sh[13104]: Status: Image is up to date for vaultwarden/server:latest
|
||||||
|
Sep 09 03:43:20 vaultwarden start-vaultwarden.sh[13104]: ace64ca5294eee7e21be764ea1af9e328e944658b4335ce8721b99a33061d645 # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Conclusion
|
### Conclusion
|
||||||
|
|
|
@ -27,18 +27,18 @@ comment: true # Disable comment if false.
|
||||||
I [recently wrote](/tanzu-community-edition-k8s-homelab/#a-real-workload---phpipam) about getting started with VMware's [Tanzu Community Edition](https://tanzucommunityedition.io/) and deploying [phpIPAM](https://phpipam.net/) as my first real-world Kubernetes workload. Well I've spent much of my time since then working on a script which would help to populate my phpIPAM instance with a list of networks to monitor.
|
I [recently wrote](/tanzu-community-edition-k8s-homelab/#a-real-workload---phpipam) about getting started with VMware's [Tanzu Community Edition](https://tanzucommunityedition.io/) and deploying [phpIPAM](https://phpipam.net/) as my first real-world Kubernetes workload. Well I've spent much of my time since then working on a script which would help to populate my phpIPAM instance with a list of networks to monitor.
|
||||||
|
|
||||||
### Planning and Exporting
|
### Planning and Exporting
|
||||||
The first step in making this work was to figure out which networks I wanted to import. We've got hundreds of different networks in use across our production vSphere environments. I focused only on those which are portgroups on distributed virtual switches since those configurations are pretty standardized (being vCenter constructs instead of configured on individual hosts). These dvPortGroups bear a naming standard which conveys all sorts of useful information, and it's easy and safe to rename any dvPortGroups which _don't_ fit the standard (unlike renaming portgroups on a standard virtual switch).
|
The first step in making this work was to figure out which networks I wanted to import. We've got hundreds of different networks in use across our production vSphere environments. I focused only on those which are portgroups on distributed virtual switches since those configurations are pretty standardized (being vCenter constructs instead of configured on individual hosts). These dvPortGroups bear a naming standard which conveys all sorts of useful information, and it's easy and safe to rename any dvPortGroups which _don't_ fit the standard (unlike renaming portgroups on a standard virtual switch).
|
||||||
|
|
||||||
The standard naming convention is `[Site/Description] [Network Address]{/[Mask]}`. So the networks (across two virtual datacenters and two dvSwitches) look something like this:
|
The standard naming convention is `[Site/Description] [Network Address]{/[Mask]}`. So the networks (across two virtual datacenters and two dvSwitches) look something like this:
|
||||||
![Production dvPortGroups approximated in my testing lab environment](dvportgroups.png)
|
![Production dvPortGroups approximated in my testing lab environment](dvportgroups.png)
|
||||||
|
|
||||||
Some networks have masks in the name, some don't; and some use an underscore (`_`) rather than a slash (`/`) to separate the network from the mask. Most networks correctly include the network address with a `0` in the last octet, but some use an `x` instead. And the VLANs associated with the networks have a varying number of digits. Consistency can be difficult, so these are all things that I had to keep in mind as I worked on a solution which would make a true best effort at importing all of these.
|
Some networks have masks in the name, some don't; and some use an underscore (`_`) rather than a slash (`/`) to separate the network from the mask. Most networks correctly include the network address with a `0` in the last octet, but some use an `x` instead. And the VLANs associated with the networks have a varying number of digits. Consistency can be difficult, so these are all things that I had to keep in mind as I worked on a solution which would make a true best effort at importing all of these.
|
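Just to illustrate the sort of cleanup involved (this is only a sketch, not the actual import logic), an underscore mask separator could be normalized with a couple of `sed` expressions like these:

```shell
echo 'VPOT8-Servers 172.20.10.64_26' | sed -e 's|_|/|' -e 's/\.x$/.0/' # [tl! .cmd]
VPOT8-Servers 172.20.10.64/26 # [tl! .nocopy]
```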
||||||
|
|
||||||
As long as the dvPortGroup names stick to this format I can parse the name to come up with a description as well as the IP space of the network. The dvPortGroup also carries information about the associated VLAN, which is useful information to have. And I can easily export this information with a simple PowerCLI query:
|
As long as the dvPortGroup names stick to this format I can parse the name to come up with a description as well as the IP space of the network. The dvPortGroup also carries information about the associated VLAN, which is useful information to have. And I can easily export this information with a simple PowerCLI query:
|
||||||
|
|
||||||
```powershell
|
```powershell
|
||||||
PS /home/john> get-vdportgroup | select Name, VlanConfiguration
|
get-vdportgroup | select Name, VlanConfiguration # [tl! .cmd_pwsh]
|
||||||
|
# [tl! .nocopy:start]
|
||||||
Name VlanConfiguration
|
Name VlanConfiguration
|
||||||
---- -----------------
|
---- -----------------
|
||||||
MGT-Home 192.168.1.0
|
MGT-Home 192.168.1.0
|
||||||
|
@ -50,15 +50,15 @@ DRE-Servers 172.16.50.0 VLAN 1650
|
||||||
DRE-Servers 172.16.60.x VLAN 1660
|
DRE-Servers 172.16.60.x VLAN 1660
|
||||||
VPOT8-Mgmt 172.20.10.0/27 VLAN 20
|
VPOT8-Mgmt 172.20.10.0/27 VLAN 20
|
||||||
VPOT8-Servers 172.20.10.32/27 VLAN 30
|
VPOT8-Servers 172.20.10.32/27 VLAN 30
|
||||||
VPOT8-Servers 172.20.10.64_26 VLAN 40
|
VPOT8-Servers 172.20.10.64_26 VLAN 40 # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
In my [homelab](/vmware-home-lab-on-intel-nuc-9/), I only have a single vCenter. In production, we've got a handful of vCenters, and each manages the hosts in a given region. So I can use information about which vCenter hosts a dvPortGroup to figure out which region a network is in. When I import this data into phpIPAM, I can use the vCenter name to assign [remote scan agents](https://github.com/jbowdre/phpipam-agent-docker) to networks based on the region that they're in. I can also grab information about which virtual datacenter a dvPortGroup lives in, which I'll use for grouping networks into sites or sections.
|
In my [homelab](/vmware-home-lab-on-intel-nuc-9/), I only have a single vCenter. In production, we've got a handful of vCenters, and each manages the hosts in a given region. So I can use information about which vCenter hosts a dvPortGroup to figure out which region a network is in. When I import this data into phpIPAM, I can use the vCenter name to assign [remote scan agents](https://github.com/jbowdre/phpipam-agent-docker) to networks based on the region that they're in. I can also grab information about which virtual datacenter a dvPortGroup lives in, which I'll use for grouping networks into sites or sections.
|
||||||
|
|
||||||
The vCenter can be found in the `Uid` property returned by `get-vdportgroup`:
|
The vCenter can be found in the `Uid` property returned by `get-vdportgroup`:
|
||||||
```powershell
|
```powershell
|
||||||
PS /home/john> get-vdportgroup | select Name, VlanConfiguration, Datacenter, Uid
|
get-vdportgroup | select Name, VlanConfiguration, Datacenter, Uid # [tl! .cmd_pwsh]
|
||||||
|
# [tl! .nocopy:start]
|
||||||
Name VlanConfiguration Datacenter Uid
|
Name VlanConfiguration Datacenter Uid
|
||||||
---- ----------------- ---------- ---
|
---- ----------------- ---------- ---
|
||||||
MGT-Home 192.168.1.0 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-27015/
|
MGT-Home 192.168.1.0 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-27015/
|
||||||
|
@ -70,13 +70,14 @@ DRE-Servers 172.16.50.0 VLAN 1650 Lab /VIServer=lab\john@vcsa.
|
||||||
DRE-Servers 172.16.60.x VLAN 1660 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28014/
|
DRE-Servers 172.16.60.x VLAN 1660 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28014/
|
||||||
VPOT8-Mgmt 172.20.10.0/… VLAN 20 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35018/
|
VPOT8-Mgmt 172.20.10.0/… VLAN 20 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35018/
|
||||||
VPOT8-Servers 172.20.10… VLAN 30 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35019/
|
VPOT8-Servers 172.20.10… VLAN 30 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35019/
|
||||||
VPOT8-Servers 172.20.10… VLAN 40 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35020/
|
VPOT8-Servers 172.20.10… VLAN 40 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35020/ # [tl! .nocopy:end]
|
||||||
```
|
```
|
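(As a rough illustration of what's buried in there, and not part of the import script itself, the vCenter hostname could be teased out of that `Uid` string with a quick regex:)

```shell
echo '/VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=...' | sed -E 's/.*@([^:]+):.*/\1/' # [tl! .cmd]
vcsa.lab.bowdre.net # [tl! .nocopy]
```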
||||||
|
|
||||||
It's not pretty, but it'll do the trick. All that's left is to export this data into a handy-dandy CSV-formatted file that I can easily parse for import:
|
It's not pretty, but it'll do the trick. All that's left is to export this data into a handy-dandy CSV-formatted file that I can easily parse for import:
|
||||||
|
|
||||||
```powershell
|
```powershell
|
||||||
get-vdportgroup | select Name, VlanConfiguration, Datacenter, Uid | export-csv -NoTypeInformation ./networks.csv
|
get-vdportgroup | select Name, VlanConfiguration, Datacenter, Uid ` # [tl! .cmd_pwsh]
|
||||||
|
| export-csv -NoTypeInformation ./networks.csv
|
||||||
```
|
```
|
||||||
![My networks.csv export, including the networks which don't match the naming criteria and will be skipped by the import process.](networks.csv.png)
|
![My networks.csv export, including the networks which don't match the naming criteria and will be skipped by the import process.](networks.csv.png)
|
||||||
|
|
||||||
|
@ -97,6 +98,7 @@ I'm also going to head in to **Administration > IP Related Management > Sections
|
||||||
Well that's enough prep work; now it's time for the Python3 [script](https://github.com/jbowdre/misc-scripts/blob/main/Python/phpipam-bulk-import.py):
|
Well that's enough prep work; now it's time for the Python3 [script](https://github.com/jbowdre/misc-scripts/blob/main/Python/phpipam-bulk-import.py):
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# The latest version of this script can be found on Github:
|
# The latest version of this script can be found on Github:
|
||||||
# https://github.com/jbowdre/misc-scripts/blob/main/Python/phpipam-bulk-import.py
|
# https://github.com/jbowdre/misc-scripts/blob/main/Python/phpipam-bulk-import.py
|
||||||
|
|
||||||
|
@ -361,7 +363,7 @@ def main():
|
||||||
# make sure filepath is a path to an actual file
|
# make sure filepath is a path to an actual file
|
||||||
print("""\n\n
|
print("""\n\n
|
||||||
This script helps to add vSphere networks to phpIPAM for IP address management. It is expected
|
This script helps to add vSphere networks to phpIPAM for IP address management. It is expected
|
||||||
that the vSphere networks are configured as portgroups on distributed virtual switches and
|
that the vSphere networks are configured as portgroups on distributed virtual switches and
|
||||||
named like '[Description] [Subnet IP]{/[mask]}' (ex: 'LAB-Servers 192.168.1.0'). The following PowerCLI
|
named like '[Description] [Subnet IP]{/[mask]}' (ex: 'LAB-Servers 192.168.1.0'). The following PowerCLI
|
||||||
command can be used to export the networks from vSphere:
|
command can be used to export the networks from vSphere:
|
||||||
|
|
||||||
|
@ -377,7 +379,7 @@ def main():
|
||||||
else:
|
else:
|
||||||
print(f'[ERROR] Unable to find file at {filepath.name}.')
|
print(f'[ERROR] Unable to find file at {filepath.name}.')
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# get collection of networks to import
|
# get collection of networks to import
|
||||||
networks = import_networks(filepath)
|
networks = import_networks(filepath)
|
||||||
networkNames = get_sorted_list_of_unique_values('name', networks)
|
networkNames = get_sorted_list_of_unique_values('name', networks)
|
||||||
|
@ -415,7 +417,7 @@ def main():
|
||||||
else:
|
else:
|
||||||
del test
|
del test
|
||||||
break
|
break
|
||||||
|
|
||||||
username = validate_input_is_not_empty('Username', f'Username with read/write access to {hostname}')
|
username = validate_input_is_not_empty('Username', f'Username with read/write access to {hostname}')
|
||||||
password = getpass.getpass(f'Password for {username}:\n')
|
password = getpass.getpass(f'Password for {username}:\n')
|
||||||
apiAppId = validate_input_is_not_empty('App ID', f'App ID for API key (from https://{hostname}/administration/api/)')
|
apiAppId = validate_input_is_not_empty('App ID', f'App ID for API key (from https://{hostname}/administration/api/)')
|
||||||
|
@ -452,7 +454,7 @@ def main():
|
||||||
vlan_sets = get_vlan_sets(uri, token, vlans)
|
vlan_sets = get_vlan_sets(uri, token, vlans)
|
||||||
if remote_agent:
|
if remote_agent:
|
||||||
agent_sets = get_agent_sets(uri, token, regions)
|
agent_sets = get_agent_sets(uri, token, regions)
|
||||||
|
|
||||||
# create the networks
|
# create the networks
|
||||||
for network in networks:
|
for network in networks:
|
||||||
network['region'] = regions[network['vcenter']]['name']
|
network['region'] = regions[network['vcenter']]['name']
|
||||||
|
@ -462,7 +464,7 @@ def main():
|
||||||
if network['vlan'] == 0:
|
if network['vlan'] == 0:
|
||||||
network['vlanId'] = None
|
network['vlanId'] = None
|
||||||
else:
|
else:
|
||||||
network['vlanId'] = get_id_from_sets(network['vlan'], vlan_sets)
|
network['vlanId'] = get_id_from_sets(network['vlan'], vlan_sets)
|
||||||
if remote_agent:
|
if remote_agent:
|
||||||
network['agentId'] = get_id_from_sets(network['region'], agent_sets)
|
network['agentId'] = get_id_from_sets(network['region'], agent_sets)
|
||||||
else:
|
else:
|
||||||
|
@ -478,8 +480,8 @@ if __name__ == "__main__":
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll run it and provide the path to the network export CSV file:
|
I'll run it and provide the path to the network export CSV file:
|
||||||
```bash
|
```shell
|
||||||
python3 phpipam-bulk-import.py ~/networks.csv
|
python3 phpipam-bulk-import.py ~/networks.csv # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
The script will print out a little descriptive bit about what sort of networks it's going to try to import and then will straight away start processing the file to identify the networks, vCenters, VLANs, and datacenters which will be imported:
|
The script will print out a little descriptive bit about what sort of networks it's going to try to import and then will straight away start processing the file to identify the networks, vCenters, VLANs, and datacenters which will be imported:
|
||||||
|
@ -489,16 +491,19 @@ Importing networks from /home/john/networks.csv...
|
||||||
Processed 17 lines and found:
|
Processed 17 lines and found:
|
||||||
|
|
||||||
- 10 networks:
|
- 10 networks:
|
||||||
['BOW-Servers 172.16.20.0', 'BOW-Servers 172.16.30.0', 'BOW-Servers 172.16.40.0', 'DRE-Servers 172.16.50.0', 'DRE-Servers 172.16.60.x', 'MGT-Home 192.168.1.0', 'MGT-Servers 172.16.10.0', 'VPOT8-Mgmt 172.20.10.0/27', 'VPOT8-Servers 172.20.10.32/27', 'VPOT8-Servers 172.20.10.64_26']
|
['BOW-Servers 172.16.20.0', 'BOW-Servers 172.16.30.0', 'BOW-Servers 172.16.40.0',
|
||||||
|
'DRE-Servers 172.16.50.0', 'DRE-Servers 172.16.60.x', 'MGT-Home 192.168.1.0',
|
||||||
|
'MGT-Servers 172.16.10.0', 'VPOT8-Mgmt 172.20.10.0/27', 'VPOT8-Servers 172.20.10.32/27',
|
||||||
|
'VPOT8-Servers 172.20.10.64_26']
|
||||||
|
|
||||||
- 1 vCenter servers:
|
- 1 vCenter servers:
|
||||||
['vcsa']
|
['vcsa']
|
||||||
|
|
||||||
- 10 VLANs:
|
- 10 VLANs:
|
||||||
[0, 20, 30, 40, 1610, 1620, 1630, 1640, 1650, 1660]
|
[0, 20, 30, 40, 1610, 1620, 1630, 1640, 1650, 1660]
|
||||||
|
|
||||||
- 2 Datacenters:
|
- 2 Datacenters:
|
||||||
['Lab', 'Other Lab']
|
['Lab', 'Other Lab']
|
||||||
```
|
```
|
||||||
|
|
||||||
It then starts prompting for the additional details which will be needed:
|
It then starts prompting for the additional details which will be needed:
|
||||||
|
@ -571,7 +576,7 @@ So now phpIPAM knows about the vSphere networks I care about, and it can keep tr
|
||||||
... but I haven't actually *deployed* an agent yet. I'll do that by following the same basic steps [described here](/tanzu-community-edition-k8s-homelab/#phpipam-agent) to spin up my `phpipam-agent` on Kubernetes, and I'll plug in that automagically-generated code for the `IPAM_AGENT_KEY` environment variable:
|
... but I haven't actually *deployed* an agent yet. I'll do that by following the same basic steps [described here](/tanzu-community-edition-k8s-homelab/#phpipam-agent) to spin up my `phpipam-agent` on Kubernetes, and I'll plug in that automagically-generated code for the `IPAM_AGENT_KEY` environment variable:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
---
|
# torchlight! {"lineNumbers": true}
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
|
|
|
@ -25,7 +25,7 @@ It's super handy when a Linux config file is loaded with comments to tell you pr
|
||||||
|
|
||||||
Next time, instead of scrolling through page after page of lengthy embedded explanations, just use:
|
Next time, instead of scrolling through page after page of lengthy embedded explanations, just use:
|
||||||
```shell
|
```shell
|
||||||
egrep -v "^\s*(#|$)" $filename
|
egrep -v "^\s*(#|$)" $filename # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
For added usefulness, I alias this command to `ccat` (which my brain interprets as "commentless cat") in [my `~/.zshrc`](https://github.com/jbowdre/dotfiles/blob/main/zsh/.zshrc):
|
For added usefulness, I alias this command to `ccat` (which my brain interprets as "commentless cat") in [my `~/.zshrc`](https://github.com/jbowdre/dotfiles/blob/main/zsh/.zshrc):
|
||||||
|
@ -35,20 +35,24 @@ alias ccat='egrep -v "^\s*(#|$)"'
|
||||||
|
|
||||||
Now instead of viewing all 75 lines of a [mostly-default Vagrantfile](/create-vms-chromebook-hashicorp-vagrant), I just see the 7 that matter:
|
Now instead of viewing all 75 lines of a [mostly-default Vagrantfile](/create-vms-chromebook-hashicorp-vagrant), I just see the 7 that matter:
|
||||||
```shell
|
```shell
|
||||||
; wc -l Vagrantfile
|
wc -l Vagrantfile # [tl! .cmd]
|
||||||
75 Vagrantfile
|
75 Vagrantfile # [tl! .nocopy]
|
||||||
|
```
|
||||||
|
|
||||||
; ccat Vagrantfile
|
```shell
|
||||||
Vagrant.configure("2") do |config|
|
ccat Vagrantfile # [tl! .cmd]
|
||||||
|
Vagrant.configure("2") do |config| # [tl! .nocopy:start]
|
||||||
config.vm.box = "oopsme/windows11-22h2"
|
config.vm.box = "oopsme/windows11-22h2"
|
||||||
config.vm.provider :libvirt do |libvirt|
|
config.vm.provider :libvirt do |libvirt|
|
||||||
libvirt.cpus = 4
|
libvirt.cpus = 4
|
||||||
libvirt.memory = 4096
|
libvirt.memory = 4096
|
||||||
end
|
end
|
||||||
end
|
end # [tl! .nocopy:end]
|
||||||
|
```
|
||||||
|
|
||||||
; ccat Vagrantfile | wc -l
|
```shell
|
||||||
7
|
ccat Vagrantfile | wc -l # [tl! .cmd]
|
||||||
|
7 # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
Nice!
|
Nice!
|
||||||
|
|
|
@ -67,8 +67,8 @@ Anyway, after switching to the cheaper Standard tier I can click on the **Extern
|
||||||
|
|
||||||
##### Security Configuration
|
##### Security Configuration
|
||||||
The **Security** section lets me go ahead and upload an SSH public key that I can then use for logging into the instance once it's running. Of course, that means I'll first need to generate a key pair for this purpose:
|
The **Security** section lets me go ahead and upload an SSH public key that I can then use for logging into the instance once it's running. Of course, that means I'll first need to generate a key pair for this purpose:
|
||||||
```sh
|
```shell
|
||||||
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_wireguard
|
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_wireguard # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Okay, now that I've got my keys, I can click the **Add Item** button and paste in the contents of `~/.ssh/id_ed25519_wireguard.pub`.
|
Okay, now that I've got my keys, I can click the **Add Item** button and paste in the contents of `~/.ssh/id_ed25519_wireguard.pub`.
|
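To grab those contents for pasting, `cat` works just fine:

```shell
cat ~/.ssh/id_ed25519_wireguard.pub # [tl! .cmd]
```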
||||||
|
@ -90,61 +90,64 @@ I'll click **Create** and move on.
|
||||||
|
|
||||||
#### WireGuard Server Setup
|
#### WireGuard Server Setup
|
||||||
Once the **Compute Engine > Instances** [page](https://console.cloud.google.com/compute/instances) indicates that the instance is ready, I can make a note of the listed public IP and then log in via SSH:
|
Once the **Compute Engine > Instances** [page](https://console.cloud.google.com/compute/instances) indicates that the instance is ready, I can make a note of the listed public IP and then log in via SSH:
|
||||||
```sh
|
```shell
|
||||||
ssh -i ~/.ssh/id_ed25519_wireguard {PUBLIC_IP}
|
ssh -i ~/.ssh/id_ed25519_wireguard {PUBLIC_IP} # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Preparation
|
##### Preparation
|
||||||
And, as always, I'll first make sure the OS is fully updated before doing anything else:
|
And, as always, I'll first make sure the OS is fully updated before doing anything else:
|
||||||
```sh
|
```shell
|
||||||
sudo apt update
|
sudo apt update # [tl! .cmd:1]
|
||||||
sudo apt upgrade
|
sudo apt upgrade
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I'll install `ufw` to easily manage the host firewall, `qrencode` to make it easier to generate configs for mobile clients, `openresolv` to avoid [this issue](https://superuser.com/questions/1500691/usr-bin-wg-quick-line-31-resolvconf-command-not-found-wireguard-debian/1500896), and `wireguard` to, um, guard the wires:
|
Then I'll install `ufw` to easily manage the host firewall, `qrencode` to make it easier to generate configs for mobile clients, `openresolv` to avoid [this issue](https://superuser.com/questions/1500691/usr-bin-wg-quick-line-31-resolvconf-command-not-found-wireguard-debian/1500896), and `wireguard` to, um, guard the wires:
|
||||||
```sh
|
```shell
|
||||||
sudo apt install ufw qrencode openresolv wireguard
|
sudo apt install ufw qrencode openresolv wireguard # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Configuring the host firewall with `ufw` is very straightforward:
|
Configuring the host firewall with `ufw` is very straightforward:
|
||||||
```sh
|
```shell
|
||||||
# First, SSH:
|
# First, SSH: # [tl! .nocopy]
|
||||||
sudo ufw allow 22/tcp
|
sudo ufw allow 22/tcp # [tl! .cmd]
|
||||||
# and WireGuard:
|
# and WireGuard: # [tl! .nocopy]
|
||||||
sudo ufw allow 51820/udp
|
sudo ufw allow 51820/udp # [tl! .cmd]
|
||||||
# Then turn it on:
|
# Then turn it on: # [tl! .nocopy]
|
||||||
sudo ufw enable
|
sudo ufw enable # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
The last preparatory step is to enable packet forwarding in the kernel so that the instance will be able to route traffic between the remote clients and my home network (once I get to that point). I can configure that on-the-fly with:
|
The last preparatory step is to enable packet forwarding in the kernel so that the instance will be able to route traffic between the remote clients and my home network (once I get to that point). I can configure that on-the-fly with:
|
||||||
```sh
|
```shell
|
||||||
sudo sysctl -w net.ipv4.ip_forward=1
|
sudo sysctl -w net.ipv4.ip_forward=1 # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
To make it permanent, I'll edit `/etc/sysctl.conf` and uncomment the same line:
|
To make it permanent, I'll edit `/etc/sysctl.conf` and uncomment the same line:
|
||||||
```sh
|
```shell
|
||||||
$ sudo vi /etc/sysctl.conf
|
sudo vi /etc/sysctl.conf # [tl! .cmd]
|
||||||
|
```
|
||||||
|
```ini
|
||||||
# Uncomment the next line to enable packet forwarding for IPv4
|
# Uncomment the next line to enable packet forwarding for IPv4
|
||||||
net.ipv4.ip_forward=1
|
net.ipv4.ip_forward=1
|
||||||
```
|
```
|
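Either way, a quick query should confirm that forwarding is on:

```shell
sysctl net.ipv4.ip_forward # [tl! .cmd]
net.ipv4.ip_forward = 1 # [tl! .nocopy]
```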
||||||
|
|
||||||
##### WireGuard Interface Config
|
##### WireGuard Interface Config
|
||||||
I'll switch to the root user, move into the `/etc/wireguard` directory, and issue `umask 077` so that the files I'm about to create will have a very limited permission set (to be accessible by root, and _only_ root):
|
I'll switch to the root user, move into the `/etc/wireguard` directory, and issue `umask 077` so that the files I'm about to create will have a very limited permission set (to be accessible by root, and _only_ root):
|
||||||
```sh
|
```shell
|
||||||
sudo -i
|
sudo -i # [tl! .cmd]
|
||||||
cd /etc/wireguard
|
cd /etc/wireguard # [tl! .cmd_root:1]
|
||||||
umask 077
|
umask 077
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I can use the `wg genkey` command to generate the server's private key, save it to a file called `server.key`, pass it through `wg pubkey` to generate the corresponding public key, and save that to `server.pub`:
|
Then I can use the `wg genkey` command to generate the server's private key, save it to a file called `server.key`, pass it through `wg pubkey` to generate the corresponding public key, and save that to `server.pub`:
|
||||||
```sh
|
```shell
|
||||||
wg genkey | tee server.key | wg pubkey > server.pub
|
wg genkey | tee server.key | wg pubkey > server.pub # [tl! .cmd_root]
|
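Thanks to that `umask`, the new files should be readable by root and only root; a listing would look something like this (sizes and timestamps illustrative):

```shell
ls -l server.* # [tl! .cmd_root]
-rw------- 1 root root 45 Oct 28 12:00 server.key # [tl! .nocopy:1]
-rw------- 1 root root 45 Oct 28 12:00 server.pub
```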
||||||
```
|
```
|
||||||
|
|
||||||
As I mentioned earlier, WireGuard will create a virtual network interface using an internal network to pass traffic between the WireGuard peers. By convention, that interface is `wg0` and it draws its configuration from a file in `/etc/wireguard` named `wg0.conf`. I could create a configuration file with a different name and thus wind up with a different interface name as well, but I'll stick with tradition to keep things easy to follow.
|
As I mentioned earlier, WireGuard will create a virtual network interface using an internal network to pass traffic between the WireGuard peers. By convention, that interface is `wg0` and it draws its configuration from a file in `/etc/wireguard` named `wg0.conf`. I could create a configuration file with a different name and thus wind up with a different interface name as well, but I'll stick with tradition to keep things easy to follow.
|
||||||
|
|
||||||
The format of the interface configuration file will need to look something like this:
|
The format of the interface configuration file will need to look something like this:
|
||||||
```
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
[Interface] # this section defines the local WireGuard interface
|
[Interface] # this section defines the local WireGuard interface
|
||||||
Address = # CIDR-format IP address of the virtual WireGuard interface
|
Address = # CIDR-format IP address of the virtual WireGuard interface
|
||||||
ListenPort = # WireGuard listens on this port for incoming traffic (randomized if not specified)
|
ListenPort = # WireGuard listens on this port for incoming traffic (randomized if not specified)
|
||||||
|
@ -162,7 +165,8 @@ AllowedIPs = # which IPs will be routed to this peer
|
||||||
There will be a single `[Interface]` section in each peer's configuration file, but they may include multiple `[Peer]` sections. For my config, I'll use the `10.200.200.0/24` network for WireGuard, and let this server be `10.200.200.1`, the VyOS router in my home lab `10.200.200.2`, and I'll assign IPs to the other peers from there. I found a note that Google Cloud uses an MTU size of `1460` bytes so that's what I'll set on this end. I'm going to configure WireGuard to use the VyOS router as the DNS server, and I'll specify my internal `lab.bowdre.net` search domain. Finally, I'll leverage the `PostUp` and `PostDown` directives to enable and disable NAT so that the server will be able to forward traffic between networks for me.
|
There will be a single `[Interface]` section in each peer's configuration file, but they may include multiple `[Peer]` sections. For my config, I'll use the `10.200.200.0/24` network for WireGuard, and let this server be `10.200.200.1`, the VyOS router in my home lab `10.200.200.2`, and I'll assign IPs to the other peers from there. I found a note that Google Cloud uses an MTU size of `1460` bytes so that's what I'll set on this end. I'm going to configure WireGuard to use the VyOS router as the DNS server, and I'll specify my internal `lab.bowdre.net` search domain. Finally, I'll leverage the `PostUp` and `PostDown` directives to enable and disable NAT so that the server will be able to forward traffic between networks for me.
|
||||||
|
|
||||||
So here's the start of my GCP WireGuard server's `/etc/wireguard/wg0.conf`:
|
So here's the start of my GCP WireGuard server's `/etc/wireguard/wg0.conf`:
|
||||||
```sh
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# /etc/wireguard/wg0.conf
|
# /etc/wireguard/wg0.conf
|
||||||
[Interface]
|
[Interface]
|
||||||
Address = 10.200.200.1/24
|
Address = 10.200.200.1/24
|
||||||
|
@ -175,22 +179,25 @@ PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING
|
||||||
```
|
```
|
||||||
|
|
||||||
I don't have any other peers ready to add to this config yet, but I can go ahead and bring up the interface all the same. I'm going to use the `wg-quick` wrapper instead of calling `wg` directly since it simplifies a bit of the configuration, but first I'll need to enable the `wg-quick@{INTERFACE}` service so that it will run automatically at startup:
|
I don't have any other peers ready to add to this config yet, but I can go ahead and bring up the interface all the same. I'm going to use the `wg-quick` wrapper instead of calling `wg` directly since it simplifies a bit of the configuration, but first I'll need to enable the `wg-quick@{INTERFACE}` service so that it will run automatically at startup:
|
||||||
```sh
|
```shell
|
||||||
systemctl enable wg-quick@wg0
|
systemctl enable wg-quick@wg0 # [tl! .cmd_root:1]
|
||||||
systemctl start wg-quick@wg0
|
systemctl start wg-quick@wg0
|
||||||
```
|
```
|
||||||
|
|
||||||
I can now bring up the interface with `wg-quick up wg0` and check the status with `wg show`:
|
I can now bring up the interface with `wg-quick up wg0` and check the status with `wg show`:
|
||||||
```
|
```shell
|
||||||
root@wireguard:~# wg-quick up wg0
|
wg-quick up wg0 # [tl! .cmd_root]
|
||||||
[#] ip link add wg0 type wireguard
|
[#] ip link add wg0 type wireguard # [tl! .nocopy:start]
|
||||||
[#] wg setconf wg0 /dev/fd/63
|
[#] wg setconf wg0 /dev/fd/63
|
||||||
[#] ip -4 address add 10.200.200.1/24 dev wg0
|
[#] ip -4 address add 10.200.200.1/24 dev wg0
|
||||||
[#] ip link set mtu 1460 up dev wg0
|
[#] ip link set mtu 1460 up dev wg0
|
||||||
[#] resolvconf -a wg0 -m 0 -x
|
[#] resolvconf -a wg0 -m 0 -x
|
||||||
[#] iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE
|
[#] iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE # [tl! .nocopy:end]
|
||||||
root@wireguard:~# wg show
|
```
|
||||||
interface: wg0
|
|
||||||
|
```shell
|
||||||
|
wg show # [tl! .cmd_root]
|
||||||
|
interface: wg0 # [tl! .nocopy:3]
|
||||||
public key: {GCP_PUBLIC_KEY}
|
public key: {GCP_PUBLIC_KEY}
|
||||||
private key: (hidden)
|
private key: (hidden)
|
||||||
listening port: 51820
|
listening port: 51820
|
||||||
|
@ -200,45 +207,45 @@ I'll come back here once I've got a peer config to add.
|
||||||
|
|
||||||
### Configure VyOS Router as WireGuard Peer
|
### Configure VyOS Router as WireGuard Peer
|
||||||
Comparatively, configuring WireGuard on VyOS is a bit more direct. I'll start by entering configuration mode and generating and binding a key pair for this interface:
|
Comparatively, configuring WireGuard on VyOS is a bit more direct. I'll start by entering configuration mode and generating and binding a key pair for this interface:
|
||||||
```sh
|
```shell
|
||||||
configure
|
configure # [tl! .cmd_root:1]
|
||||||
run generate pki wireguard key-pair install interface wg0
|
run generate pki wireguard key-pair install interface wg0
|
||||||
```
|
```
|
||||||
|
|
||||||
And then I'll configure the rest of the options needed for the interface:
|
And then I'll configure the rest of the options needed for the interface:
|
||||||
```sh
|
```shell
|
||||||
set interfaces wireguard wg0 address '10.200.200.2/24'
|
set interfaces wireguard wg0 address '10.200.200.2/24' # [tl! .cmd_root:start]
|
||||||
set interfaces wireguard wg0 description 'VPN to GCP'
|
set interfaces wireguard wg0 description 'VPN to GCP'
|
||||||
set interfaces wireguard wg0 peer wireguard-gcp address '{GCP_PUBLIC_IP}'
|
set interfaces wireguard wg0 peer wireguard-gcp address '{GCP_PUBLIC_IP}'
|
||||||
set interfaces wireguard wg0 peer wireguard-gcp allowed-ips '0.0.0.0/0'
|
set interfaces wireguard wg0 peer wireguard-gcp allowed-ips '0.0.0.0/0'
|
||||||
set interfaces wireguard wg0 peer wireguard-gcp persistent-keepalive '25'
|
set interfaces wireguard wg0 peer wireguard-gcp persistent-keepalive '25'
|
||||||
set interfaces wireguard wg0 peer wireguard-gcp port '51820'
|
set interfaces wireguard wg0 peer wireguard-gcp port '51820'
|
||||||
set interfaces wireguard wg0 peer wireguard-gcp public-key '{GCP_PUBLIC_KEY}'
|
set interfaces wireguard wg0 peer wireguard-gcp public-key '{GCP_PUBLIC_KEY}' # [tl! .cmd_root:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that this time I'm allowing all IPs (`0.0.0.0/0`) so that this WireGuard interface will pass traffic intended for any destination (whether it's local, remote, or on the Internet). And I'm specifying a [25-second `persistent-keepalive` interval](https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence) to help ensure that this NAT-ed tunnel stays up even when it's not actively passing traffic - after all, I'll need the GCP-hosted peer to be able to initiate the connection so I can access the home network remotely.
|
Note that this time I'm allowing all IPs (`0.0.0.0/0`) so that this WireGuard interface will pass traffic intended for any destination (whether it's local, remote, or on the Internet). And I'm specifying a [25-second `persistent-keepalive` interval](https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence) to help ensure that this NAT-ed tunnel stays up even when it's not actively passing traffic - after all, I'll need the GCP-hosted peer to be able to initiate the connection so I can access the home network remotely.
|
||||||
|
|
||||||
While I'm at it, I'll also add a static route to ensure traffic for the WireGuard tunnel finds the right interface:
|
While I'm at it, I'll also add a static route to ensure traffic for the WireGuard tunnel finds the right interface:
|
||||||
```sh
|
```shell
|
||||||
set protocols static route 10.200.200.0/24 interface wg0
|
set protocols static route 10.200.200.0/24 interface wg0 # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
And I'll add the new `wg0` interface as a listening address for the VyOS DNS forwarder:
|
And I'll add the new `wg0` interface as a listening address for the VyOS DNS forwarder:
|
||||||
```sh
|
```shell
|
||||||
set service dns forwarding listen-address '10.200.200.2'
|
set service dns forwarding listen-address '10.200.200.2' # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
I can use the `compare` command to verify the changes I've made, and then apply and save the updated config:
|
I can use the `compare` command to verify the changes I've made, and then apply and save the updated config:
|
||||||
```sh
|
```shell
|
||||||
compare
|
compare # [tl! .cmd_root:2]
|
||||||
commit
|
commit
|
||||||
save
|
save
|
||||||
```
|
```
|
||||||
|
|
||||||
I can check the status of WireGuard on VyOS (and view the public key!) like so:
|
I can check the status of WireGuard on VyOS (and view the public key!) like so:
|
||||||
```sh
|
```shell
|
||||||
$ show interfaces wireguard wg0 summary
|
show interfaces wireguard wg0 summary # [tl! .cmd_root]
|
||||||
interface: wg0
|
interface: wg0 # [tl! .nocopy:start]
|
||||||
public key: {VYOS_PUBLIC_KEY}
|
public key: {VYOS_PUBLIC_KEY}
|
||||||
private key: (hidden)
|
private key: (hidden)
|
||||||
listening port: 43543
|
listening port: 43543
|
||||||
|
@ -247,13 +254,13 @@ peer: {GCP_PUBLIC_KEY}
|
||||||
endpoint: {GCP_PUBLIC_IP}:51820
|
endpoint: {GCP_PUBLIC_IP}:51820
|
||||||
allowed ips: 0.0.0.0/0
|
allowed ips: 0.0.0.0/0
|
||||||
transfer: 0 B received, 592 B sent
|
transfer: 0 B received, 592 B sent
|
||||||
persistent keepalive: every 25 seconds
|
persistent keepalive: every 25 seconds # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
See? That part was much easier to set up! But it doesn't look like it's actually passing traffic yet... because while the VyOS peer has been configured with the GCP peer's public key, the GCP peer doesn't know anything about the VyOS peer yet.
|
See? That part was much easier to set up! But it doesn't look like it's actually passing traffic yet... because while the VyOS peer has been configured with the GCP peer's public key, the GCP peer doesn't know anything about the VyOS peer yet.
|
||||||
|
|
||||||
So I'll copy `{VYOS_PUBLIC_KEY}` and SSH back to the GCP instance to finish that configuration. Once I'm there, I can edit `/etc/wireguard/wg0.conf` as root and add in a new `[Peer]` section at the bottom, like this:
|
So I'll copy `{VYOS_PUBLIC_KEY}` and SSH back to the GCP instance to finish that configuration. Once I'm there, I can edit `/etc/wireguard/wg0.conf` as root and add in a new `[Peer]` section at the bottom, like this:
|
||||||
```
|
```ini
|
||||||
[Peer]
|
[Peer]
|
||||||
# VyOS
|
# VyOS
|
||||||
PublicKey = {VYOS_PUBLIC_KEY}
|
PublicKey = {VYOS_PUBLIC_KEY}
|
||||||
|
@ -263,17 +270,17 @@ AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16
|
||||||
This time, I'm telling WireGuard that the new peer has IP `10.200.200.2` but that it should also get traffic destined for the `192.168.1.0/24` and `172.16.0.0/16` networks, my home and lab networks. Again, the `AllowedIPs` parameter is used for WireGuard's Cryptokey Routing so that it can keep track of which traffic goes to which peers (and which key to use for encryption).
|
This time, I'm telling WireGuard that the new peer has IP `10.200.200.2` but that it should also get traffic destined for the `192.168.1.0/24` and `172.16.0.0/16` networks, my home and lab networks. Again, the `AllowedIPs` parameter is used for WireGuard's Cryptokey Routing so that it can keep track of which traffic goes to which peers (and which key to use for encryption).
|
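(Once the config gets reloaded in a moment, `wg` can show exactly that peer-to-network mapping:)

```shell
wg show wg0 allowed-ips # [tl! .cmd_root]
{VYOS_PUBLIC_KEY} 10.200.200.2/32 192.168.1.0/24 172.16.0.0/16 # [tl! .nocopy]
```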
||||||
|
|
||||||
After saving the file, I can either restart WireGuard by bringing the interface down and back up (`wg-quick down wg0 && wg-quick up wg0`), or I can reload it on the fly with:
|
After saving the file, I can either restart WireGuard by bringing the interface down and back up (`wg-quick down wg0 && wg-quick up wg0`), or I can reload it on the fly with:
|
||||||
```sh
|
```shell
|
||||||
sudo -i
|
sudo -i # [tl! .cmd]
|
||||||
wg syncconf wg0 <(wg-quick strip wg0)
|
wg syncconf wg0 <(wg-quick strip wg0) # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
(I can't just use `wg syncconf wg0` directly since `/etc/wireguard/wg0.conf` includes the `PostUp`/`PostDown` commands which can only be parsed by the `wg-quick` wrapper, so I'm using `wg-quick strip {INTERFACE}` to grab the contents of the config file, remove the problematic bits, and then pass what's left to the `wg syncconf {INTERFACE}` command to update the current running config.)
|
(I can't just use `wg syncconf wg0` directly since `/etc/wireguard/wg0.conf` includes the `PostUp`/`PostDown` commands which can only be parsed by the `wg-quick` wrapper, so I'm using `wg-quick strip {INTERFACE}` to grab the contents of the config file, remove the problematic bits, and then pass what's left to the `wg syncconf {INTERFACE}` command to update the current running config.)
|
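If you're curious about what survives the stripping, you can print it yourself; the result is roughly the original config minus the `wg-quick`-only directives (placeholders shown here, but the real output includes the private key in plain text):

```shell
wg-quick strip wg0 # [tl! .cmd_root]
[Interface] # [tl! .nocopy:start]
PrivateKey = {SERVER_PRIVATE_KEY}
ListenPort = 51820

[Peer]
PublicKey = {VYOS_PUBLIC_KEY}
AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16 # [tl! .nocopy:end]
```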
||||||
|
|
||||||
Now I can check the status of WireGuard on the GCP end:
|
Now I can check the status of WireGuard on the GCP end:
|
||||||
```sh
|
```shell
|
||||||
root@wireguard:~# wg show
|
wg show # [tl! .cmd_root]
|
||||||
interface: wg0
|
interface: wg0 # [tl! .nocopy:start]
|
||||||
public key: {GCP_PUBLIC_KEY}
|
public key: {GCP_PUBLIC_KEY}
|
||||||
private key: (hidden)
|
private key: (hidden)
|
||||||
listening port: 51820
|
listening port: 51820
|
||||||
|
@ -282,26 +289,28 @@ peer: {VYOS_PUBLIC_KEY}
|
||||||
endpoint: {VYOS_PUBLIC_IP}:43990
|
endpoint: {VYOS_PUBLIC_IP}:43990
|
||||||
allowed ips: 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16
|
allowed ips: 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16
|
||||||
latest handshake: 55 seconds ago
|
latest handshake: 55 seconds ago
|
||||||
transfer: 1.23 KiB received, 368 B sent
|
transfer: 1.23 KiB received, 368 B sent # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Hey, we're passing traffic now! And I can verify that I can ping stuff on my home and lab networks from the GCP instance:
|
Hey, we're passing traffic now! And I can verify that I can ping stuff on my home and lab networks from the GCP instance:
|
||||||
```sh
|
```shell
|
||||||
john@wireguard:~$ ping -c 1 192.168.1.5
|
ping -c 1 192.168.1.5 # [tl! .cmd]
|
||||||
PING 192.168.1.5 (192.168.1.5) 56(84) bytes of data.
|
PING 192.168.1.5 (192.168.1.5) 56(84) bytes of data. # [tl! .nocopy:start]
|
||||||
64 bytes from 192.168.1.5: icmp_seq=1 ttl=127 time=35.6 ms
|
64 bytes from 192.168.1.5: icmp_seq=1 ttl=127 time=35.6 ms
|
||||||
|
|
||||||
--- 192.168.1.5 ping statistics ---
|
--- 192.168.1.5 ping statistics ---
|
||||||
1 packets transmitted, 1 received, 0% packet loss, time 0ms
|
1 packets transmitted, 1 received, 0% packet loss, time 0ms
|
||||||
rtt min/avg/max/mdev = 35.598/35.598/35.598/0.000 ms
|
rtt min/avg/max/mdev = 35.598/35.598/35.598/0.000 ms # [tl! .nocopy:end]
|
||||||
|
```
|
||||||
|
|
||||||
john@wireguard:~$ ping -c 1 172.16.10.1
|
```shell
|
||||||
PING 172.16.10.1 (172.16.10.1) 56(84) bytes of data.
|
ping -c 1 172.16.10.1 # [tl! .cmd]
|
||||||
|
PING 172.16.10.1 (172.16.10.1) 56(84) bytes of data. # [tl! .nocopy:start]
|
||||||
64 bytes from 172.16.10.1: icmp_seq=1 ttl=64 time=35.3 ms
|
64 bytes from 172.16.10.1: icmp_seq=1 ttl=64 time=35.3 ms
|
||||||
|
|
||||||
--- 172.16.10.1 ping statistics ---
|
--- 172.16.10.1 ping statistics ---
|
||||||
1 packets transmitted, 1 received, 0% packet loss, time 0ms
|
1 packets transmitted, 1 received, 0% packet loss, time 0ms
|
||||||
rtt min/avg/max/mdev = 35.275/35.275/35.275/0.000 ms
|
rtt min/avg/max/mdev = 35.275/35.275/35.275/0.000 ms # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Cool!
|
Cool!
|
||||||
|
@ -340,14 +349,14 @@ I _shouldn't_ need the keepalive for the "Road Warrior" peers connecting to the
|
||||||
|
|
||||||
Now I can go ahead and save this configuration, but before I try (and fail) to connect I first need to tell the cloud-hosted peer about the Chromebook. So I fire up an SSH session to my GCP instance, become root, and edit the WireGuard configuration to add a new `[Peer]` section.
|
Now I can go ahead and save this configuration, but before I try (and fail) to connect I first need to tell the cloud-hosted peer about the Chromebook. So I fire up an SSH session to my GCP instance, become root, and edit the WireGuard configuration to add a new `[Peer]` section.
|
||||||
|
|
||||||
```sh
|
```shell
|
||||||
sudo -i
|
sudo -i # [tl! .cmd]
|
||||||
vi /etc/wireguard/wg0.conf
|
vi /etc/wireguard/wg0.conf # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
Here's the new section that I'll add to the bottom of the config:
|
Here's the new section that I'll add to the bottom of the config:
|
||||||
|
|
||||||
```sh
|
```ini
|
||||||
[Peer]
|
[Peer]
|
||||||
# Chromebook
|
# Chromebook
|
||||||
PublicKey = {CB_PUBLIC_KEY}
|
PublicKey = {CB_PUBLIC_KEY}
|
||||||
|
@ -357,7 +366,8 @@ AllowedIPs = 10.200.200.3/32
|
||||||
This one is acting as a single-node endpoint (rather than an entryway into other networks like the VyOS peer) so setting `AllowedIPs` to only the peer's IP makes sure that WireGuard will only send it traffic specifically intended for this peer.
|
This one is acting as a single-node endpoint (rather than an entryway into other networks like the VyOS peer) so setting `AllowedIPs` to only the peer's IP makes sure that WireGuard will only send it traffic specifically intended for this peer.
|
||||||
|
|
||||||
So my complete `/etc/wireguard/wg0.conf` looks like this so far:
|
So my complete `/etc/wireguard/wg0.conf` looks like this so far:
|
||||||
```sh
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# /etc/wireguard/wg0.conf
|
# /etc/wireguard/wg0.conf
|
||||||
[Interface]
|
[Interface]
|
||||||
Address = 10.200.200.1/24
|
Address = 10.200.200.1/24
|
||||||
|
@ -368,7 +378,7 @@ DNS = 10.200.200.2, lab.bowdre.net
|
||||||
PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE
|
PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE
|
||||||
PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o ens4 -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ens4 -j MASQUERADE
|
PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o ens4 -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ens4 -j MASQUERADE
|
||||||
|
|
||||||
[Peer]
|
[Peer] # [tl! focus:start]
|
||||||
# VyOS
|
# VyOS
|
||||||
PublicKey = {VYOS_PUBLIC_KEY}
|
PublicKey = {VYOS_PUBLIC_KEY}
|
||||||
AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16
|
AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16
|
||||||
|
@ -376,19 +386,19 @@ AllowedIPs = 10.200.200.2/32, 192.168.1.0/24, 172.16.0.0/16
|
||||||
[Peer]
|
[Peer]
|
||||||
# Chromebook
|
# Chromebook
|
||||||
PublicKey = {CB_PUBLIC_KEY}
|
PublicKey = {CB_PUBLIC_KEY}
|
||||||
AllowedIPs = 10.200.200.3/32
|
AllowedIPs = 10.200.200.3/32 # [tl! focus:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Now to save the file and reload the WireGuard configuration again:
|
Now to save the file and reload the WireGuard configuration again:
|
||||||
```sh
|
```shell
|
||||||
wg syncconf wg0 <(wg-quick strip wg0)
|
wg syncconf wg0 <(wg-quick strip wg0) # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
At this point I can activate the connection in the WireGuard Android app, wait a few seconds, and check with `wg show` to confirm that the tunnel has been established successfully:
|
At this point I can activate the connection in the WireGuard Android app, wait a few seconds, and check with `wg show` to confirm that the tunnel has been established successfully:
|
||||||
|
|
||||||
```sh
|
```shell
|
||||||
root@wireguard:~# wg show
|
wg show # [tl! .cmd_root]
|
||||||
interface: wg0
|
interface: wg0 # [tl! .nocopy:start]
|
||||||
public key: {GCP_PUBLIC_KEY}
|
public key: {GCP_PUBLIC_KEY}
|
||||||
private key: (hidden)
|
private key: (hidden)
|
||||||
listening port: 51820
|
listening port: 51820
|
||||||
|
@ -403,7 +413,7 @@ peer: {CB_PUBLIC_KEY}
|
||||||
endpoint: {CB_PUBLIC_IP}:33752
|
endpoint: {CB_PUBLIC_IP}:33752
|
||||||
allowed ips: 10.200.200.3/32
|
allowed ips: 10.200.200.3/32
|
||||||
latest handshake: 48 seconds ago
|
latest handshake: 48 seconds ago
|
||||||
transfer: 169.17 KiB received, 808.33 KiB sent
|
transfer: 169.17 KiB received, 808.33 KiB sent # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
And I can even access my homelab when not at home!
|
And I can even access my homelab when not at home!
|
||||||
|
@ -413,20 +423,21 @@ And I can even access my homelab when not at home!
|
||||||
Being able to copy-and-paste the required public keys between the WireGuard app and the SSH session to the GCP instance made it relatively easy to set up the Chromebook, but things could be a bit trickier on a phone without that kind of access. So instead I will create the phone's configuration on the WireGuard server in the cloud, render that config file as a QR code, and simply scan that through the phone's WireGuard app to import the settings.
|
Being able to copy-and-paste the required public keys between the WireGuard app and the SSH session to the GCP instance made it relatively easy to set up the Chromebook, but things could be a bit trickier on a phone without that kind of access. So instead I will create the phone's configuration on the WireGuard server in the cloud, render that config file as a QR code, and simply scan that through the phone's WireGuard app to import the settings.
|
||||||
|
|
||||||
I'll start by SSHing to the GCP instance, elevating to root, setting the restrictive `umask` again, and creating a new folder to store client configurations.
|
I'll start by SSHing to the GCP instance, elevating to root, setting the restrictive `umask` again, and creating a new folder to store client configurations.
|
||||||
```sh
|
```shell
|
||||||
sudo -i
|
sudo -i # [tl! .cmd]
|
||||||
umask 077
|
umask 077 # [tl! .cmd_root:2]
|
||||||
mkdir /etc/wireguard/clients
|
mkdir /etc/wireguard/clients
|
||||||
cd /etc/wireguard/clients
|
cd /etc/wireguard/clients
|
||||||
```
|
```
|
||||||
|
|
||||||
As before, I'll use the built-in `wg` commands to generate the private and public key pair:
|
As before, I'll use the built-in `wg` commands to generate the private and public key pair:
|
||||||
```sh
|
```shell
|
||||||
wg genkey | tee phone1.key | wg pubkey > phone1.pub
|
wg genkey | tee phone1.key | wg pubkey > phone1.pub # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
I can then use those keys to assemble the config for the phone:
|
I can then use those keys to assemble the config for the phone:
|
||||||
```sh
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# /etc/wireguard/clients/phone1.conf
|
# /etc/wireguard/clients/phone1.conf
|
||||||
[Interface]
|
[Interface]
|
||||||
PrivateKey = {PHONE1_PRIVATE_KEY}
|
PrivateKey = {PHONE1_PRIVATE_KEY}
|
||||||
|
@ -440,20 +451,20 @@ Endpoint = {GCP_PUBLIC_IP}:51820
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll also add the interface address and corresponding public key to a new `[Peer]` section of `/etc/wireguard/wg0.conf`:
|
I'll also add the interface address and corresponding public key to a new `[Peer]` section of `/etc/wireguard/wg0.conf`:
|
||||||
```sh
|
```ini
|
||||||
[Peer]
|
[Peer]
|
||||||
PublicKey = {PHONE1_PUBLIC_KEY}
|
PublicKey = {PHONE1_PUBLIC_KEY}
|
||||||
AllowedIPs = 10.200.200.4/32
|
AllowedIPs = 10.200.200.4/32
|
||||||
```
|
```
|
||||||
|
|
||||||
And reload the WireGuard config:
|
And reload the WireGuard config:
|
||||||
```sh
|
```shell
|
||||||
wg syncconf wg0 <(wg-quick strip wg0)
|
wg syncconf wg0 <(wg-quick strip wg0) # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
Back in the `clients/` directory, I can use `qrencode` to render the phone configuration file (keys and all!) as a QR code:
|
Back in the `clients/` directory, I can use `qrencode` to render the phone configuration file (keys and all!) as a QR code:
|
||||||
```sh
|
```shell
|
||||||
qrencode -t ansiutf8 < phone1.conf
|
qrencode -t ansiutf8 < phone1.conf # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
![QR code config](20211028_qrcode_config.png)
|
![QR code config](20211028_qrcode_config.png)
|
||||||
|
|
||||||
|
@ -465,8 +476,8 @@ I can even access my vSphere lab environment - not that it offers a great mobile
|
||||||
|
|
||||||
Before moving on too much further, though, I'm going to clean up the keys and client config file that I generated on the GCP instance. It's not great hygiene to keep a private key stored on the same system it's used to access.
|
Before moving on too much further, though, I'm going to clean up the keys and client config file that I generated on the GCP instance. It's not great hygiene to keep a private key stored on the same system it's used to access.
|
||||||
|
|
||||||
```sh
|
```shell
|
||||||
rm -f /etc/wireguard/clients/*
|
rm -f /etc/wireguard/clients/* # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Bonus: Automation!
|
##### Bonus: Automation!
|
||||||
|
|
|
@ -32,53 +32,52 @@ It took a bit of fumbling, but this article describes what it took to get a Vagr
|
||||||
There are a few packages which need to be installed before we can move on to the Vagrant-specific stuff. It's quite possible that these are already on your system... but if they *aren't* already present, you'll have a bad problem[^problem].
|
There are a few packages which need to be installed before we can move on to the Vagrant-specific stuff. It's quite possible that these are already on your system... but if they *aren't* already present, you'll have a bad problem[^problem].
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
sudo apt update
|
sudo apt update && sudo apt install \ # [tl! .cmd]
|
||||||
sudo apt install \
|
build-essential \
|
||||||
build-essential \
|
gpg \
|
||||||
gpg \
|
lsb-release \
|
||||||
lsb-release \
|
wget
|
||||||
wget
|
|
||||||
```
|
```
|
||||||

[^problem]: and [will not go to space today](https://xkcd.com/1133/).

I'll be configuring Vagrant to use [`libvirt`](https://libvirt.org/) to interface with the [Kernel Virtual Machine (KVM)](https://www.linux-kvm.org/page/Main_Page) virtualization solution (rather than something like VirtualBox that would bring more overhead) so I'll need to install some packages for that as well:
```shell
sudo apt install virt-manager libvirt-dev # [tl! .cmd]
```

And to avoid having to `sudo` each time I interact with `libvirt` I'll add myself to that group:
```shell
sudo gpasswd -a $USER libvirt ; newgrp libvirt # [tl! .cmd]
```

And to avoid [this issue](https://github.com/virt-manager/virt-manager/issues/333) I'll make a tweak to the `qemu.conf` file:
```shell
echo "remember_owner = 0" | sudo tee -a /etc/libvirt/qemu.conf # [tl! .cmd:1]
sudo systemctl restart libvirtd
```

I'm also going to use `rsync` to share a [synced folder](https://developer.hashicorp.com/vagrant/docs/synced-folders/basic_usage) between the host and the VM guest so I'll need to make sure that's installed too:
```shell
sudo apt install rsync # [tl! .cmd]
```

### Install Vagrant
With that out of the way, I'm ready to move on to the business of installing Vagrant. I'll start by adding the HashiCorp repository:
```shell
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg # [tl! .cmd:1]
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
```
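
As an optional sanity check before trusting the new repo, the key's fingerprint can be verified the way HashiCorp's install docs suggest (an extra step I'm noting here, not one from my original writeup):
```shell
gpg --no-default-keyring \ # [tl! .cmd]
  --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg \
  --fingerprint
```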

I'll then install the Vagrant package:
```shell
sudo apt update # [tl! .cmd:1]
sudo apt install vagrant
```

I also need to install the [`vagrant-libvirt` plugin](https://github.com/vagrant-libvirt/vagrant-libvirt) so that Vagrant will know how to interact with `libvirt`:
```shell
vagrant plugin install vagrant-libvirt # [tl! .cmd]
```

### Create a lightweight VM

@@ -88,17 +87,14 @@ Vagrant VMs are distributed as Boxes, and I can browse some published Boxes at [

So I'll create a new folder to contain the Vagrant configuration:
```shell
mkdir vagrant-alpine # [tl! .cmd:1]
cd vagrant-alpine
```

And since I'm referencing a Vagrant Box which is published on Vagrant Cloud, downloading the config is as simple as:
```shell
vagrant init generic/alpine38 # [tl! .cmd]
# [tl! .nocopy:4]
A `Vagrantfile` has been placed in this directory. You are now
ready to `vagrant up` your first virtual environment! Please read
the comments in the Vagrantfile as well as documentation on
```

@@ -107,7 +103,7 @@ the comments in the Vagrantfile as well as documentation on

Before I `vagrant up` the joint, I do need to make a quick tweak to the default Vagrantfile, which is what tells Vagrant how to configure the VM. By default, Vagrant will try to create a synced folder using NFS and will throw a nasty error when that (inevitably[^inevitable]) fails. So I'll open up the Vagrantfile to review and edit it:
```shell
vim Vagrantfile # [tl! .cmd]
```

Most of the default Vagrantfile is commented out. Here's the entirety of the configuration *without* the comments:

@@ -119,8 +115,11 @@ end

There's not a lot there, is there? Well I'm just going to add these two lines somewhere between the `Vagrant.configure()` and `end` lines:
```ruby
Vagrant.configure("2") do |config|
  config.vm.box = "generic/alpine38"
  config.nfs.verify_installed = false # [tl! focus:1 highlight:1]
  config.vm.synced_folder '.', '/vagrant', type: 'rsync'
end
```

The first line tells Vagrant not to bother checking whether NFS is installed; the second tells it to use `rsync` to share the local directory with the VM guest, where it will be mounted at `/vagrant`.

@@ -136,8 +135,8 @@ end

With that, I'm ready to fire up this VM with `vagrant up`! Vagrant will look inside `Vagrantfile` to see the config, pull down the `generic/alpine38` Box from Vagrant Cloud, boot the VM, configure it so I can SSH in to it, and mount the synced folder:
```shell
vagrant up # [tl! .cmd]
Bringing machine 'default' up with 'libvirt' provider... # [tl! .nocopy:start]
==> default: Box 'generic/alpine38' could not be found. Attempting to find and install...
    default: Box Provider: libvirt
    default: Box Version: >= 0
@@ -157,14 +156,14 @@ Bringing machine 'default' up with 'libvirt' provider...
[...]
    default: Key inserted! Disconnecting and reconnecting using new SSH key...
==> default: Machine booted and ready!
==> default: Rsyncing folder: /home/john/projects/vagrant-alpine/ => /vagrant # [tl! .nocopy:end]
```
And then I can use `vagrant ssh` to log in to the new VM:
```shell
vagrant ssh # [tl! .cmd:1]
cat /etc/os-release
NAME="Alpine Linux" # [tl! .nocopy:5]
ID=alpine
VERSION_ID=3.8.5
PRETTY_NAME="Alpine Linux v3.8"
@@ -174,19 +173,19 @@ BUG_REPORT_URL="http://bugs.alpinelinux.org"

I can also verify that the synced folder came through as expected:
```shell
ls -l /vagrant # [tl! .cmd]
total 4 # [tl! .nocopy:1]
-rw-r--r-- 1 vagrant vagrant 3117 Feb 20 15:51 Vagrantfile
```

Once I'm finished poking at this VM, shutting it down is as easy as:
```shell
vagrant halt # [tl! .cmd]
```

And if I want to clean up and remove all traces of the VM, that's just:
```shell
vagrant destroy # [tl! .cmd]
```
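
And if I ever lose track of which Vagrant environments exist on the system, Vagrant can report on every environment it knows about (just an aside, not a required step in this walkthrough):
```shell
vagrant global-status # [tl! .cmd]
```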

[^inevitable]: NFS doesn't work properly from within an LXD container, like the ChromeOS Linux development environment.

@@ -202,7 +201,7 @@ Windows 11 makes for a pretty hefty VM which will require significant storage sp

Again, I'll create a new folder to hold the Vagrant configuration and do a `vagrant init`:
```shell
mkdir vagrant-win11 # [tl! .cmd:2]
cd vagrant-win11
vagrant init oopsme/windows11-22h2
```

@@ -212,7 +211,7 @@ And, again, I'll edit the Vagrantfile before starting the VM. This time, though,
Vagrant.configure("2") do |config|
  config.vm.box = "oopsme/windows11-22h2"
  config.vm.provider :libvirt do |libvirt|
    libvirt.cpus = 4 # [tl! highlight:1]
    libvirt.memory = 4096
  end
end

@@ -222,22 +221,22 @@ end

Now it's time to bring it up. This one's going to take A While as it syncs the ~12GB Box first.
```shell
vagrant up # [tl! .cmd]
```

Eventually it should spit out that lovely **Machine booted and ready!** message and I can log in! I *can* do a `vagrant ssh` again to gain a shell in the Windows environment, but I'll probably want to interact with those sweet sweet graphics. That takes a little bit more effort.

First, I'll use `virsh -c qemu:///system list` to see the running VM(s):
```shell
virsh -c qemu:///system list # [tl! .cmd]
 Id   Name                    State # [tl! .nocopy:2]
---------------------------------------
 10   vagrant-win11_default   running
```

Then I can tell `virt-viewer` that I'd like to attach a session there:
```shell
virt-viewer -c qemu:///system -a vagrant-win11_default # [tl! .cmd]
```

I log in with the default password `vagrant`, and I'm in Windows 11 land!

@@ -22,62 +22,64 @@ I eventually came across [this blog post](https://www.virtualnebula.com/blog/201

### Preparing the SSH host
I deployed a Windows Server 2019 Core VM to use as my SSH host, and I joined it to my AD domain as `win02.lab.bowdre.net`. Once that's taken care of, I need to install the RSAT DNS tools so that I can use the `Add-DnsServerResourceRecord` and associated cmdlets. I can do that through PowerShell like so:
```powershell
# Install RSAT DNS tools [tl! .nocopy]
Add-WindowsCapability -online -name Rsat.Dns.Tools~~~~0.0.1.0 # [tl! .cmd_pwsh]
```

Instead of using a third-party SSH server, I'll use the OpenSSH Server that's already available in Windows 10 (1809+) and Server 2019:
```powershell
# Install OpenSSH Server [tl! .nocopy]
Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 # [tl! .cmd_pwsh]
```
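
If I want to double-check that the capability actually landed, querying its state should report `Installed` (a verification step I'm suggesting here, not one from the original writeup):
```powershell
Get-WindowsCapability -Online -Name 'OpenSSH.Server*' # [tl! .cmd_pwsh]
```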

I'll also want to set it so that the default shell upon SSH login is PowerShell (rather than the standard Command Prompt) so that I can have easy access to those DNS cmdlets:
```powershell
# Set PowerShell as the default Shell (for access to DNS cmdlets) # [tl! .nocopy]
New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" -Name DefaultShell ` # [tl! .cmd_pwsh:2]
  -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" `
  -PropertyType String -Force
```

I'll be using my `lab\vra` service account for managing DNS. I've already given it the appropriate rights on the DNS server, but I'll also add it to the Administrators group on my SSH host:
```powershell
# Add the service account as a local administrator # [tl! .nocopy]
Add-LocalGroupMember -Group Administrators -Member "lab\vra" # [tl! .cmd_pwsh]
```

And I'll modify the OpenSSH configuration so that only members of that Administrators group are permitted to log into the server via SSH:
```powershell
# Restrict SSH access to members in the local Administrators group [tl! .nocopy]
(Get-Content "C:\ProgramData\ssh\sshd_config") -Replace "# Authentication:", `
  "$&`nAllowGroups Administrators" | Set-Content "C:\ProgramData\ssh\sshd_config" # [tl! .cmd_pwsh:-1]
```

Finally, I'll start the `sshd` service and set it to start up automatically:
```powershell
# Start service and set it to automatic [tl! .nocopy]
Set-Service -Name sshd -StartupType Automatic -Status Running # [tl! .cmd_pwsh]
```

#### A quick test
At this point, I can log in to the server via SSH and confirm that I can create and delete records in my DNS zone:
```powershell
ssh vra@win02.lab.bowdre.net # [tl! .cmd_pwsh]
vra@win02.lab.bowdre.net`'s password: # [tl! .nocopy:3]

Windows PowerShell
Copyright (C) Microsoft Corporation. All rights reserved.

Add-DnsServerResourceRecordA -ComputerName win01.lab.bowdre.net `
  -Name testy -ZoneName lab.bowdre.net -AllowUpdateAny -IPv4Address 172.16.99.99 # [tl! .cmd_pwsh:-1]
nslookup testy # [tl! .cmd_pwsh]
Server:  win01.lab.bowdre.net # [tl! .nocopy:start]
Address:  192.168.1.5

Name:    testy.lab.bowdre.net
Address:  172.16.99.99
# [tl! .nocopy:end]
Remove-DnsServerResourceRecord -ComputerName win01.lab.bowdre.net `
  -Name testy -ZoneName lab.bowdre.net -RRType A -Force # [tl! .cmd_pwsh:-1]
nslookup testy # [tl! .cmd_pwsh]
Server:  win01.lab.bowdre.net # [tl! .nocopy:3]
Address:  192.168.1.5

*** win01.lab.bowdre.net can't find testy: Non-existent domain
```

@@ -112,22 +114,23 @@ resources:

So here's the complete cloud template that I've been working on:
```yaml
# torchlight! {"lineNumbers": true}
formatVersion: 1 # [tl! focus:1]
inputs:
  site: # [tl! collapse:5]
    type: string
    title: Site
    enum:
      - BOW
      - DRE
  image: # [tl! collapse:6]
    type: string
    title: Operating System
    oneOf:
      - title: Windows Server 2019
        const: ws2019
    default: ws2019
  size: # [tl! collapse:10]
    title: Resource Size
    type: string
    oneOf:
@@ -138,18 +141,18 @@ inputs:
      - title: 'Small [2vCPU|2GB]'
        const: small
    default: small
  network: # [tl! collapse:2]
    title: Network
    type: string
  adJoin: # [tl! collapse:3]
    title: Join to AD domain
    type: boolean
    default: true
  staticDns: # [tl! highlight:3 focus:3]
    title: Create static DNS record
    type: boolean
    default: false
  environment: # [tl! collapse:10]
    type: string
    title: Environment
    oneOf:
@@ -160,7 +163,7 @@ inputs:
      - title: Production
        const: P
    default: D
  function: # [tl! collapse:14]
    type: string
    title: Function Code
    oneOf:
@@ -175,34 +178,34 @@ inputs:
      - title: Testing (TST)
        const: TST
    default: TST
  app: # [tl! collapse:5]
    type: string
    title: Application Code
    minLength: 3
    maxLength: 3
    default: xxx
  description: # [tl! collapse:4]
    type: string
    title: Description
    description: Server function/purpose
    default: Testing and evaluation
  poc_name: # [tl! collapse:3]
    type: string
    title: Point of Contact Name
    default: Jack Shephard
  poc_email: # [tl! collapse:4]
    type: string
    title: Point of Contact Email
    default: username@example.com
    pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$'
  ticket: # [tl! collapse:3]
    type: string
    title: Ticket/Request Number
    default: 4815162342
resources: # [tl! focus:3]
  Cloud_vSphere_Machine_1:
    type: Cloud.vSphere.Machine
    properties: # [tl! collapse:start]
      image: '${input.image}'
      flavor: '${input.size}'
      site: '${input.site}'
@@ -212,9 +215,9 @@ resources:
      ignoreActiveDirectory: '${!input.adJoin}'
      activeDirectory:
        relativeDN: '${"OU=Servers,OU=Computers,OU=" + input.site + ",OU=LAB"}'
      customizationSpec: '${input.adJoin ? "vra-win-domain" : "vra-win-workgroup"}' # [tl! collapse:end]
      staticDns: '${input.staticDns}' # [tl! focus highlight]
      dnsDomain: lab.bowdre.net # [tl! collapse:start]
      poc: '${input.poc_name + " (" + input.poc_email + ")"}'
      ticket: '${input.ticket}'
      description: '${input.description}'
@@ -222,10 +225,10 @@ resources:
        - network: '${resource.Cloud_vSphere_Network_1.id}'
          assignment: static
      constraints:
        - tag: 'comp:${to_lower(input.site)}' # [tl! collapse:end]
  Cloud_vSphere_Network_1:
    type: Cloud.vSphere.Network
    properties: # [tl! collapse:3]
      networkType: existing
      constraints:
        - tag: 'net:${input.network}'
```

@@ -245,7 +248,7 @@ That should take care of the front-end changes. Now for the back-end stuff: I ne

### The vRO solution
I will be adding the DNS action on to my existing "VM Post-Provisioning" workflow (described [here](/adding-vm-notes-and-custom-attributes-with-vra8)), which gets triggered after the VM has been successfully deployed.

#### Configuration Element
But first, I'm going to go to the **Assets > Configurations** section of the Orchestrator UI and create a new Configuration Element to store variables related to the SSH host and DNS configuration.

@@ -258,7 +261,7 @@ And then I create the following variables:

| Variable | Value | Type |
| --- | --- | --- |
| `sshHost` | `win02.lab.bowdre.net` | string |
| `sshUser` | `vra` | string |
| `sshPass` | `*****` | secureString |
| `dnsServer` | `[win01.lab.bowdre.net]` | Array/string |

@@ -280,9 +283,12 @@ Now we're ready for the good part: inserting a new scriptable task into the work
![Task inputs](20210809_task_inputs.png)

And here's the JavaScript for the task:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: Create DNS Record task
// Inputs: inputProperties (Properties), dnsServers (Array/string),
//    sshHost (string), sshUser (string), sshPass (secureString),
//    supportedDomains (Array/string)
// Outputs: None

var staticDns = inputProperties.customProperties.staticDns;
@@ -312,7 +318,7 @@ if (staticDns == "true" && supportedDomains.indexOf(dnsDomain) >= 0) {
      System.log("Successfully created DNS record!")
      // make a note that it was successful so we don't repeat this unnecessarily
      created = true;
    }
  }
}
sshSession.disconnect()
```

@@ -341,9 +347,12 @@ The schema will include a single scriptable task:

And it's going to be *pretty damn similar* to the other one:

```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: Delete DNS Record task
// Inputs: inputProperties (Properties), dnsServers (Array/string),
//    sshHost (string), sshUser (string), sshPass (secureString),
//    supportedDomains (Array/string)
// Outputs: None

var staticDns = inputProperties.customProperties.staticDns;
@@ -373,7 +382,7 @@ if (staticDns == "true" && supportedDomains.indexOf(dnsDomain) >= 0) {
      System.log("Successfully deleted DNS record!")
      // make a note that it was successful so we don't repeat this unnecessarily
      deleted = true;
    }
  }
}
sshSession.disconnect()
```

@@ -397,8 +406,8 @@ Once the deployment completes, I go back into vRO, find the most recent item in

And I can run a quick query to make sure that name actually resolves:
```shell
dig +short bow-ttst-xxx023.lab.bowdre.net A # [tl! .cmd]
172.16.30.10 # [tl! .nocopy]
```

It works!

@@ -411,8 +420,8 @@ Again, I'll check the **Workflow Runs** in vRO to see that the deprovisioning ta

And I can `dig` a little more to make sure the name doesn't resolve anymore:
```shell
dig +short bow-ttst-xxx023.lab.bowdre.net A # [tl! .cmd]

```

It *really* works!

@@ -19,8 +19,8 @@ Here's how.

#### Step Zero: Prereqs
You'll need Windows 10 1903 build 18362 or newer (on x64). You can check by running `ver` from a Command Prompt:
```powershell
ver # [tl! .cmd_pwsh]
Microsoft Windows [Version 10.0.18363.1082] # [tl! .nocopy]
```
We're interested in that third set of numbers. 18363 is bigger than 18362 so we're good to go!

@@ -28,13 +28,13 @@ We're interested in that third set of numbers. 18363 is bigger than 18362 so we'
*(Not needed if you've already been using WSL1.)*
You can do this by dropping the following into an elevated PowerShell prompt:
```powershell
dism.exe /online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux /all /norestart # [tl! .cmd_pwsh]
```

#### Step Two: Enable the Virtual Machine Platform feature
Drop this in an elevated PowerShell:
```powershell
dism.exe /online /enable-feature /featurename:VirtualMachinePlatform /all /norestart # [tl! .cmd_pwsh]
```
And then reboot (this is still Windows, after all).

@@ -44,22 +44,22 @@ Download it from [here](https://wslstorestorage.blob.core.windows.net/wslblob/ws
#### Step Four: Set WSL2 as your default
Open a PowerShell window and run:
```powershell
wsl --set-default-version 2 # [tl! .cmd_pwsh]
```

#### Step Five: Install a Linux distro, or upgrade an existing one
If you're brand new to this WSL thing, head over to the [Microsoft Store](https://aka.ms/wslstore) and download your favorite Linux distribution. Once it's installed, launch it and you'll be prompted to set up a Linux username and password.

If you've already got a WSL1 distro installed, first run `wsl -l -v` in PowerShell to make sure you know the distro name:
```powershell
wsl -l -v # [tl! .cmd_pwsh]
  NAME      STATE           VERSION # [tl! .nocopy:1]
* Debian    Running         2
```
And then upgrade the distro to WSL2 with `wsl --set-version <distro_name> 2`:
```powershell
wsl --set-version Debian 2 # [tl! .cmd_pwsh]
Conversion in progress, this may take a few minutes... # [tl! .nocopy]
```
Cool!

@@ -1,7 +1,7 @@

---
title: "Easy Push Notifications With ntfy.sh"
date: 2023-09-17
lastmod: 2023-10-21
description: "Deploying and configuring a self-hosted pub-sub notification handler, getting another server to send a notification when it boots, and integrating the notification handler into Home Assistant."
featured: false
toc: true

@@ -43,12 +43,13 @@ I'm going to use the [Docker setup](https://docs.ntfy.sh/install/#docker) on a s
So I'll start by creating a new directory at `/opt/ntfy/` to hold the goods, and create a compose config.

```shell
sudo mkdir -p /opt/ntfy # [tl! .cmd:1]
sudo vim /opt/ntfy/docker-compose.yml
```

```yaml
# torchlight! {"lineNumbers": true}
# /opt/ntfy/docker-compose.yml
version: "2.3"

services:
@@ -58,15 +59,18 @@ services:
    command:
      - serve
    environment:
      - TZ=UTC    # optional, set desired timezone
    volumes:
      - ./cache/ntfy:/var/cache/ntfy
      - ./etc/ntfy:/etc/ntfy
      - ./lib/ntf:/var/lib/ntfy
    ports:
      - 2586:80
    healthcheck: # optional, remember to adapt the host and port to your environment
      test: [
        "CMD-SHELL",
        "wget -q --tries=1 http://localhost:8080/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"
      ]
      interval: 60s
      timeout: 10s
      retries: 3
```

@@ -79,21 +83,22 @@ This config will create/mount folders in the working directory to store the ntfy

I can go ahead and bring it up:
```shell
sudo docker-compose up -d # [tl! focus:start .cmd]
Creating network "ntfy_default" with the default driver # [tl! .nocopy:start]
Pulling ntfy (binwiederhier/ntfy:)...
latest: Pulling from binwiederhier/ntfy # [tl! focus:end]
7264a8db6415: Pull complete
1ac6a3b2d03b: Pull complete
Digest: sha256:da08556da89a3f7317557fd39cf302c6e4691b4f8ce3a68aa7be86c4141e11c8
Status: Downloaded newer image for binwiederhier/ntfy:latest # [tl! focus:1]
Creating ntfy ... done # [tl! .nocopy:end]
```

#### Caddy Reverse Proxy
I'll also want to add [the following](https://docs.ntfy.sh/config/#nginxapache2caddy) to my Caddy config:
```text
# torchlight! {"lineNumbers": true}
# /etc/caddy/Caddyfile
ntfy.runtimeterror.dev, http://ntfy.runtimeterror.dev {
    reverse_proxy localhost:2586

@@ -110,7 +115,7 @@ ntfy.runtimeterror.dev, http://ntfy.runtimeterror.dev {

And I'll restart Caddy to apply the config:
```shell
sudo systemctl restart caddy # [tl! .cmd]
```

Now I can point my browser to `https://ntfy.runtimeterror.dev` and see the web interface:

@@ -121,9 +126,9 @@ I can subscribe to a new topic:
![Subscribing to a public topic](subscribe_public_topic.png)

And publish a message to it:
```curl
curl -d "Hi" https://ntfy.runtimeterror.dev/testy # [tl! .cmd]
{"id":"80bUl6cKwgBP","time":1694981305,"expires":1695024505,"event":"message","topic":"testy","message":"Hi"} # [tl! .nocopy]
```

Which will then show up as a notification in my browser:

@@ -134,8 +139,9 @@ Which will then show up as a notification in my browser:
So now I've got my own ntfy server, and I've verified that it works for unauthenticated notifications. I don't really want to operate *anything* on the internet without requiring authentication, though, so I'm going to configure ntfy to prevent unauthenticated reads and writes.

I'll start by creating a `server.yml` config file which will be mounted into the container. This config will specify where to store the user database and switch the default ACL to `deny-all`:
```yaml
# torchlight! {"lineNumbers": true}
# /opt/ntfy/etc/ntfy/server.yml
auth-file: "/var/lib/ntfy/user.db"
auth-default-access: "deny-all"
base-url: "https://ntfy.runtimeterror.dev"

@@ -143,7 +149,7 @@ base-url: "https://ntfy.runtimeterror.dev"

I can then restart the container, and try again to subscribe to the same (or any other topic):
```shell
sudo docker-compose down && sudo docker-compose up -d # [tl! .cmd]
```

@@ -152,31 +158,33 @@ Now I get prompted to log in:

I'll need to use the ntfy CLI to create/manage entries in the user DB, and that means first grabbing a shell inside the container:
```shell
sudo docker exec -it ntfy /bin/sh # [tl! .cmd]
```

For now, I'm going to create three users: one as an administrator, one as a "writer", and one as a "reader". I'll be prompted for a password for each:
```shell
ntfy user add --role=admin administrator # [tl! .cmd]
user administrator added with role admin # [tl! .nocopy:1]

ntfy user add writer # [tl! .cmd]
user writer added with role user # [tl! .nocopy:1]

ntfy user add reader # [tl! .cmd]
user reader added with role user # [tl! .nocopy]
```

The admin user has global read+write access, but right now the other two can't do anything. Let's make it so that `writer` can write to all topics, and `reader` can read from all topics:
```shell
ntfy access writer '*' write # [tl! .cmd:1]
ntfy access reader '*' read
```

I could lock these down further by selecting specific topic names instead of `'*'` but this will do fine for now.
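
For the record, scoping an account to a single topic uses exactly the same syntax; for example, restricting these users to just the `server_alerts` topic I'll be using later would look like this:
```shell
ntfy access writer server_alerts write # [tl! .cmd:1]
ntfy access reader server_alerts read
```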

Let's go ahead and verify the access as well:
```shell
ntfy access # [tl! .cmd]
user administrator (role: admin, tier: none) # [tl! .nocopy:8]
- read-write access to all topics (admin role)
user reader (role: user, tier: none)
- read-only access to topic *
@@ -189,16 +197,16 @@ user * (role: anonymous, tier: none)
```

While I'm at it, I also want to configure an access token to be used with the `writer` account. I'll be able to use that instead of username+password when publishing messages.
```shell
ntfy token add writer # [tl! .cmd]
token tk_mm8o6cwxmox11wrnh8miehtivxk7m created for user writer, never expires # [tl! .nocopy]
```

I can go back to the web, subscribe to the `testy` topic again using the `reader` credentials, and then test sending an authenticated notification with `curl`:
```curl
curl -H "Authorization: Bearer tk_mm8o6cwxmox11wrnh8miehtivxk7m" \ # [tl! .cmd]
  -d "Once more, with auth!" \
  https://ntfy.runtimeterror.dev/testy
{"id":"0dmX9emtehHe","time":1694987274,"expires":1695030474,"event":"message","topic":"testy","message":"Once more, with auth!"} # [tl! .nocopy]
```

![Authenticated notification](authenticated_notification.png)

@@ -215,6 +223,7 @@ I may want to wind up having servers notify for a variety of conditions so I'll

`/usr/local/bin/ntfy_push.sh`:
```shell
# torchlight! {"lineNumbers": true}
#!/usr/bin/env bash

curl \
@@ -228,8 +237,8 @@ Note that I'm using a new topic name now: `server_alerts`. Topics are automatica
```

Okay, now let's make it executable and then give it a quick test:
```shell
chmod +x /usr/local/bin/ntfy_push.sh # [tl! .cmd:1]
/usr/local/bin/ntfy_push.sh "Script Test" "This is a test from the magic script I just wrote."
```

![Script test](script_test.png)

@@ -239,6 +248,7 @@ I don't know an easy way to tell a systemd service definition to pass arguments

`/usr/local/bin/ntfy_boot_complete.sh`:
```shell
# torchlight! {"lineNumbers": true}
#!/usr/bin/env bash

TITLE="$(hostname -s)"
@@ -249,13 +259,14 @@ MESSAGE="System boot complete"
```

And this one should be executable as well:
```shell
chmod +x /usr/local/bin/ntfy_boot_complete.sh # [tl! .cmd]
```
##### Service Definition
Finally I can create and register the service definition so that the script will run at each system boot.

`/etc/systemd/system/ntfy_boot_complete.service`:
```ini
# torchlight! {"lineNumbers": true}
[Unit]
After=network.target

@@ -267,7 +278,7 @@ WantedBy=default.target
```

```shell
sudo systemctl daemon-reload # [tl! .cmd:1]
sudo systemctl enable --now ntfy_boot_complete.service
```
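
Rather than rebooting right away to test it, I can also just kick the service off manually and watch for the notification to arrive (a quick sanity check of my own, not part of the original steps):
```shell
sudo systemctl start ntfy_boot_complete.service # [tl! .cmd]
```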

@@ -285,8 +296,9 @@ Enabling ntfy as a notification handler is pretty straight-forward, and it will

##### Notify Configuration
I'll add ntfy to Home Assistant by using the [RESTful Notifications](https://www.home-assistant.io/integrations/notify.rest/) integration. For that, I just need to update my instance's `configuration.yaml` to configure the connection.

```yaml
# torchlight! {"lineNumbers": true}
# configuration.yaml
notify:
  - name: ntfy
    platform: rest
@@ -302,6 +314,8 @@ notify:
```

The `Authorization` line references a secret stored in `secrets.yaml`:
```yaml
# torchlight! {"lineNumbers": true}
# secrets.yaml
ntfy_token: Bearer tk_mm8o6cwxmox11wrnh8miehtivxk7m
```
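
Home Assistant only reads `configuration.yaml` and `secrets.yaml` at startup, so the instance needs a restart (or at least a YAML configuration reload) for the new notifier to appear. The exact mechanics depend on the install type; on Home Assistant OS, something like this from the console should do it (shown as a sketch, not my verified steps):
```shell
ha core restart # [tl! .cmd]
```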

@@ -319,6 +333,7 @@ I'll use the Home Assistant UI to push a notification through ntfy if any of my

The business end of this is the service call at the end:
```yaml
# torchlight! {"lineNumbers": true}
service: notify.ntfy
data:
  title: Leak detected!
```

@@ -52,13 +52,13 @@ Running `tanzu completion --help` will tell you what's needed, and you can just

So to get the completions to load automatically whenever you start a `bash` shell, run:
```shell
tanzu completion bash > $HOME/.tanzu/completion.bash.inc # [tl! .cmd:1]
printf "\n# Tanzu shell completion\nsource '$HOME/.tanzu/completion.bash.inc'\n" >> $HOME/.bash_profile
```

For a `zsh` shell, it's:
```shell
echo "autoload -U compinit; compinit" >> ~/.zshrc # [tl! .cmd:1]
tanzu completion zsh > "${fpath[1]}/_tanzu"
```
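
Either way, new shell sessions will pick up the completions automatically; to load them into the *current* session, sourcing the relevant profile works too (the `bash` case shown here, just as a convenience note):
```shell
source $HOME/.bash_profile # [tl! .cmd]
```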
|
|
||||||
|
|
|
@ -85,8 +85,8 @@ Let's start with the gear (hardware and software) I needed to make this work:
|
||||||
The very first task is to write the required firmware image (download [here](https://github.com/jaredmcneill/quartz64_uefi/releases)) to a micro SD card. I used a 64GB card that I had lying around but you could easily get by with a *much* smaller one; the firmware image is tiny, and the card can't be used for storing anything else. Since I'm doing this on a Chromebook, I'll be using the [Chromebook Recovery Utility (CRU)](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) for writing the images to external storage as described [in another post](/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/).
|
The very first task is to write the required firmware image (download [here](https://github.com/jaredmcneill/quartz64_uefi/releases)) to a micro SD card. I used a 64GB card that I had lying around but you could easily get by with a *much* smaller one; the firmware image is tiny, and the card can't be used for storing anything else. Since I'm doing this on a Chromebook, I'll be using the [Chromebook Recovery Utility (CRU)](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) for writing the images to external storage as described [in another post](/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/).
|
||||||
|
|
||||||
After downloading [`QUARTZ64_EFI.img.gz`](https://github.com/jaredmcneill/quartz64_uefi/releases/download/2022-07-20/QUARTZ64_EFI.img.gz), I need to get it into a format recognized by CRU and, in this case, that means extracting the gzipped archive and then compressing the `.img` file into a standard `.zip`:
|
After downloading [`QUARTZ64_EFI.img.gz`](https://github.com/jaredmcneill/quartz64_uefi/releases/download/2022-07-20/QUARTZ64_EFI.img.gz), I need to get it into a format recognized by CRU and, in this case, that means extracting the gzipped archive and then compressing the `.img` file into a standard `.zip`:
|
||||||
```
|
```shell
|
||||||
gunzip QUARTZ64_EFI.img.gz
|
gunzip QUARTZ64_EFI.img.gz # [tl! .cmd:1]
|
||||||
zip QUARTZ64_EFI.img.zip QUARTZ64_EFI.img
|
zip QUARTZ64_EFI.img.zip QUARTZ64_EFI.img
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -98,8 +98,8 @@ I can then write it to the micro SD card by opening CRU, clicking on the gear ic
|
||||||
I'll also need to prepare the ESXi installation media (download [here](https://customerconnect.vmware.com/downloads/get-download?downloadGroup=ESXI-ARM)). For that, I'll be using a 256GB USB drive. Due to the limited storage options on the Quartz64, I'll be installing ESXi onto the same drive I use to boot the installer so, in this case, the more storage the better. By default, ESXi 7.0 will consume up to 128GB for the new `ESX-OSData` partition; whatever is leftover will be made available as a VMFS datastore. That could be problematic given the unavailable/flaky USB support of the Quartz64. (While you *can* install ESXi onto a smaller drive, down to about ~20GB, the lack of additional storage on this hardware makes it pretty important to take advantage of as much space as you can.)
|
I'll also need to prepare the ESXi installation media (download [here](https://customerconnect.vmware.com/downloads/get-download?downloadGroup=ESXI-ARM)). For that, I'll be using a 256GB USB drive. Due to the limited storage options on the Quartz64, I'll be installing ESXi onto the same drive I use to boot the installer so, in this case, the more storage the better. By default, ESXi 7.0 will consume up to 128GB for the new `ESX-OSData` partition; whatever is leftover will be made available as a VMFS datastore. That could be problematic given the unavailable/flaky USB support of the Quartz64. (While you *can* install ESXi onto a smaller drive, down to about ~20GB, the lack of additional storage on this hardware makes it pretty important to take advantage of as much space as you can.)
|
||||||
|
|
||||||
In any case, to make the downloaded `VMware-VMvisor-Installer-7.0-20133114.aarch64.iso` writeable with CRU, all I need to do is add `.bin` to the end of the filename:
|
In any case, to make the downloaded `VMware-VMvisor-Installer-7.0-20133114.aarch64.iso` writeable with CRU, all I need to do is add `.bin` to the end of the filename:
|
||||||
```
|
```shell
|
||||||
mv VMware-VMvisor-Installer-7.0-20133114.aarch64.iso{,.bin}
|
mv VMware-VMvisor-Installer-7.0-20133114.aarch64.iso{,.bin} # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Then it's time to write the image onto the USB drive:
|
Then it's time to write the image onto the USB drive:
|
||||||
|
@ -202,12 +202,12 @@ As I mentioned earlier, my initial goal is to deploy a Tailscale node on my new
|
||||||
#### Deploying Photon OS
|
#### Deploying Photon OS
|
||||||
VMware provides Photon in a few different formats, as described on the [download page](https://github.com/vmware/photon/wiki/Downloading-Photon-OS). I'm going to use the "OVA with virtual hardware v13 arm64" version so I'll kick off that download of `photon_uefi.ova`. I'm actually going to download that file straight to my `deb01` Linux VM:
|
VMware provides Photon in a few different formats, as described on the [download page](https://github.com/vmware/photon/wiki/Downloading-Photon-OS). I'm going to use the "OVA with virtual hardware v13 arm64" version so I'll kick off that download of `photon_uefi.ova`. I'm actually going to download that file straight to my `deb01` Linux VM:
|
||||||
```shell
|
```shell
|
||||||
wget https://packages.vmware.com/photon/4.0/Rev2/ova/photon_uefi.ova
|
wget https://packages.vmware.com/photon/4.0/Rev2/ova/photon_uefi.ova # [tl! .cmd]
|
||||||
```
|
```
|
||||||
and then spawn a quick Python web server to share it out:
|
and then spawn a quick Python web server to share it out:
|
||||||
```shell
|
```shell
|
||||||
❯ python3 -m http.server
|
python3 -m http.server # [tl! .cmd]
|
||||||
Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ...
|
Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
That will let me deploy from a resource already inside my lab network instead of transferring the OVA from my laptop. So now I can go back to my vSphere Client and go through the steps to **Deploy OVF Template** to the new host, and I'll plug in the URL `http://deb01.lab.bowdre.net:8000/photon_uefi.ova`:
|
That will let me deploy from a resource already inside my lab network instead of transferring the OVA from my laptop. So now I can go back to my vSphere Client and go through the steps to **Deploy OVF Template** to the new host, and I'll plug in the URL `http://deb01.lab.bowdre.net:8000/photon_uefi.ova`:
|
||||||
|
@ -232,13 +232,13 @@ The default password for Photon's `root` user is `changeme`. You'll be forced to
|
||||||
![First login, and the requisite password change](first_login.png)
|
![First login, and the requisite password change](first_login.png)
|
||||||
|
|
||||||
Now that I'm in, I'll set the hostname appropriately:
|
Now that I'm in, I'll set the hostname appropriately:
|
||||||
```bash
|
```shell
|
||||||
hostnamectl set-hostname pho01
|
hostnamectl set-hostname pho01 # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
For now, the VM pulled an IP from DHCP but I would like to configure that statically instead. To do that, I'll create a new interface file:
|
For now, the VM pulled an IP from DHCP but I would like to configure that statically instead. To do that, I'll create a new interface file:
|
||||||
```bash
|
```shell
|
||||||
cat > /etc/systemd/network/10-static-en.network << "EOF"
|
cat > /etc/systemd/network/10-static-en.network << "EOF" # [tl! .cmd_root]
|
||||||
|
|
||||||
[Match]
|
[Match]
|
||||||
Name = eth0
|
Name = eth0
|
||||||
|
@ -252,29 +252,30 @@ IPForward = yes
|
||||||
|
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
chmod 644 /etc/systemd/network/10-static-en.network
|
chmod 644 /etc/systemd/network/10-static-en.network # [tl! .cmd_root:1]
|
||||||
systemctl restart systemd-networkd
|
systemctl restart systemd-networkd
|
||||||
```
|
```
|
||||||
|
|
||||||
I'm including `IPForward = yes` to [enable IP forwarding](https://tailscale.com/kb/1104/enable-ip-forwarding/) for Tailscale.
|
I'm including `IPForward = yes` to [enable IP forwarding](https://tailscale.com/kb/1104/enable-ip-forwarding/) for Tailscale.
|
||||||
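If I want to double-check that forwarding actually took effect after the restart, a quick `sysctl` query against the interface should confirm it (just a sanity check; `1` means forwarding is enabled):
```shell
sysctl net.ipv4.conf.eth0.forwarding # [tl! .cmd_root]
net.ipv4.conf.eth0.forwarding = 1 # [tl! .nocopy]
```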
|
|
||||||
With networking sorted, it's probably a good idea to check for and apply any available updates:
|
With networking sorted, it's probably a good idea to check for and apply any available updates:
|
||||||
```bash
|
```shell
|
||||||
tdnf update -y
|
tdnf update -y # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll also go ahead and create a normal user account (with sudo privileges) for me to use:
|
I'll also go ahead and create a normal user account (with sudo privileges) for me to use:
|
||||||
```bash
|
```shell
|
||||||
useradd -G wheel -m john
|
useradd -G wheel -m john # [tl! .cmd_root:1]
|
||||||
passwd john
|
passwd john
|
||||||
```
|
```
|
||||||
|
|
||||||
Now I can use SSH to connect to the VM and ditch the web console:
|
Now I can use SSH to connect to the VM and ditch the web console:
|
||||||
```bash
|
```shell
|
||||||
❯ ssh pho01.lab.bowdre.net
|
ssh pho01.lab.bowdre.net # [tl! .cmd]
|
||||||
Password:
|
Password: # [tl! .nocopy]
|
||||||
john@pho01 [ ~ ]$ sudo whoami
|
|
||||||
|
|
||||||
|
sudo whoami # [tl! .cmd]
|
||||||
|
# [tl! .nocopy:start]
|
||||||
We trust you have received the usual lecture from the local System
|
We trust you have received the usual lecture from the local System
|
||||||
Administrator. It usually boils down to these three things:
|
Administrator. It usually boils down to these three things:
|
||||||
|
|
||||||
|
@ -283,7 +284,7 @@ Administrator. It usually boils down to these three things:
|
||||||
#3) With great power comes great responsibility.
|
#3) With great power comes great responsibility.
|
||||||
|
|
||||||
[sudo] password for john
|
[sudo] password for john
|
||||||
root
|
root # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Looking good! I'll now move on to the justification[^justification] for this entire exercise:
|
Looking good! I'll now move on to the justification[^justification] for this entire exercise:
|
||||||
|
@ -292,44 +293,42 @@ Looking good! I'll now move on to the justification[^justification] for this ent
|
||||||
#### Installing Tailscale
|
#### Installing Tailscale
|
||||||
If I *weren't* doing this on hard mode, I could use Tailscale's [install script](https://tailscale.com/download) like I do on every other Linux system. Hard mode is what I do though, and the installer doesn't directly support Photon OS. I'll instead consult the [manual install instructions](https://tailscale.com/download/linux/static) which tell me to download the appropriate binaries from [https://pkgs.tailscale.com/stable/#static](https://pkgs.tailscale.com/stable/#static). So I'll grab the link for the latest `arm64` build and pull that down to the VM:
|
If I *weren't* doing this on hard mode, I could use Tailscale's [install script](https://tailscale.com/download) like I do on every other Linux system. Hard mode is what I do though, and the installer doesn't directly support Photon OS. I'll instead consult the [manual install instructions](https://tailscale.com/download/linux/static) which tell me to download the appropriate binaries from [https://pkgs.tailscale.com/stable/#static](https://pkgs.tailscale.com/stable/#static). So I'll grab the link for the latest `arm64` build and pull that down to the VM:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
curl https://pkgs.tailscale.com/stable/tailscale_1.22.2_arm64.tgz --output tailscale_arm64.tgz
|
curl https://pkgs.tailscale.com/stable/tailscale_1.22.2_arm64.tgz --output tailscale_arm64.tgz # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I can unpack it:
|
Then I can unpack it:
|
||||||
```bash
|
```shell
|
||||||
sudo tdnf install tar
|
sudo tdnf install tar # [tl! .cmd:2]
|
||||||
tar xvf tailscale_arm64.tgz
|
tar xvf tailscale_arm64.tgz
|
||||||
cd tailscale_1.22.2_arm64/
|
cd tailscale_1.22.2_arm64/
|
||||||
```
|
```
|
||||||
|
|
||||||
So I've got the `tailscale` and `tailscaled` binaries as well as some sample service configs in the `systemd` directory:
|
So I've got the `tailscale` and `tailscaled` binaries as well as some sample service configs in the `systemd` directory:
|
||||||
```bash
|
```shell
|
||||||
john@pho01 [ ~/tailscale_1.22.2_arm64 ]$
|
ls # [tl! .cmd]
|
||||||
.:
|
total 32288 # [tl! .nocopy:4]
|
||||||
total 32288
|
|
||||||
drwxr-x--- 2 john users 4096 Mar 18 02:44 systemd
|
drwxr-x--- 2 john users 4096 Mar 18 02:44 systemd
|
||||||
-rwxr-x--- 1 john users 12187139 Mar 18 02:44 tailscale
|
-rwxr-x--- 1 john users 12187139 Mar 18 02:44 tailscale
|
||||||
-rwxr-x--- 1 john users 20866538 Mar 18 02:44 tailscaled
|
-rwxr-x--- 1 john users 20866538 Mar 18 02:44 tailscaled
|
||||||
|
|
||||||
./systemd:
|
ls ./systemd # [tl! .cmd]
|
||||||
total 8
|
total 8 # [tl! .nocopy:2]
|
||||||
-rw-r----- 1 john users 287 Mar 18 02:44 tailscaled.defaults
|
-rw-r----- 1 john users 287 Mar 18 02:44 tailscaled.defaults
|
||||||
-rw-r----- 1 john users 674 Mar 18 02:44 tailscaled.service
|
-rw-r----- 1 john users 674 Mar 18 02:44 tailscaled.service
|
||||||
```
|
```
|
||||||
|
|
||||||
Dealing with the binaries is straightforward. I'll drop them into `/usr/bin/` and `/usr/sbin/` (respectively) and set the file permissions:
|
Dealing with the binaries is straightforward. I'll drop them into `/usr/bin/` and `/usr/sbin/` (respectively) and set the file permissions:
|
||||||
```bash
|
```shell
|
||||||
sudo install -m 755 tailscale /usr/bin/
|
sudo install -m 755 tailscale /usr/bin/ # [tl! .cmd:1]
|
||||||
sudo install -m 755 tailscaled /usr/sbin/
|
sudo install -m 755 tailscaled /usr/sbin/
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I'll descend to the `systemd` folder and see what's up:
|
Then I'll descend to the `systemd` folder and see what's up:
|
||||||
```bash
|
```shell
|
||||||
john@pho01 [ ~/tailscale_1.22.2_arm64/ ]$ cd systemd/
|
cd systemd/ # [tl! .cmd:1]
|
||||||
|
cat tailscaled.defaults
|
||||||
john@pho01 [ ~/tailscale_1.22.2_arm64/systemd ]$ cat tailscaled.defaults
|
# Set the port to listen on for incoming VPN packets. [tl! .nocopy:8]
|
||||||
# Set the port to listen on for incoming VPN packets.
|
|
||||||
# Remote nodes will automatically be informed about the new port number,
|
# Remote nodes will automatically be informed about the new port number,
|
||||||
# but you might want to configure this in order to set external firewall
|
# but you might want to configure this in order to set external firewall
|
||||||
# settings.
|
# settings.
|
||||||
|
@ -338,8 +337,8 @@ PORT="41641"
|
||||||
# Extra flags you might want to pass to tailscaled.
|
# Extra flags you might want to pass to tailscaled.
|
||||||
FLAGS=""
|
FLAGS=""
|
||||||
|
|
||||||
john@pho01 [ ~/tailscale_1.22.2_arm64/systemd ]$ cat tailscaled.service
|
cat tailscaled.service # [tl! .cmd]
|
||||||
[Unit]
|
[Unit] # [tl! .nocopy:start]
|
||||||
Description=Tailscale node agent
|
Description=Tailscale node agent
|
||||||
Documentation=https://tailscale.com/kb/
|
Documentation=https://tailscale.com/kb/
|
||||||
Wants=network-pre.target
|
Wants=network-pre.target
|
||||||
|
@ -362,28 +361,28 @@ CacheDirectoryMode=0750
|
||||||
Type=notify
|
Type=notify
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
`tailscaled.defaults` contains the default configuration that will be referenced by the service, and `tailscaled.service` tells me that it expects to find it at `/etc/defaults/tailscaled`. So I'll copy it there and set the perms:
|
`tailscaled.defaults` contains the default configuration that will be referenced by the service, and `tailscaled.service` tells me that it expects to find it at `/etc/defaults/tailscaled`. So I'll copy it there and set the perms:
|
||||||
```bash
|
```shell
|
||||||
sudo install -m 644 tailscaled.defaults /etc/defaults/tailscaled
|
sudo install -m 644 tailscaled.defaults /etc/defaults/tailscaled # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
`tailscaled.service` will get dropped in `/usr/lib/systemd/system/`:
|
`tailscaled.service` will get dropped in `/usr/lib/systemd/system/`:
|
||||||
```bash
|
```shell
|
||||||
sudo install -m 644 tailscaled.service /usr/lib/systemd/system/
|
sudo install -m 644 tailscaled.service /usr/lib/systemd/system/ # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I'll enable the service and start it:
|
Then I'll enable the service and start it:
|
||||||
```bash
|
```shell
|
||||||
sudo systemctl enable tailscaled.service
|
sudo systemctl enable tailscaled.service # [tl! .cmd:1]
|
||||||
sudo systemctl start tailscaled.service
|
sudo systemctl start tailscaled.service
|
||||||
```
|
```
|
||||||
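Assuming nothing went sideways, a quick query should report the service as active before I move on (the expected output, if all is well):
```shell
systemctl is-active tailscaled.service # [tl! .cmd]
active # [tl! .nocopy]
```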
|
|
||||||
And finally log in to Tailscale, including my `tag:home` tag for [ACL purposes](/secure-networking-made-simple-with-tailscale/#acls) and a route advertisement for my home network so that my other Tailscale nodes can use this one to access other devices as well:
|
And finally log in to Tailscale, including my `tag:home` tag for [ACL purposes](/secure-networking-made-simple-with-tailscale/#acls) and a route advertisement for my home network so that my other Tailscale nodes can use this one to access other devices as well:
|
||||||
```bash
|
```shell
|
||||||
sudo tailscale up --advertise-tags "tag:home" --advertise-routes "192.168.1.0/24"
|
sudo tailscale up --advertise-tags "tag:home" --advertise-routes "192.168.1.0/24" # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
That will return a URL I can use to authenticate, and I'll then be able to view and manage the new Tailscale node from the `login.tailscale.com` admin portal:
|
That will return a URL I can use to authenticate, and I'll then be able to view and manage the new Tailscale node from the `login.tailscale.com` admin portal:
|
||||||
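After authenticating (and approving the advertised route from the admin portal, since Tailscale generally requires subnet routes to be approved there), a quick status check from the VM should confirm the node is connected. The address below is just a placeholder:
```shell
tailscale status # [tl! .cmd]
100.64.0.1      pho01        john@        linux   - # [tl! .nocopy]
```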
|
@ -408,7 +407,6 @@ Now I can remotely access the VM (and thus my homelab!) from any of my other Tai
|
||||||
|
|
||||||
### Conclusion
|
### Conclusion
|
||||||
I actually received the Quartz64 waay back on March 2nd, and it's taken me until this week to get all the pieces in place and working the way I wanted.
|
I actually received the Quartz64 waay back on March 2nd, and it's taken me until this week to get all the pieces in place and working the way I wanted.
|
||||||
{{< tweet user="johndotbowdre" id="1499194756148125701" >}}
|
|
||||||
|
|
||||||
As is so often the case, a lot of time and effort would have been saved if I had RTFM'd[^rtfm] before diving in to the deep end. I definitely hadn't anticipated all the limitations that would come with the Quartz64 SBC before ordering mine. Now that it's done, though, I'm pretty pleased with the setup, and I feel like I learned quite a bit along the way. I keep reminding myself that this is still a very new hardware platform. I'm excited to see how things improve with future development efforts.
|
As is so often the case, a lot of time and effort would have been saved if I had RTFM'd[^rtfm] before diving in to the deep end. I definitely hadn't anticipated all the limitations that would come with the Quartz64 SBC before ordering mine. Now that it's done, though, I'm pretty pleased with the setup, and I feel like I learned quite a bit along the way. I keep reminding myself that this is still a very new hardware platform. I'm excited to see how things improve with future development efforts.
|
||||||
|
|
||||||
|
|
|
@ -74,9 +74,9 @@ Success! My new ingress rules appear at the bottom of the list.
|
||||||
![New rules added](s5Y0rycng.png)
|
![New rules added](s5Y0rycng.png)
|
||||||
|
|
||||||
That gets traffic from the internet and to my instance, but the OS is still going to drop the traffic at its own firewall. I'll need to work with `iptables` to change that. (You typically use `ufw` to manage firewalls more easily on Ubuntu, but it isn't included on this minimal image and seemed to butt heads with `iptables` when I tried adding it. I eventually decided it was better to just interact with `iptables` directly). I'll start by listing the existing rules on the `INPUT` chain:
|
That gets traffic from the internet and to my instance, but the OS is still going to drop the traffic at its own firewall. I'll need to work with `iptables` to change that. (You typically use `ufw` to manage firewalls more easily on Ubuntu, but it isn't included on this minimal image and seemed to butt heads with `iptables` when I tried adding it. I eventually decided it was better to just interact with `iptables` directly). I'll start by listing the existing rules on the `INPUT` chain:
|
||||||
```
|
```shell
|
||||||
$ sudo iptables -L INPUT --line-numbers
|
sudo iptables -L INPUT --line-numbers # [tl! .cmd]
|
||||||
Chain INPUT (policy ACCEPT)
|
Chain INPUT (policy ACCEPT) # [tl! .nocopy:7]
|
||||||
num target prot opt source destination
|
num target prot opt source destination
|
||||||
1 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
1 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
||||||
2 ACCEPT icmp -- anywhere anywhere
|
2 ACCEPT icmp -- anywhere anywhere
|
||||||
|
@ -87,15 +87,15 @@ num target prot opt source destination
|
||||||
```
|
```
|
||||||
|
|
||||||
Note the `REJECT all` statement at line `6`. I'll need to insert my new `ACCEPT` rules for ports `80` and `443` above that catch-all rejection:
|
Note the `REJECT all` statement at line `6`. I'll need to insert my new `ACCEPT` rules for ports `80` and `443` above that catch-all rejection:
|
||||||
```
|
```shell
|
||||||
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 80 -j ACCEPT
|
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 80 -j ACCEPT # [tl! .cmd:1]
|
||||||
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 443 -j ACCEPT
|
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 443 -j ACCEPT
|
||||||
```
|
```
|
||||||
|
|
||||||
And then I'll confirm that the order is correct:
|
And then I'll confirm that the order is correct:
|
||||||
```
|
```shell
|
||||||
$ sudo iptables -L INPUT --line-numbers
|
sudo iptables -L INPUT --line-numbers # [tl! .cmd]
|
||||||
Chain INPUT (policy ACCEPT)
|
Chain INPUT (policy ACCEPT) # [tl! .nocopy:9]
|
||||||
num target prot opt source destination
|
num target prot opt source destination
|
||||||
1 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
1 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
||||||
2 ACCEPT icmp -- anywhere anywhere
|
2 ACCEPT icmp -- anywhere anywhere
|
||||||
|
@ -108,9 +108,9 @@ num target prot opt source destination
|
||||||
```
|
```
|
||||||
|
|
||||||
I can use `nmap` running from my local Linux environment to confirm that I can now reach those ports on the VM. (They're still "closed" since nothing is listening on the ports yet, but the connections aren't being rejected.)
|
I can use `nmap` running from my local Linux environment to confirm that I can now reach those ports on the VM. (They're still "closed" since nothing is listening on the ports yet, but the connections aren't being rejected.)
|
||||||
```
|
```shell
|
||||||
$ nmap -Pn matrix.bowdre.net
|
nmap -Pn matrix.bowdre.net # [tl! .cmd]
|
||||||
Starting Nmap 7.70 ( https://nmap.org ) at 2021-06-27 12:49 CDT
|
Starting Nmap 7.70 ( https://nmap.org ) at 2021-06-27 12:49 CDT # [tl! .nocopy:10]
|
||||||
Nmap scan report for matrix.bowdre.net(150.136.6.180)
|
Nmap scan report for matrix.bowdre.net(150.136.6.180)
|
||||||
Host is up (0.086s latency).
|
Host is up (0.086s latency).
|
||||||
Other addresses for matrix.bowdre.net (not scanned): 2607:7700:0:1d:0:1:9688:6b4
|
Other addresses for matrix.bowdre.net (not scanned): 2607:7700:0:1d:0:1:9688:6b4
|
||||||
|
@ -125,16 +125,16 @@ Nmap done: 1 IP address (1 host up) scanned in 8.44 seconds
|
||||||
|
|
||||||
Cool! Before I move on, I'll be sure to make the rules persistent so they'll be re-applied whenever `iptables` starts up:
|
Cool! Before I move on, I'll be sure to make the rules persistent so they'll be re-applied whenever `iptables` starts up:
|
||||||
|
|
||||||
Make rules persistent:
|
```shell
|
||||||
```
|
sudo netfilter-persistent save # [tl! .cmd]
|
||||||
$ sudo netfilter-persistent save
|
run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save # [tl! .nocopy:1]
|
||||||
run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save
|
|
||||||
run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save
|
run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save
|
||||||
```
|
```
|
||||||
|
|
||||||
### Reverse proxy setup
|
### Reverse proxy setup
|
||||||
I had initially planned on using `certbot` to generate Let's Encrypt certificates, and then reference the certs as needed from an `nginx` or Apache reverse proxy configuration. While researching how the [proxy would need to be configured to front Synapse](https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md), I found this sample `nginx` configuration:
|
I had initially planned on using `certbot` to generate Let's Encrypt certificates, and then reference the certs as needed from an `nginx` or Apache reverse proxy configuration. While researching how the [proxy would need to be configured to front Synapse](https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md), I found this sample `nginx` configuration:
|
||||||
```conf
|
```text
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
server {
|
server {
|
||||||
listen 443 ssl http2;
|
listen 443 ssl http2;
|
||||||
listen [::]:443 ssl http2;
|
listen [::]:443 ssl http2;
|
||||||
|
@ -159,7 +159,8 @@ server {
|
||||||
```
|
```
|
||||||
|
|
||||||
And this sample Apache one:
|
And this sample Apache one:
|
||||||
```conf
|
```text
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
<VirtualHost *:443>
|
<VirtualHost *:443>
|
||||||
SSLEngine on
|
SSLEngine on
|
||||||
ServerName matrix.example.com
|
ServerName matrix.example.com
|
||||||
|
@ -185,7 +186,8 @@ And this sample Apache one:
|
||||||
```
|
```
|
||||||
|
|
||||||
I also found this sample config for another web server called [Caddy](https://caddyserver.com):
|
I also found this sample config for another web server called [Caddy](https://caddyserver.com):
|
||||||
```
|
```text
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
matrix.example.com {
|
matrix.example.com {
|
||||||
reverse_proxy /_matrix/* http://localhost:8008
|
reverse_proxy /_matrix/* http://localhost:8008
|
||||||
reverse_proxy /_synapse/client/* http://localhost:8008
|
reverse_proxy /_synapse/client/* http://localhost:8008
|
||||||
|
@ -198,8 +200,8 @@ example.com:8448 {
|
||||||
|
|
||||||
One of these looks much simpler than the other two. I'd never heard of Caddy so I did some quick digging, and I found that it would actually [handle the certificates entirely automatically](https://caddyserver.com/docs/automatic-https) - in addition to having a much easier config. [Installing Caddy](https://caddyserver.com/docs/install#debian-ubuntu-raspbian) wasn't too bad, either:
|
One of these looks much simpler than the other two. I'd never heard of Caddy so I did some quick digging, and I found that it would actually [handle the certificates entirely automatically](https://caddyserver.com/docs/automatic-https) - in addition to having a much easier config. [Installing Caddy](https://caddyserver.com/docs/install#debian-ubuntu-raspbian) wasn't too bad, either:
|
||||||
|
|
||||||
```sh
|
```shell
|
||||||
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https
|
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https # [tl! .cmd:4]
|
||||||
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo apt-key add -
|
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo apt-key add -
|
||||||
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
|
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
|
||||||
sudo apt update
|
sudo apt update
|
||||||
|
@ -207,18 +209,19 @@ sudo apt install caddy
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I just need to put my configuration into the default `Caddyfile`, including the required `.well-known` delegation piece from earlier.
|
Then I just need to put my configuration into the default `Caddyfile`, including the required `.well-known` delegation piece from earlier.
|
||||||
```
|
```text
|
||||||
$ sudo vi /etc/caddy/Caddyfile
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# /etc/caddy/Caddyfile
|
||||||
matrix.bowdre.net {
|
matrix.bowdre.net {
|
||||||
reverse_proxy /_matrix/* http://localhost:8008
|
reverse_proxy /_matrix/* http://localhost:8008
|
||||||
reverse_proxy /_synapse/client/* http://localhost:8008
|
reverse_proxy /_synapse/client/* http://localhost:8008
|
||||||
}
|
}
|
||||||
|
|
||||||
bowdre.net {
|
bowdre.net {
|
||||||
route {
|
route {
|
||||||
respond /.well-known/matrix/server `{"m.server": "matrix.bowdre.net:443"}`
|
respond /.well-known/matrix/server `{"m.server": "matrix.bowdre.net:443"}`
|
||||||
redir https://virtuallypotato.com
|
redir https://virtuallypotato.com
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
There's a lot happening in that 11-line `Caddyfile`, but it's not complicated by any means. The `matrix.bowdre.net` section is pretty much exactly yanked from the sample config, and it's going to pass any requests that start like `matrix.bowdre.net/_matrix/` or `matrix.bowdre.net/_synapse/client/` through to the Synapse server listening locally on port `8008`. Caddy will automatically request and apply a Let's Encrypt or ZeroSSL cert for any server names spelled out in the config - very slick!
|
There's a lot happening in that 11-line `Caddyfile`, but it's not complicated by any means. The `matrix.bowdre.net` section is pretty much exactly yanked from the sample config, and it's going to pass any requests that start like `matrix.bowdre.net/_matrix/` or `matrix.bowdre.net/_synapse/client/` through to the Synapse server listening locally on port `8008`. Caddy will automatically request and apply a Let's Encrypt or ZeroSSL cert for any server names spelled out in the config - very slick!
|
||||||
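Once Caddy is up and has had a chance to request the cert, an `openssl` one-liner like this should show the issuer and validity window (just a spot check; swap in your own hostname):
```shell
echo | openssl s_client -connect matrix.bowdre.net:443 2>/dev/null \ # [tl! .cmd]
  | openssl x509 -noout -issuer -dates
```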
|
@ -228,16 +231,16 @@ I set up the `bowdre.net` section to return the appropriate JSON string to tell
|
||||||
(I wouldn't need that section at all if I were using a separate web server for `bowdre.net`; instead, I'd basically just add that `respond /.well-known/matrix/server` line to that other server's config.)
|
(I wouldn't need that section at all if I were using a separate web server for `bowdre.net`; instead, I'd basically just add that `respond /.well-known/matrix/server` line to that other server's config.)
|
||||||
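As a quick way to test that delegation piece once everything is running, fetching the well-known endpoint should return exactly the JSON from the `respond` directive:
```shell
curl https://bowdre.net/.well-known/matrix/server # [tl! .cmd]
{"m.server": "matrix.bowdre.net:443"} # [tl! .nocopy]
```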
|
|
||||||
Now to enable the `caddy` service, start it, and restart it so that it loads the new config:
|
Now to enable the `caddy` service, start it, and restart it so that it loads the new config:
|
||||||
```
|
```shell
|
||||||
sudo systemctl enable caddy
|
sudo systemctl enable caddy # [tl! .cmd:2]
|
||||||
sudo systemctl start caddy
|
sudo systemctl start caddy
|
||||||
sudo systemctl restart caddy
|
sudo systemctl restart caddy
|
||||||
```
|
```
|
||||||
|
|
||||||
If I repeat my `nmap` scan from earlier, I'll see that the HTTP and HTTPS ports are now open. The server still isn't actually serving anything on those ports yet, but at least it's listening.
|
If I repeat my `nmap` scan from earlier, I'll see that the HTTP and HTTPS ports are now open. The server still isn't actually serving anything on those ports yet, but at least it's listening.
|
||||||
```
|
```shell
|
||||||
$ nmap -Pn matrix.bowdre.net
|
nmap -Pn matrix.bowdre.net # [tl! .cmd]
|
||||||
Starting Nmap 7.70 ( https://nmap.org ) at 2021-06-27 13:44 CDT
|
Starting Nmap 7.70 ( https://nmap.org ) at 2021-06-27 13:44 CDT # [tl! .nocopy:9]
|
||||||
Nmap scan report for matrix.bowdre.net (150.136.6.180)
|
Nmap scan report for matrix.bowdre.net (150.136.6.180)
|
||||||
Host is up (0.034s latency).
|
Host is up (0.034s latency).
|
||||||
Not shown: 997 filtered ports
|
Not shown: 997 filtered ports
|
||||||
|
@ -265,56 +268,57 @@ Okay, let's actually serve something up now.
|
||||||
#### Docker setup
|
#### Docker setup
|
||||||
Before I can get on with [deploying Synapse in Docker](https://hub.docker.com/r/matrixdotorg/synapse), I first need to [install Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository) on the system:
|
Before I can get on with [deploying Synapse in Docker](https://hub.docker.com/r/matrixdotorg/synapse), I first need to [install Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository) on the system:
|
||||||
|
|
||||||
```sh
|
```shell
|
||||||
sudo apt-get install \
|
sudo apt-get install \ # [tl! .cmd]
|
||||||
apt-transport-https \
|
apt-transport-https \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
curl \
|
||||||
gnupg \
|
gnupg \
|
||||||
lsb-release
|
lsb-release
|
||||||
|
|
||||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | \ # [tl! .cmd]
|
||||||
|
sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||||
|
|
||||||
echo \
|
echo \ # [tl! .cmd]
|
||||||
"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
|
"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
|
||||||
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||||
|
|
||||||
sudo apt update
|
sudo apt update # [tl! .cmd:1]
|
||||||
|
|
||||||
sudo apt install docker-ce docker-ce-cli containerd.io
|
sudo apt install docker-ce docker-ce-cli containerd.io
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll also [install Docker Compose](https://docs.docker.com/compose/install/#install-compose):
|
I'll also [install Docker Compose](https://docs.docker.com/compose/install/#install-compose):
|
||||||
```sh
|
```shell
|
||||||
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" \ # [tl! .cmd]
|
||||||
|
-o /usr/local/bin/docker-compose
|
||||||
sudo chmod +x /usr/local/bin/docker-compose
|
sudo chmod +x /usr/local/bin/docker-compose # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
And I'll add my `ubuntu` user to the `docker` group so that I won't have to run every docker command with `sudo`:
|
And I'll add my `ubuntu` user to the `docker` group so that I won't have to run every docker command with `sudo`:
|
||||||
```
|
```shell
|
||||||
sudo usermod -G docker -a ubuntu
|
sudo usermod -G docker -a ubuntu # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll log out and back in so that the membership change takes effect, and then test both `docker` and `docker-compose` to make sure they're working:
|
I'll log out and back in so that the membership change takes effect, and then test both `docker` and `docker-compose` to make sure they're working:
|
||||||
```
|
```shell
|
||||||
$ docker --version
|
docker --version # [tl! .cmd]
|
||||||
Docker version 20.10.7, build f0df350
|
Docker version 20.10.7, build f0df350 # [tl! .nocopy:1]
|
||||||
|
|
||||||
$ docker-compose --version
|
docker-compose --version # [tl! .cmd]
|
||||||
docker-compose version 1.29.2, build 5becea4c
|
docker-compose version 1.29.2, build 5becea4c # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Synapse setup
|
#### Synapse setup
|
||||||
Now I'll make a place for the Synapse installation to live, including a `data` folder that will be mounted into the container:
|
Now I'll make a place for the Synapse installation to live, including a `data` folder that will be mounted into the container:
|
||||||
```
|
```shell
|
||||||
sudo mkdir -p /opt/matrix/synapse/data
|
sudo mkdir -p /opt/matrix/synapse/data # [tl! .cmd:1]
|
||||||
cd /opt/matrix/synapse
|
cd /opt/matrix/synapse
|
||||||
```
|
```
|
||||||
|
|
||||||
And then I'll create the compose file to define the deployment:
|
And then I'll create the compose file to define the deployment:
|
||||||
```yaml
|
```yaml
|
||||||
$ sudo vi docker-compose.yml
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# /opt/matrix/synapse/docker-compose.yml
|
||||||
services:
|
services:
|
||||||
synapse:
|
synapse:
|
||||||
container_name: "synapse"
|
container_name: "synapse"
|
||||||
|
@ -328,13 +332,13 @@ services:
|
||||||
|
|
||||||
Before I can fire this up, I'll need to generate an initial configuration as [described in the documentation](https://hub.docker.com/r/matrixdotorg/synapse). Here I'll specify the server name that I'd like other Matrix servers to know mine by (`bowdre.net`):
|
Before I can fire this up, I'll need to generate an initial configuration as [described in the documentation](https://hub.docker.com/r/matrixdotorg/synapse). Here I'll specify the server name that I'd like other Matrix servers to know mine by (`bowdre.net`):
|
||||||
|
|
||||||
```sh
|
```shell
|
||||||
$ docker run -it --rm \
|
docker run -it --rm \ # [tl! .cmd]
|
||||||
-v "/opt/matrix/synapse/data:/data" \
|
-v "/opt/matrix/synapse/data:/data" \
|
||||||
-e SYNAPSE_SERVER_NAME=bowdre.net \
|
-e SYNAPSE_SERVER_NAME=bowdre.net \
|
||||||
-e SYNAPSE_REPORT_STATS=yes \
|
-e SYNAPSE_REPORT_STATS=yes \
|
||||||
matrixdotorg/synapse generate
|
matrixdotorg/synapse generate
|
||||||
|
# [tl! .nocopy:start]
|
||||||
Unable to find image 'matrixdotorg/synapse:latest' locally
|
Unable to find image 'matrixdotorg/synapse:latest' locally
|
||||||
latest: Pulling from matrixdotorg/synapse
|
latest: Pulling from matrixdotorg/synapse
|
||||||
69692152171a: Pull complete
|
69692152171a: Pull complete
|
||||||
|
@ -351,7 +355,7 @@ Status: Downloaded newer image for matrixdotorg/synapse:latest
|
||||||
Creating log config /data/bowdre.net.log.config
|
Creating log config /data/bowdre.net.log.config
|
||||||
Generating config file /data/homeserver.yaml
|
Generating config file /data/homeserver.yaml
|
||||||
Generating signing key file /data/bowdre.net.signing.key
|
Generating signing key file /data/bowdre.net.signing.key
|
||||||
A config file has been generated in '/data/homeserver.yaml' for server name 'bowdre.net'. Please review this file and customise it to your needs.
|
A config file has been generated in '/data/homeserver.yaml' for server name 'bowdre.net'. Please review this file and customise it to your needs. # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
As instructed, I'll use `sudo vi data/homeserver.yaml` to review/modify the generated config. I'll leave
|
As instructed, I'll use `sudo vi data/homeserver.yaml` to review/modify the generated config. I'll leave
|
||||||
|
@ -373,16 +377,16 @@ so that I can create a user account without fumbling with the CLI. I'll be sure
|
||||||
There are a bunch of other useful configurations that can be made here, but these will do to get things going for now.
|
There are a bunch of other useful configurations that can be made here, but these will do to get things going for now.
|
||||||
|
|
||||||
Time to start it up:
|
Time to start it up:
|
||||||
```
|
```shell
|
||||||
$ docker-compose up -d
|
docker-compose up -d # [tl! .cmd]
|
||||||
Creating network "synapse_default" with the default driver
|
Creating network "synapse_default" with the default driver # [tl! .nocopy:1]
|
||||||
Creating synapse ... done
|
Creating synapse ... done
|
||||||
```
|
```
|
||||||
|
|
||||||
And use `docker ps` to confirm that it's running:
|
And use `docker ps` to confirm that it's running:
|
||||||
```
|
```shell
|
||||||
$ docker ps
|
docker ps # [tl! .cmd]
|
||||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES # [tl! .nocopy:1]
|
||||||
573612ec5735 matrixdotorg/synapse "/start.py" 25 seconds ago Up 23 seconds (healthy) 8009/tcp, 127.0.0.1:8008->8008/tcp, 8448/tcp synapse
|
573612ec5735 matrixdotorg/synapse "/start.py" 25 seconds ago Up 23 seconds (healthy) 8009/tcp, 127.0.0.1:8008->8008/tcp, 8448/tcp synapse
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -400,6 +404,7 @@ And I can view the JSON report at the bottom of the page to confirm that it's co
|
||||||
"m.server": "matrix.bowdre.net:443",
|
"m.server": "matrix.bowdre.net:443",
|
||||||
"CacheExpiresAt": 0
|
"CacheExpiresAt": 0
|
||||||
},
|
},
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Now I can fire up my [Matrix client of choice](https://element.io/get-started), specify my homeserver using its full FQDN, and [register](https://app.element.io/#/register) a new user account:
|
Now I can fire up my [Matrix client of choice](https://element.io/get-started), specify my homeserver using its full FQDN, and [register](https://app.element.io/#/register) a new user account:
|
||||||
|
@ -414,23 +419,21 @@ All in, I'm pretty pleased with how this little project turned out, and I learne
|
||||||
|
|
||||||
### Update: Updating
|
### Update: Updating
|
||||||
After a while, it's probably a good idea to update both the Ubuntu server and the Synapse container running on it. Updating the server itself is as easy as:
|
After a while, it's probably a good idea to update both the Ubuntu server and the Synapse container running on it. Updating the server itself is as easy as:
|
||||||
```sh
|
```shell
|
||||||
sudo apt update
|
sudo apt update # [tl! .cmd:1]
|
||||||
sudo apt upgrade
|
sudo apt upgrade
|
||||||
# And, if needed:
|
|
||||||
sudo reboot
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Here's what I do to update the container:
|
Here's what I do to update the container:
|
||||||
```sh
|
```shell
|
||||||
# Move to the working directory
|
# Move to the working directory # [tl! .nocopy]
|
||||||
cd /opt/matrix/synapse
|
cd /opt/matrix/synapse # [tl! .cmd]
|
||||||
# Pull a new version of the synapse image
|
# Pull a new version of the synapse image # [tl! .nocopy]
|
||||||
docker-compose pull
|
docker-compose pull # [tl! .cmd]
|
||||||
# Stop the container
|
# Stop the container # [tl! .nocopy]
|
||||||
docker-compose down
|
docker-compose down # [tl! .cmd]
|
||||||
# Start it back up without the old version
|
# Start it back up without the old version # [tl! .nocopy]
|
||||||
docker-compose up -d --remove-orphans
|
docker-compose up -d --remove-orphans # [tl! .cmd]
|
||||||
# Periodically remove the old docker images
|
# Periodically remove the old docker images # [tl! .nocopy]
|
||||||
docker image prune
|
docker image prune # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
|
@ -15,39 +15,40 @@ I found myself with a sudden need for parsing a Linux server's logs to figure ou
|
||||||
### Find IP-ish strings
|
### Find IP-ish strings
|
||||||
This will get you all occurrences of things which look vaguely like IPv4 addresses:
|
This will get you all occurrences of things which look vaguely like IPv4 addresses:
|
||||||
```shell
|
```shell
|
||||||
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT
|
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT # [tl! .cmd]
|
||||||
```
|
```
|
||||||
(It's not a perfect IP address regex since it would match things like `987.654.321.555` but it's close enough for my needs.)
|
(It's not a perfect IP address regex since it would match things like `987.654.321.555` but it's close enough for my needs.)
|
||||||
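If I ever did need a stricter match, a pattern like this one bounds each octet to the 0-255 range, at the cost of being much harder to read:
```shell
grep -o -E '\b((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b' ACCESS_LOG.TXT # [tl! .cmd]
```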
|
|
||||||
### Filter out `localhost`
|
### Filter out `localhost`
|
||||||
The log likely includes a LOT of traffic to/from `127.0.0.1` so let's toss out `localhost` by piping through `grep -v "127.0.0.1"` (`-v` will do an inverse match - only return results which *don't* match the given expression):
|
The log likely includes a LOT of traffic to/from `127.0.0.1` so let's toss out `localhost` by piping through `grep -v "127.0.0.1"` (`-v` will do an inverse match - only return results which *don't* match the given expression):
|
||||||
```shell
|
```shell
|
||||||
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1"
|
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Count up the duplicates
|
### Count up the duplicates
|
||||||
Now we need to know how many times each IP shows up in the log. We can do that by sorting the list and then passing it through `uniq -c` (`uniq` only collapses repeated entries when they're adjacent, which is why the `sort` comes first, and the `-c` flag will return a count of how many times each result appears):
|
Now we need to know how many times each IP shows up in the log. We can do that by sorting the list and then passing it through `uniq -c` (`uniq` only collapses repeated entries when they're adjacent, which is why the `sort` comes first, and the `-c` flag will return a count of how many times each result appears):
|
||||||
```shell
|
```shell
|
||||||
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | sort | uniq -c
|
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | sort | uniq -c # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Sort the results
|
### Sort the results
|
||||||
We can use `sort` again to order the results by those counts. `-n` tells it to sort based on numeric rather than character values, and `-r` reverses the list so that the larger numbers appear at the top:
|
We can use `sort` again to order the results by those counts. `-n` tells it to sort based on numeric rather than character values, and `-r` reverses the list so that the larger numbers appear at the top:
|
||||||
```shell
|
```shell
|
||||||
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r
|
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Top 5
|
### Top 5
|
||||||
And, finally, let's use `head -n 5` to only get the first five results:
|
And, finally, let's use `head -n 5` to only get the first five results:
|
||||||
```shell
|
```shell
|
||||||
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r | head -n 5
|
grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.TXT | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r | head -n 5 # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Bonus round!
|
### Bonus round!
|
||||||
You know how old log files get rotated and compressed into files like `logname.1.gz`? I *very* recently learned that there are versions of the standard Linux text manipulation tools which can work directly on compressed log files, without having to first extract the files. I'd been doing things the hard way for years - no longer, now that I know about `zcat`, `zdiff`, `zgrep`, and `zless`!
|
You know how old log files get rotated and compressed into files like `logname.1.gz`? I *very* recently learned that there are versions of the standard Linux text manipulation tools which can work directly on compressed log files, without having to first extract the files. I'd been doing things the hard way for years - no longer, now that I know about `zcat`, `zdiff`, `zgrep`, and `zless`!
|
||||||
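That means the same pipeline from above works on a rotated log just by swapping `grep` for `zgrep`, no extraction required (assuming the same `ACCESS_LOG.1.gz` naming convention):
```shell
zgrep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.1.gz | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r | head -n 5 # [tl! .cmd]
```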
|
|
||||||
So let's use a `for` loop to iterate through 20 of those compressed logs, and use `date -r [filename]` to get the timestamp for each log as we go:
|
So let's use a `for` loop to iterate through 20 of those compressed logs, and use `date -r [filename]` to get the timestamp for each log as we go:
|
||||||
```bash
|
```shell
|
||||||
for i in {1..20}; do date -r ACCESS_LOG.$i.gz; zgrep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ACCESS_LOG.$i.gz | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r | head -n 5; done
|
for i in {1..20}; do date -r ACCESS_LOG.$i.gz; zgrep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' \ # [tl! .cmd]
|
||||||
|
  ACCESS_LOG.$i.gz | grep -v "127.0.0.1" | sort | uniq -c | sort -n -r | head -n 5; done
|
||||||
```
|
```
|
||||||
Nice!
|
Nice!
|
|
@ -39,9 +39,9 @@ ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verif
|
||||||
```
|
```
|
||||||
|
|
||||||
Further, attempting to pull down that URL with `curl` also failed:
|
Further, attempting to pull down that URL with `curl` also failed:
|
||||||
```sh
|
```shell
|
||||||
root@ssc [ ~ ]# curl https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery
|
curl https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery # [tl! .cmd]
|
||||||
curl: (60) SSL certificate problem: self signed certificate in certificate chain
|
curl: (60) SSL certificate problem: self signed certificate in certificate chain # [tl! .nocopy:5]
|
||||||
More details here: https://curl.se/docs/sslcerts.html
|
More details here: https://curl.se/docs/sslcerts.html
|
||||||
|
|
||||||
curl failed to verify the legitimacy of the server and therefore could not
|
curl failed to verify the legitimacy of the server and therefore could not
|
||||||
|
@ -52,7 +52,7 @@ how to fix it, please visit the web page mentioned above.
|
||||||
In my homelab, I am indeed using self-signed certificates. I also encountered the same issue in my lab at work, though, and I'm using certs issued by our enterprise CA there. I had run into a similar problem with previous versions of SSC, but the [quick-and-dirty workaround to disable certificate verification](https://communities.vmware.com/t5/VMware-vRealize-Discussions/SaltStack-Config-Integration-show-Blank-Page/td-p/2863973) doesn't seem to work anymore.
|
In my homelab, I am indeed using self-signed certificates. I also encountered the same issue in my lab at work, though, and I'm using certs issued by our enterprise CA there. I had run into a similar problem with previous versions of SSC, but the [quick-and-dirty workaround to disable certificate verification](https://communities.vmware.com/t5/VMware-vRealize-Discussions/SaltStack-Config-Integration-show-Blank-Page/td-p/2863973) doesn't seem to work anymore.
|
||||||
|
|
||||||
### The Solution
|
### The Solution
|
||||||
Clearly I needed to import either the vRA system's certificate (for my homelab) or the certificate chain for my enterprise CA (for my work environment) into SSC's certificate store so that it will trust vRA. But how?
|
Clearly I needed to import either the vRA system's certificate (for my homelab) or the certificate chain for my enterprise CA (for my work environment) into SSC's certificate store so that it will trust vRA. But how?
|
||||||
|
|
||||||
I fumbled around for a bit and managed to get the required certs added to the system certificate store so that my `curl` test would succeed, but trying to access the SSC web UI still gave me a big middle finger. I eventually found [this documentation](https://docs.vmware.com/en/VMware-vRealize-Automation-SaltStack-Config/8.6/install-configure-saltstack-config/GUID-21A87CE2-8184-4F41-B71B-0FCBB93F21FC.html#troubleshooting-saltstack-config-environments-with-vrealize-automation-that-use-selfsigned-certificates-3) which describes how to configure SSC to work with self-signed certs, and it held the missing detail of how to tell the SaltStack Returner-as-a-Service (RaaS) component that it should use that system certificate store.
|
I fumbled around for a bit and managed to get the required certs added to the system certificate store so that my `curl` test would succeed, but trying to access the SSC web UI still gave me a big middle finger. I eventually found [this documentation](https://docs.vmware.com/en/VMware-vRealize-Automation-SaltStack-Config/8.6/install-configure-saltstack-config/GUID-21A87CE2-8184-4F41-B71B-0FCBB93F21FC.html#troubleshooting-saltstack-config-environments-with-vrealize-automation-that-use-selfsigned-certificates-3) which describes how to configure SSC to work with self-signed certs, and it held the missing detail of how to tell the SaltStack Returner-as-a-Service (RaaS) component that it should use that system certificate store.
|
||||||
|
|
||||||
|
@ -61,21 +61,22 @@ So here's what I did to get things working in my homelab:
|
||||||
![Exporting the self-signed CA cert](20211105_export_selfsigned_ca.png)
|
![Exporting the self-signed CA cert](20211105_export_selfsigned_ca.png)
|
||||||
2. Open the file in a text editor, and copy the contents into a new file on the SSC appliance. I used `~/vra.crt`.
|
2. Open the file in a text editor, and copy the contents into a new file on the SSC appliance. I used `~/vra.crt`.
|
||||||
3. Append the certificate to the end of the system `ca-bundle.crt`:
|
3. Append the certificate to the end of the system `ca-bundle.crt`:
|
||||||
```sh
|
```shell
|
||||||
cat <vra.crt >> /etc/pki/tls/certs/ca-bundle.crt
|
cat <vra.crt >> /etc/pki/tls/certs/ca-bundle.crt # [tl! .cmd]
|
||||||
```
|
```
|
||||||
4. Test that I can now `curl` the vRA endpoint without a certificate error:
|
4. Test that I can now `curl` the vRA endpoint without a certificate error:
|
||||||
```sh
|
```curl
|
||||||
root@ssc [ ~ ]# curl https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery
|
curl https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery # [tl! .cmd]
|
||||||
{"timestamp":1636139143260,"type":"CLIENT_ERROR","status":"400 BAD_REQUEST","error":"Bad Request","serverMessage":"400 BAD_REQUEST \"Required String parameter 'state' is not present\""}
|
{"timestamp":1636139143260,"type":"CLIENT_ERROR","status":"400 BAD_REQUEST","error":"Bad Request","serverMessage":"400 BAD_REQUEST \"Required String parameter 'state' is not present\""} # [tl! .nocopy]
|
||||||
```
|
|
||||||
5. Edit `/usr/lib/systemd/system/raas.service` to update the service definition so it will look to the `ca-bundle.crt` file by adding
|
|
||||||
```
|
```
|
||||||
|
5. Edit `/usr/lib/systemd/system/raas.service` to update the service definition so it will look to the `ca-bundle.crt` file by adding
|
||||||
|
```ini
|
||||||
Environment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt
|
Environment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt
|
||||||
```
|
```
|
||||||
above the `ExecStart` line:
|
above the `ExecStart` line:
|
||||||
```sh
|
```ini
|
||||||
root@ssc [ ~ ]# cat /usr/lib/systemd/system/raas.service
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# /usr/lib/systemd/system/raas.service
|
||||||
[Unit]
|
[Unit]
|
||||||
Description=The SaltStack Enterprise API Server
|
Description=The SaltStack Enterprise API Server
|
||||||
After=network.target
|
After=network.target
|
||||||
|
@ -90,15 +91,15 @@ RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX AF_NETLINK
|
||||||
PermissionsStartOnly=true
|
PermissionsStartOnly=true
|
||||||
ExecStartPre=/bin/sh -c 'systemctl set-environment FIPS_MODE=$(/opt/vmware/bin/ovfenv -q --key fips-mode)'
|
ExecStartPre=/bin/sh -c 'systemctl set-environment FIPS_MODE=$(/opt/vmware/bin/ovfenv -q --key fips-mode)'
|
||||||
ExecStartPre=/bin/sh -c 'systemctl set-environment NODE_TYPE=$(/opt/vmware/bin/ovfenv -q --key node-type)'
|
ExecStartPre=/bin/sh -c 'systemctl set-environment NODE_TYPE=$(/opt/vmware/bin/ovfenv -q --key node-type)'
|
||||||
Environment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt
|
Environment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt # [tl! focus]
|
||||||
ExecStart=/usr/bin/raas
|
ExecStart=/usr/bin/raas
|
||||||
TimeoutStopSec=90
|
TimeoutStopSec=90
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
```
|
```
|
||||||
6. Stop and restart the `raas` service:
|
6. Stop and restart the `raas` service:
|
||||||
```sh
|
```shell
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload # [tl! .cmd:2]
|
||||||
systemctl stop raas
|
systemctl stop raas
|
||||||
systemctl start raas
|
systemctl start raas
|
||||||
```
|
```
|
||||||
|
@ -110,8 +111,8 @@ systemctl start raas
|
||||||
The steps for doing this at work with an enterprise CA were pretty similar, with just slightly different steps 1 and 2:
|
The steps for doing this at work with an enterprise CA were pretty similar, with just slightly different steps 1 and 2:
|
||||||
1. Access the enterprise CA and download the CA chain, which came in `.p7b` format.
|
1. Access the enterprise CA and download the CA chain, which came in `.p7b` format.
|
||||||
2. Use `openssl` to extract the individual certificates:
|
2. Use `openssl` to extract the individual certificates:
|
||||||
```sh
|
```shell
|
||||||
openssl pkcs7 -inform PEM -outform PEM -in enterprise-ca-chain.p7b -print_certs > enterprise-ca-chain.pem
|
openssl pkcs7 -inform PEM -outform PEM -in enterprise-ca-chain.p7b -print_certs > enterprise-ca-chain.pem # [tl! .cmd]
|
||||||
```
|
```
|
||||||
Copy it to the SSC appliance, and then pick up with Step 3 above.
|
Copy it to the SSC appliance, and then pick up with Step 3 above.
|
||||||
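Getting the converted chain onto the appliance can be as simple as an `scp` (the hostname and destination path here are just placeholders):
```shell
scp enterprise-ca-chain.pem root@ssc.lab.bowdre.net:~/vra.crt # [tl! .cmd]
```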
|
|
||||||
|
|
|
@ -30,7 +30,7 @@ At this point, I was ready to actually kick off the deployment. Ahmet made this
|
||||||
|
|
||||||
![Authorize Cloud Shell prompt](20210820_authorize_cloud_shell.png)
|
![Authorize Cloud Shell prompt](20210820_authorize_cloud_shell.png)
|
||||||
|
|
||||||
The script prompted me to select a project and a region, and then asked for the Sheet ID that I copied earlier.
|
The script prompted me to select a project and a region, and then asked for the Sheet ID that I copied earlier.
|
||||||
![Cloud Shell deployment](20210820_cloud_shell.png)
|
![Cloud Shell deployment](20210820_cloud_shell.png)
|
||||||
|
|
||||||
### Grant access to the Sheet
|
### Grant access to the Sheet
|
||||||
|
@ -82,10 +82,9 @@ And now I can hand out handy-dandy short links!
|
||||||
|
|
||||||
| Link | Description|
|
| Link | Description|
|
||||||
| --- | --- |
|
| --- | --- |
|
||||||
| [go.bowdre.net/ghia](https://go.bowdre.net/ghia) | 1974 VW Karmann Ghia project |
|
| [go.bowdre.net/coso](https://go.bowdre.net/coso) | Follow me on CounterSocial |
|
||||||
| [go.bowdre.net/conedoge](https://go.bowdre.net/conedoge) | 2014 Subaru BRZ autocross videos |
|
| [go.bowdre.net/conedoge](https://go.bowdre.net/conedoge) | 2014 Subaru BRZ autocross videos |
|
||||||
| [go.bowdre.net/matrix](https://go.bowdre.net/matrix) | Chat with me on Matrix |
|
| [go.bowdre.net/cooltechshit](https://go.bowdre.net/cooltechshit) | A collection of cool tech shit (references and resources) |
|
||||||
| [go.bowdre.net/twits](https://go.bowdre.net/twits) | Follow me on Twitter |
|
| [go.bowdre.net/stuffiuse](https://go.bowdre.net/stuffiuse) | Things that I use (and think you should use too) |
|
||||||
| [go.bowdre.net/stadia](https://go.bowdre.net/stadia) | Game with me on Stadia |
|
|
||||||
| [go.bowdre.net/shorterer](https://go.bowdre.net/shorterer) | This post! |
|
| [go.bowdre.net/shorterer](https://go.bowdre.net/shorterer) | This post! |
|
||||||
|
|
||||||
|
|
|
@ -44,8 +44,8 @@ After hitting **Execute**, the Swagger UI will populate the *Responses* section
|
||||||
![curl request format](login_controller_3.png)
|
![curl request format](login_controller_3.png)
|
||||||
|
|
||||||
So I could easily replicate this using the `curl` utility by just copying and pasting the following into a shell:
|
So I could easily replicate this using the `curl` utility by just copying and pasting the following into a shell:
|
||||||
```shell
|
```curl
|
||||||
curl -X 'POST' \
|
curl -X 'POST' \ # [tl! .cmd]
|
||||||
'https://vra.lab.bowdre.net/csp/gateway/am/api/login' \
|
'https://vra.lab.bowdre.net/csp/gateway/am/api/login' \
|
||||||
-H 'accept: */*' \
|
-H 'accept: */*' \
|
||||||
-H 'Content-Type: application/json' \
|
-H 'Content-Type: application/json' \
|
||||||
|
@ -69,31 +69,32 @@ Now I can go find an IaaS API that I'm interested in querying (like `/iaas/api/f
|
||||||
![Using Swagger to query for flavor mappings](flavor_mappings_swagger_request.png)
|
![Using Swagger to query for flavor mappings](flavor_mappings_swagger_request.png)
|
||||||
|
|
||||||
And here's the result:
|
And here's the result:
|
||||||
```json {hl_lines=[6,10,14,44,48,52,56,60,64]}
|
```json
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
{
|
{
|
||||||
"content": [
|
"content": [
|
||||||
{
|
{
|
||||||
"flavorMappings": {
|
"flavorMappings": {
|
||||||
"mapping": {
|
"mapping": {
|
||||||
"1vCPU | 2GB [tiny]": {
|
"1vCPU | 2GB [tiny]": { // [tl! focus]
|
||||||
"cpuCount": 1,
|
"cpuCount": 1,
|
||||||
"memoryInMB": 2048
|
"memoryInMB": 2048
|
||||||
},
|
},
|
||||||
"1vCPU | 1GB [micro]": {
|
"1vCPU | 1GB [micro]": { // [tl! focus]
|
||||||
"cpuCount": 1,
|
"cpuCount": 1,
|
||||||
"memoryInMB": 1024
|
"memoryInMB": 1024
|
||||||
},
|
},
|
||||||
"2vCPU | 4GB [small]": {
|
"2vCPU | 4GB [small]": { // [tl! focus]
|
||||||
"cpuCount": 2,
|
"cpuCount": 2,
|
||||||
"memoryInMB": 4096
|
"memoryInMB": 4096
|
||||||
}
|
}
|
||||||
},
|
}, // [tl! collapse:5]
|
||||||
"_links": {
|
"_links": {
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},// [tl! collapse:start]
|
||||||
"externalRegionId": "Datacenter:datacenter-39056",
|
"externalRegionId": "Datacenter:datacenter-39056",
|
||||||
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
||||||
"name": "",
|
"name": "",
|
||||||
|
@ -107,43 +108,43 @@ And here's the result:
|
||||||
},
|
},
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
||||||
}
|
} // [tl! collapse:end]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"flavorMappings": {
|
"flavorMappings": {
|
||||||
"mapping": {
|
"mapping": {
|
||||||
"2vCPU | 8GB [medium]": {
|
"2vCPU | 8GB [medium]": { // [tl! focus]
|
||||||
"cpuCount": 2,
|
"cpuCount": 2,
|
||||||
"memoryInMB": 8192
|
"memoryInMB": 8192
|
||||||
},
|
},
|
||||||
"1vCPU | 2GB [tiny]": {
|
"1vCPU | 2GB [tiny]": { // [tl! focus]
|
||||||
"cpuCount": 1,
|
"cpuCount": 1,
|
||||||
"memoryInMB": 2048
|
"memoryInMB": 2048
|
||||||
},
|
},
|
||||||
"8vCPU | 16GB [giant]": {
|
"8vCPU | 16GB [giant]": { // [tl! focus]
|
||||||
"cpuCount": 8,
|
"cpuCount": 8,
|
||||||
"memoryInMB": 16384
|
"memoryInMB": 16384
|
||||||
},
|
},
|
||||||
"1vCPU | 1GB [micro]": {
|
"1vCPU | 1GB [micro]": { // [tl! focus]
|
||||||
"cpuCount": 1,
|
"cpuCount": 1,
|
||||||
"memoryInMB": 1024
|
"memoryInMB": 1024
|
||||||
},
|
},
|
||||||
"2vCPU | 4GB [small]": {
|
"2vCPU | 4GB [small]": { // [tl! focus]
|
||||||
"cpuCount": 2,
|
"cpuCount": 2,
|
||||||
"memoryInMB": 4096
|
"memoryInMB": 4096
|
||||||
},
|
},
|
||||||
"4vCPU | 12GB [large]": {
|
"4vCPU | 12GB [large]": { // [tl! focus]
|
||||||
"cpuCount": 4,
|
"cpuCount": 4,
|
||||||
"memoryInMB": 12288
|
"memoryInMB": 12288
|
||||||
}
|
}
|
||||||
},
|
}, // [tl! collapse:5]
|
||||||
"_links": {
|
"_links": {
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
}, // [tl! collapse:start]
|
||||||
"externalRegionId": "Datacenter:datacenter-1001",
|
"externalRegionId": "Datacenter:datacenter-1001",
|
||||||
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
||||||
"name": "",
|
"name": "",
|
||||||
|
@ -158,7 +159,7 @@ And here's the result:
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
||||||
}
|
}
|
||||||
}
|
} // [tl! collapse:end]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"totalElements": 2,
|
"totalElements": 2,
|
||||||
|
@ -176,133 +177,134 @@ As you can see, Swagger can really help to jump-start the exploration of a new A
|
||||||
|
|
||||||
Installing the [Debian package](https://httpie.io/docs/cli/debian-and-ubuntu) is a piece of ~~cake~~ _pie_[^pie]:
|
Installing the [Debian package](https://httpie.io/docs/cli/debian-and-ubuntu) is a piece of ~~cake~~ _pie_[^pie]:
|
||||||
```shell
|
```shell
|
||||||
curl -SsL https://packages.httpie.io/deb/KEY.gpg | sudo apt-key add -
|
curl -SsL https://packages.httpie.io/deb/KEY.gpg | sudo apt-key add - # [tl! .cmd:3]
|
||||||
sudo curl -SsL -o /etc/apt/sources.list.d/httpie.list https://packages.httpie.io/deb/httpie.list
|
sudo curl -SsL -o /etc/apt/sources.list.d/httpie.list https://packages.httpie.io/deb/httpie.list
|
||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install httpie
|
sudo apt install httpie
|
||||||
```
|
```
|
||||||
|
|
||||||
Once installed, running `http` will give me a quick overview of how to use this new tool:
|
Once installed, running `http` will give me a quick overview of how to use this new tool:
|
||||||
```shell {hl_lines=[3]}
|
```shell
|
||||||
; http
|
http # [tl! .cmd]
|
||||||
usage:
|
usage: # [tl! .nocopy:start]
|
||||||
http [METHOD] URL [REQUEST_ITEM ...]
|
http [METHOD] URL [REQUEST_ITEM ...]
|
||||||
|
|
||||||
error:
|
error:
|
||||||
the following arguments are required: URL
|
the following arguments are required: URL
|
||||||
|
|
||||||
for more information:
|
for more information:
|
||||||
run 'http --help' or visit https://httpie.io/docs/cli
|
run 'http --help' or visit https://httpie.io/docs/cli # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
HTTPie cleverly interprets anything passed after the URL as a [request item](https://httpie.io/docs/cli/request-items), and it determines the item type based on a simple key/value syntax:
|
HTTPie cleverly interprets anything passed after the URL as a [request item](https://httpie.io/docs/cli/request-items), and it determines the item type based on a simple key/value syntax:
|
||||||
> Each request item is simply a key/value pair separated with the following characters: `:` (headers), `=` (data field, e.g., JSON, form), `:=` (raw data field), `==` (query parameters), `@` (file upload).
|
> Each request item is simply a key/value pair separated with the following characters: `:` (headers), `=` (data field, e.g., JSON, form), `:=` (raw data field), `==` (query parameters), `@` (file upload).
|
||||||
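Here's a quick illustration of those separators in a single request, using a made-up endpoint (nothing below is a real API):
```shell
# ':' sets a header, '==' a query parameter, '=' a string JSON field, ':=' a raw JSON field
https POST example.com/api/items 'X-Request-Id: 12345' category==books title='HTTPie Demo' count:=3
```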
|
|
||||||
So my earlier request for an authentication token becomes:
|
So my earlier request for an authentication token becomes:
|
||||||
```shell
|
```shell
|
||||||
https POST vra.lab.bowdre.net/csp/gateway/am/api/login username='vra' password='********' domain='lab.bowdre.net'
|
https POST vra.lab.bowdre.net/csp/gateway/am/api/login username='vra' password='********' domain='lab.bowdre.net' # [tl! .cmd]
|
||||||
```
|
```
|
||||||
{{% notice tip "Working with Self-Signed Certificates" %}}
|
{{% notice tip "Working with Self-Signed Certificates" %}}
|
||||||
If your vRA endpoint is using a self-signed or otherwise untrusted certificate, pass the HTTPie option `--verify=no` to ignore certificate errors:
|
If your vRA endpoint is using a self-signed or otherwise untrusted certificate, pass the HTTPie option `--verify=no` to ignore certificate errors:
|
||||||
```
|
```shell
|
||||||
https --verify=no POST [URL] [REQUEST_ITEMS]
|
https --verify=no POST [URL] [REQUEST_ITEMS] # [tl! .cmd]
|
||||||
```
|
```
|
||||||
{{% /notice %}}
|
{{% /notice %}}
|
||||||
|
|
||||||
Running that will return a bunch of interesting headers, but I'm mainly interested in the response body:
|
Running that will return a bunch of interesting headers, but I'm mainly interested in the response body:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"cspAuthToken": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjI4NDY0MjAzMzA2NDQwMTQ2NDQifQ.eyJpc3MiOiJDTj1QcmVsdWRlIElkZW50aXR5IFNlcnZpY2UsT1U9Q01CVSxPPVZNd2FyZSxMPVNvZmlhLFNUPVNvZmlhLEM9QkciLCJpYXQiOjE2NTQwMjQw[...]HBOQQwEepXTNAaTv9gWMKwvPzktmKWyJFmC64FGomRyRyWiJMkLy3xmvYQERwxaDj_15-ErjC6F3c2mV1qIqES2oZbEpjxar16ZVSPshIaOoWRXe5uZB21tkuwVMgZuuwgmpliG_JBa1Y6Oh0FZBbI7o0ERro9qOW-s2npz4Csv5FwcXt0fa4esbXXIKINjqZMh9NDDb23bUabSag"
|
"cspAuthToken": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjI4NDY0MjAzMzA2NDQwMTQ2NDQifQ.eyJpc3MiOiJDTj1QcmVsdWRlIElkZW50aXR5IFNlcnZpY2UsT1U9Q01CVSxPPVZNd2FyZSxMPVNvZmlh[...]HBOQQwEepXTNAaTv9gWMKwvPzktmKWyJFmC64FGomRyRyWiJMkLy3xmvYQERwxaDj_15-npz4Csv5FwcXt0fa"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
There's the auth token[^token] that I'll need for subsequent requests. I'll store that in a variable so that it's easier to wield:
|
There's the auth token[^token] that I'll need for subsequent requests. I'll store that in a variable so that it's easier to wield:
|
||||||
```shell
|
```shell
|
||||||
token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjI4NDY0MjAzMzA2NDQwMTQ2NDQifQ.eyJpc3MiOiJDTj1QcmVsdWRlIElkZW50aXR5IFNlcnZpY2UsT1U9Q01CVSxPPVZNd2FyZSxMPVNvZmlhLFNUPVNvZmlhLEM9QkciLCJpYXQiOjE2NTQwMjQw[...]HBOQQwEepXTNAaTv9gWMKwvPzktmKWyJFmC64FGomRyRyWiJMkLy3xmvYQERwxaDj_15-ErjC6F3c2mV1qIqES2oZbEpjxar16ZVSPshIaOoWRXe5uZB21tkuwVMgZuuwgmpliG_JBa1Y6Oh0FZBbI7o0ERro9qOW-s2npz4Csv5FwcXt0fa4esbXXIKINjqZMh9NDDb23bUabSag
|
token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjI4NDY0MjAzMzA2NDQwMTQ2NDQifQ.eyJpc3MiOiJDTj1QcmVsdWRlIElkZW50aXR5IFNlcnZpY2UsT1U9Q01CVSxPPVZNd2FyZSxMPVNvZmlh[...]HBOQQwEepXTNAaTv9gWMKwvPzktmKWyJFmC64FGomRyRyWiJMkLy3xmvYQERwxaDj_15-npz4Csv5FwcXt0fa # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
So now if I want to find out which images have been configured in vRA, I can ask:
|
So now if I want to find out which images have been configured in vRA, I can ask:
|
||||||
```shell
|
```shell
|
||||||
https GET vra.lab.bowdre.net/iaas/api/images "Authorization: Bearer $token"
|
https GET vra.lab.bowdre.net/iaas/api/images "Authorization: Bearer $token" # [tl! .cmd]
|
||||||
```
|
```
|
||||||
{{% notice note "Request Items" %}}
|
{{% notice note "Request Items" %}}
|
||||||
Remember from above that HTTPie will automatically insert key/value pairs separated by a colon into the request header.
|
Remember from above that HTTPie will automatically insert key/value pairs separated by a colon into the request header.
|
||||||
{{% /notice %}}
|
{{% /notice %}}
|
||||||
|
|
||||||
And I'll get back some headers followed by a JSON object detailing the defined image mappings broken up by region:
|
And I'll get back some headers followed by a JSON object detailing the defined image mappings broken up by region:
|
||||||
```json {hl_lines=[11,14,37,40,53,56]}
|
```json
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
{
|
{
|
||||||
"content": [
|
"content": [
|
||||||
{
|
{
|
||||||
"_links": {
|
"_links": {
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
||||||
}
|
|
||||||
},
|
|
||||||
"externalRegionId": "Datacenter:datacenter-39056",
|
|
||||||
"mapping": {
|
|
||||||
"Photon 4": {
|
|
||||||
"_links": {
|
|
||||||
"region": {
|
|
||||||
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"cloudConfig": "",
|
|
||||||
"constraints": [],
|
|
||||||
"description": "photon-arm",
|
|
||||||
"externalId": "50023810-ae56-3c58-f374-adf6e0645886",
|
|
||||||
"externalRegionId": "Datacenter:datacenter-39056",
|
|
||||||
"id": "8885e87d8a5898cf12b5abc3e5c715e5a65f7179",
|
|
||||||
"isPrivate": false,
|
|
||||||
"name": "photon-arm",
|
|
||||||
"osFamily": "LINUX"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"_links": {
|
|
||||||
"region": {
|
|
||||||
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"externalRegionId": "Datacenter:datacenter-1001",
|
|
||||||
"mapping": {
|
|
||||||
"Photon 4": {
|
|
||||||
"_links": {
|
|
||||||
"region": {
|
|
||||||
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"cloudConfig": "",
|
|
||||||
"constraints": [],
|
|
||||||
"description": "photon",
|
|
||||||
"externalId": "50028cf1-88b8-52e8-58a1-b8354d4207b0",
|
|
||||||
"externalRegionId": "Datacenter:datacenter-1001",
|
|
||||||
"id": "d417648249e9740d7561188fa2a3a3ab4e8ccf85",
|
|
||||||
"isPrivate": false,
|
|
||||||
"name": "photon",
|
|
||||||
"osFamily": "LINUX"
|
|
||||||
},
|
|
||||||
"Windows Server 2019": {
|
|
||||||
"_links": {
|
|
||||||
"region": {
|
|
||||||
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"cloudConfig": "",
|
|
||||||
"constraints": [],
|
|
||||||
"description": "ws2019",
|
|
||||||
"externalId": "500235ad-1022-fec3-8ad1-00433beee103",
|
|
||||||
"externalRegionId": "Datacenter:datacenter-1001",
|
|
||||||
"id": "7e05f4e57ac55135cf7a7f8b951aa8ccfcc335d8",
|
|
||||||
"isPrivate": false,
|
|
||||||
"name": "ws2019",
|
|
||||||
"osFamily": "WINDOWS"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
],
|
},
|
||||||
"numberOfElements": 2,
|
"externalRegionId": "Datacenter:datacenter-39056",
|
||||||
"totalElements": 2
|
"mapping": {
|
||||||
|
"Photon 4": { // [tl! focus]
|
||||||
|
"_links": {
|
||||||
|
"region": {
|
||||||
|
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" // [tl! focus]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"cloudConfig": "",
|
||||||
|
"constraints": [],
|
||||||
|
"description": "photon-arm",
|
||||||
|
"externalId": "50023810-ae56-3c58-f374-adf6e0645886",
|
||||||
|
"externalRegionId": "Datacenter:datacenter-39056",
|
||||||
|
"id": "8885e87d8a5898cf12b5abc3e5c715e5a65f7179",
|
||||||
|
"isPrivate": false,
|
||||||
|
"name": "photon-arm",
|
||||||
|
"osFamily": "LINUX"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"_links": {
|
||||||
|
"region": {
|
||||||
|
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"externalRegionId": "Datacenter:datacenter-1001",
|
||||||
|
"mapping": {
|
||||||
|
"Photon 4": { // [tl! focus]
|
||||||
|
"_links": {
|
||||||
|
"region": {
|
||||||
|
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" // [tl! focus]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"cloudConfig": "",
|
||||||
|
"constraints": [],
|
||||||
|
"description": "photon",
|
||||||
|
"externalId": "50028cf1-88b8-52e8-58a1-b8354d4207b0",
|
||||||
|
"externalRegionId": "Datacenter:datacenter-1001",
|
||||||
|
"id": "d417648249e9740d7561188fa2a3a3ab4e8ccf85",
|
||||||
|
"isPrivate": false,
|
||||||
|
"name": "photon",
|
||||||
|
"osFamily": "LINUX"
|
||||||
|
},
|
||||||
|
"Windows Server 2019": { // [tl! focus]
|
||||||
|
"_links": {
|
||||||
|
"region": {
|
||||||
|
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" // [tl! focus]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"cloudConfig": "",
|
||||||
|
"constraints": [],
|
||||||
|
"description": "ws2019",
|
||||||
|
"externalId": "500235ad-1022-fec3-8ad1-00433beee103",
|
||||||
|
"externalRegionId": "Datacenter:datacenter-1001",
|
||||||
|
"id": "7e05f4e57ac55135cf7a7f8b951aa8ccfcc335d8",
|
||||||
|
"isPrivate": false,
|
||||||
|
"name": "ws2019",
|
||||||
|
"osFamily": "WINDOWS"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"numberOfElements": 2,
|
||||||
|
"totalElements": 2
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
This doesn't give me the *name* of the regions, but I could use the `_links.region.href` data to quickly match up images which exist in a given region.[^foreshadowing]
|
This doesn't give me the *name* of the regions, but I could use the `_links.region.href` data to quickly match up images which exist in a given region.[^foreshadowing]
|
||||||
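For example, assuming the response body were saved to a file and `jq` were available (neither is part of the original walkthrough), a one-liner could pair each image name with its region link:
```shell
# print "<region href>  <image name>" pairs from the saved response
jq -r '.content[] | ._links.region.href as $region | .mapping | keys[] | "\($region)\t\(.)"' images.json
```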
|
@ -377,6 +379,7 @@ I'll head into **Library > Actions** to create a new action inside my `com.virtu
|
||||||
| `variableName` | `string` | Name of desired variable inside Configuration |
|
| `variableName` | `string` | Name of desired variable inside Configuration |
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
JavaScript: getConfigValue action
|
JavaScript: getConfigValue action
|
||||||
Inputs: path (string), configurationName (string), variableName (string)
|
Inputs: path (string), configurationName (string), variableName (string)
|
||||||
|
@ -397,6 +400,7 @@ Next, I'll create another action in my `com.virtuallypotato.utility` module whic
|
||||||
![vraLogin action](vraLogin_action.png)
|
![vraLogin action](vraLogin_action.png)
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
JavaScript: vraLogin action
|
JavaScript: vraLogin action
|
||||||
Inputs: none
|
Inputs: none
|
||||||
|
@ -429,6 +433,7 @@ I like to clean up after myself so I'm also going to create a `vraLogout` action
|
||||||
| `token` | `string` | Auth token of the session to destroy |
|
| `token` | `string` | Auth token of the session to destroy |
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
JavaScript: vraLogout action
|
JavaScript: vraLogout action
|
||||||
Inputs: token (string)
|
Inputs: token (string)
|
||||||
|
@ -459,6 +464,7 @@ My final "utility" action for this effort will run in between `vraLogin` and `vr
|
||||||
|`content`|`string`|Any additional data to pass with the request|
|
|`content`|`string`|Any additional data to pass with the request|
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
JavaScript: vraExecute action
|
JavaScript: vraExecute action
|
||||||
Inputs: token (string), method (string), uri (string), content (string)
|
Inputs: token (string), method (string), uri (string), content (string)
|
||||||
|
@ -496,7 +502,8 @@ This action will:
|
||||||
Other actions wanting to interact with the vRA REST API will follow the same basic formula, though with some more logic and capability baked in.
|
Other actions wanting to interact with the vRA REST API will follow the same basic formula, though with some more logic and capability baked in.
|
||||||
|
|
||||||
Anyway, here's my first swing:
|
Anyway, here's my first swing:
|
||||||
```JavaScript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
JavaScript: vraTester action
|
JavaScript: vraTester action
|
||||||
Inputs: none
|
Inputs: none
|
||||||
|
@ -513,7 +520,8 @@ Pretty simple, right? Let's see if it works:
|
||||||
![vraTester action](vraTester_action.png)
|
![vraTester action](vraTester_action.png)
|
||||||
|
|
||||||
It did! That result is a bit hard to parse visually, though, so I'm going to prettify it a bit:
|
It did! That result is a bit hard to parse visually, though, so I'm going to prettify it a bit:
|
||||||
```json {hl_lines=[17,35,56,74]}
|
```json
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"tags": [],
|
"tags": [],
|
||||||
|
@ -530,7 +538,7 @@ It did! Though that result is a bit hard to parse visually, so I'm going to pret
|
||||||
"folder": "vRA_Deploy",
|
"folder": "vRA_Deploy",
|
||||||
"externalRegionId": "Datacenter:datacenter-1001",
|
"externalRegionId": "Datacenter:datacenter-1001",
|
||||||
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
||||||
"name": "NUC",
|
"name": "NUC", // [tl! focus]
|
||||||
"id": "3d4f048a-385d-4759-8c04-117a170d060c",
|
"id": "3d4f048a-385d-4759-8c04-117a170d060c",
|
||||||
"updatedAt": "2022-06-02",
|
"updatedAt": "2022-06-02",
|
||||||
"organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9",
|
"organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9",
|
||||||
|
@ -548,7 +556,7 @@ It did! Though that result is a bit hard to parse visually, so I'm going to pret
|
||||||
"href": "/iaas/api/zones/3d4f048a-385d-4759-8c04-117a170d060c"
|
"href": "/iaas/api/zones/3d4f048a-385d-4759-8c04-117a170d060c"
|
||||||
},
|
},
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136"
|
"href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" // [tl! focus]
|
||||||
},
|
},
|
||||||
"cloud-account": {
|
"cloud-account": {
|
||||||
"href": "/iaas/api/cloud-accounts/75d29635-f128-4b85-8cf9-95a9e5981c68"
|
"href": "/iaas/api/cloud-accounts/75d29635-f128-4b85-8cf9-95a9e5981c68"
|
||||||
|
@ -569,7 +577,7 @@ It did! Though that result is a bit hard to parse visually, so I'm going to pret
|
||||||
},
|
},
|
||||||
"externalRegionId": "Datacenter:datacenter-39056",
|
"externalRegionId": "Datacenter:datacenter-39056",
|
||||||
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
"cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68",
|
||||||
"name": "QTZ",
|
"name": "QTZ", // [tl! focus]
|
||||||
"id": "84470591-74a2-4659-87fd-e5d174a679a2",
|
"id": "84470591-74a2-4659-87fd-e5d174a679a2",
|
||||||
"updatedAt": "2022-06-02",
|
"updatedAt": "2022-06-02",
|
||||||
"organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9",
|
"organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9",
|
||||||
|
@ -587,7 +595,7 @@ It did! Though that result is a bit hard to parse visually, so I'm going to pret
|
||||||
"href": "/iaas/api/zones/84470591-74a2-4659-87fd-e5d174a679a2"
|
"href": "/iaas/api/zones/84470591-74a2-4659-87fd-e5d174a679a2"
|
||||||
},
|
},
|
||||||
"region": {
|
"region": {
|
||||||
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f"
|
"href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" // [tl! focus]
|
||||||
},
|
},
|
||||||
"cloud-account": {
|
"cloud-account": {
|
||||||
"href": "/iaas/api/cloud-accounts/75d29635-f128-4b85-8cf9-95a9e5981c68"
|
"href": "/iaas/api/cloud-accounts/75d29635-f128-4b85-8cf9-95a9e5981c68"
|
||||||
|
@ -610,6 +618,7 @@ This action will basically just repeat the call that I tested above in `vraTeste
|
||||||
![vraGetZones action](vraGetZones_action.png)
|
![vraGetZones action](vraGetZones_action.png)
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
JavaScript: vraGetZones action
|
JavaScript: vraGetZones action
|
||||||
Inputs: none
|
Inputs: none
|
||||||
|
@ -640,6 +649,7 @@ Oh, and the whole thing is wrapped in a conditional so that the code only execut
|
||||||
| `zoneName` | `string` | The name of the Zone selected in the request form |
|
| `zoneName` | `string` | The name of the Zone selected in the request form |
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/* JavaScript: vraGetImages action
|
/* JavaScript: vraGetImages action
|
||||||
Inputs: zoneName (string)
|
Inputs: zoneName (string)
|
||||||
Return type: array/string
|
Return type: array/string
|
||||||
|
@ -709,6 +719,7 @@ Next I'll repeat the same steps to create a new `image` input. This time, though
|
||||||
|
|
||||||
The full code for my template now looks like this:
|
The full code for my template now looks like this:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
formatVersion: 1
|
formatVersion: 1
|
||||||
inputs:
|
inputs:
|
||||||
zoneName:
|
zoneName:
|
||||||
|
|
|
@ -50,21 +50,21 @@ I've described the [process of creating a new instance on OCI in a past post](/f
|
||||||
|
|
||||||
### Prepare the server
|
### Prepare the server
|
||||||
Once the server's up and running, I go through the usual steps of applying any available updates:
|
Once the server's up and running, I go through the usual steps of applying any available updates:
|
||||||
```bash
|
```shell
|
||||||
sudo apt update
|
sudo apt update # [tl! .cmd:1]
|
||||||
sudo apt upgrade
|
sudo apt upgrade
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Install Tailscale
|
#### Install Tailscale
|
||||||
And then I'll install Tailscale using their handy-dandy bootstrap script:
|
And then I'll install Tailscale using their handy-dandy bootstrap script:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
curl -fsSL https://tailscale.com/install.sh | sh
|
curl -fsSL https://tailscale.com/install.sh | sh # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
When I bring up the Tailscale interface, I'll use the `--advertise-tags` flag to identify the server with an [ACL tag](https://tailscale.com/kb/1068/acl-tags/). ([Within my tailnet](/secure-networking-made-simple-with-tailscale/#acls)[^tailnet], all of my other clients are able to connect to devices bearing the `cloud` tag but `cloud` servers can only reach back to other devices for performing DNS lookups.)
|
When I bring up the Tailscale interface, I'll use the `--advertise-tags` flag to identify the server with an [ACL tag](https://tailscale.com/kb/1068/acl-tags/). ([Within my tailnet](/secure-networking-made-simple-with-tailscale/#acls)[^tailnet], all of my other clients are able to connect to devices bearing the `cloud` tag but `cloud` servers can only reach back to other devices for performing DNS lookups.)
|
||||||
```bash
|
```shell
|
||||||
sudo tailscale up --advertise-tags "tag:cloud"
|
sudo tailscale up --advertise-tags "tag:cloud" # [tl! .cmd]
|
||||||
```
|
```
|
||||||
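A plain `tailscale status` afterward is a quick way to confirm the node actually joined the tailnet (the applied tag is also visible in the admin console):
```shell
# output varies by tailnet; the new node should appear with its tailnet IP
tailscale status
```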
|
|
||||||
[^tailnet]: [Tailscale's term](https://tailscale.com/kb/1136/tailnet/) for the private network which securely links Tailscale-connected devices.
|
[^tailnet]: [Tailscale's term](https://tailscale.com/kb/1136/tailnet/) for the private network which securely links Tailscale-connected devices.
|
||||||
|
@ -72,22 +72,22 @@ sudo tailscale up --advertise-tags "tag:cloud"
|
||||||
#### Install Docker
|
#### Install Docker
|
||||||
Next I install Docker and `docker-compose`:
|
Next I install Docker and `docker-compose`:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
sudo apt install ca-certificates curl gnupg lsb-release
|
sudo apt install ca-certificates curl gnupg lsb-release # [tl! .cmd:2]
|
||||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||||
echo \
|
echo \
|
||||||
"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
|
"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
|
||||||
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||||
sudo apt update
|
sudo apt update # [tl! .cmd:1]
|
||||||
sudo apt install docker-ce docker-ce-cli containerd.io docker-compose docker-compose-plugin
|
sudo apt install docker-ce docker-ce-cli containerd.io docker-compose docker-compose-plugin
|
||||||
```
|
```
|
||||||
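A quick version check confirms everything landed (exact versions will vary):
```shell
# verify the client and compose tooling installed cleanly
docker --version
docker-compose --version
```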
|
|
||||||
#### Configure firewall
|
#### Configure firewall
|
||||||
This server automatically had an iptables firewall rule configured to permit SSH access. For Gitea, I'll also need to configure HTTP/HTTPS access. [As before](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#firewall-configuration), I need to be mindful of the explicit `REJECT all` rule at the bottom of the `INPUT` chain:
|
This server automatically had an iptables firewall rule configured to permit SSH access. For Gitea, I'll also need to configure HTTP/HTTPS access. [As before](/federated-matrix-server-synapse-on-oracle-clouds-free-tier/#firewall-configuration), I need to be mindful of the explicit `REJECT all` rule at the bottom of the `INPUT` chain:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
$ sudo iptables -L INPUT --line-numbers
|
sudo iptables -L INPUT --line-numbers # [tl! .cmd]
|
||||||
Chain INPUT (policy ACCEPT)
|
Chain INPUT (policy ACCEPT) # [tl! .nocopy:8]
|
||||||
num target prot opt source destination
|
num target prot opt source destination
|
||||||
1 ts-input all -- anywhere anywhere
|
1 ts-input all -- anywhere anywhere
|
||||||
2 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
2 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
||||||
|
@ -99,32 +99,31 @@ num target prot opt source destination
|
||||||
```
|
```
|
||||||
|
|
||||||
So I'll insert the new rules at line 6:
|
So I'll insert the new rules at line 6:
|
||||||
```bash
|
```shell
|
||||||
sudo iptables -L INPUT --line-numbers
|
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 80 -j ACCEPT # [tl! .cmd:1]
|
||||||
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 80 -j ACCEPT
|
|
||||||
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 443 -j ACCEPT
|
sudo iptables -I INPUT 6 -m state --state NEW -p tcp --dport 443 -j ACCEPT
|
||||||
```
|
```
|
||||||
|
|
||||||
And confirm that it did what I wanted it to:
|
And confirm that it did what I wanted it to:
|
||||||
```bash
|
```shell
|
||||||
$ sudo iptables -L INPUT --line-numbers
|
sudo iptables -L INPUT --line-numbers # [tl! focus .cmd]
|
||||||
Chain INPUT (policy ACCEPT)
|
Chain INPUT (policy ACCEPT) # [tl! .nocopy:10]
|
||||||
num target prot opt source destination
|
num target prot opt source destination
|
||||||
1 ts-input all -- anywhere anywhere
|
1 ts-input all -- anywhere anywhere
|
||||||
2 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
2 ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
|
||||||
3 ACCEPT icmp -- anywhere anywhere
|
3 ACCEPT icmp -- anywhere anywhere
|
||||||
4 ACCEPT all -- anywhere anywhere
|
4 ACCEPT all -- anywhere anywhere
|
||||||
5 ACCEPT udp -- anywhere anywhere udp spt:ntp
|
5 ACCEPT udp -- anywhere anywhere udp spt:ntp
|
||||||
6 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:https
|
6 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:https # [tl! focus:1]
|
||||||
7 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:http
|
7 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:http
|
||||||
8 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh
|
8 ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh
|
||||||
9 REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
|
9 REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
|
||||||
```
|
```
|
||||||
|
|
||||||
That looks good, so let's save the new rules:
|
That looks good, so let's save the new rules:
|
||||||
```bash
|
```shell
|
||||||
$ sudo netfilter-persistent save
|
sudo netfilter-persistent save # [tl! .cmd]
|
||||||
run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save
|
run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save # [tl! .nocopy:1]
|
||||||
run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save
|
run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -139,19 +138,19 @@ I'm now ready to move on with installing Gitea itself.
|
||||||
I'll start with creating a `git` user. This account will be set as the owner of the data volume used by the Gitea container, but will also (perhaps more importantly) facilitate [SSH passthrough](https://docs.gitea.io/en-us/install-with-docker/#ssh-container-passthrough) into the container for secure git operations.
|
I'll start with creating a `git` user. This account will be set as the owner of the data volume used by the Gitea container, but will also (perhaps more importantly) facilitate [SSH passthrough](https://docs.gitea.io/en-us/install-with-docker/#ssh-container-passthrough) into the container for secure git operations.
|
||||||
|
|
||||||
Here's where I create the account and also generate what will become the SSH key used by the git server:
|
Here's where I create the account and also generate what will become the SSH key used by the git server:
|
||||||
```bash
|
```shell
|
||||||
sudo useradd -s /bin/bash -m git
|
sudo useradd -s /bin/bash -m git # [tl! .cmd:1]
|
||||||
sudo -u git ssh-keygen -t ecdsa -C "Gitea Host Key"
|
sudo -u git ssh-keygen -t ecdsa -C "Gitea Host Key"
|
||||||
```
|
```
|
||||||
|
|
||||||
The `git` user's SSH public key gets added as-is directly to that user's `authorized_keys` file:
|
The `git` user's SSH public key gets added as-is directly to that user's `authorized_keys` file:
|
||||||
```bash
|
```shell
|
||||||
sudo -u git cat /home/git/.ssh/id_ecdsa.pub | sudo -u git tee -a /home/git/.ssh/authorized_keys
|
sudo -u git cat /home/git/.ssh/id_ecdsa.pub | sudo -u git tee -a /home/git/.ssh/authorized_keys # [tl! .cmd:1]
|
||||||
sudo -u git chmod 600 /home/git/.ssh/authorized_keys
|
sudo -u git chmod 600 /home/git/.ssh/authorized_keys
|
||||||
```
|
```
|
||||||
|
|
||||||
When other users add their SSH public keys into Gitea's web UI, those will get added to `authorized_keys` with a little something extra: an alternate command to perform git actions instead of just SSH ones:
|
When other users add their SSH public keys into Gitea's web UI, those will get added to `authorized_keys` with a little something extra: an alternate command to perform git actions instead of just SSH ones:
|
||||||
```
|
```text
|
||||||
command="/usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-1",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty <user pubkey>
|
command="/usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-1",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty <user pubkey>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -160,12 +159,13 @@ No users have added their keys to Gitea just yet so if you look at `/home/git/.s
|
||||||
{{% /notice %}}
|
{{% /notice %}}
|
||||||
|
|
||||||
So I'll go ahead and create that extra command:
|
So I'll go ahead and create that extra command:
|
||||||
```bash
|
```shell
|
||||||
cat <<"EOF" | sudo tee /usr/local/bin/gitea
|
cat <<"EOF" | sudo tee /usr/local/bin/gitea # [tl! .cmd]
|
||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
|
ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
|
||||||
EOF
|
EOF
|
||||||
sudo chmod +x /usr/local/bin/gitea
|
|
||||||
|
sudo chmod +x /usr/local/bin/gitea # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
So when I use a `git` command to interact with the server via SSH, the commands will get relayed into the Docker container on port 2222.
|
So when I use a `git` command to interact with the server via SSH, the commands will get relayed into the Docker container on port 2222.
|
||||||
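Once a user key has been registered in Gitea, that passthrough can be exercised with a bare SSH connection (the hostname below is illustrative); Gitea should answer with a greeting rather than a shell:
```shell
# expect something like "Hi there! You've successfully authenticated..."
ssh -T git@git.example.com
```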
|
@ -174,26 +174,27 @@ So when I use a `git` command to interact with the server via SSH, the commands
|
||||||
That takes care of most of the prep work, so now I'm ready to create the `docker-compose.yaml` file which will tell Docker how to host Gitea.
|
That takes care of most of the prep work, so now I'm ready to create the `docker-compose.yaml` file which will tell Docker how to host Gitea.
|
||||||
|
|
||||||
I'm going to place this in `/opt/gitea`:
|
I'm going to place this in `/opt/gitea`:
|
||||||
```bash
|
```shell
|
||||||
sudo mkdir -p /opt/gitea
|
sudo mkdir -p /opt/gitea # [tl! .cmd:1]
|
||||||
cd /opt/gitea
|
cd /opt/gitea
|
||||||
```
|
```
|
||||||
|
|
||||||
And I want to be sure that my new `git` user owns the `./data` directory, which will be where the git contents get stored:
|
And I want to be sure that my new `git` user owns the `./data` directory, which will be where the git contents get stored:
|
||||||
```bash
|
```shell
|
||||||
sudo mkdir data
|
sudo mkdir data # [tl! .cmd:1]
|
||||||
sudo chown git:git -R data
|
sudo chown git:git -R data
|
||||||
```
|
```
|
||||||
|
|
||||||
Now to create the file:
|
Now to create the file:
|
||||||
```bash
|
```shell
|
||||||
sudo vi docker-compose.yaml
|
sudo vi docker-compose.yaml # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
The basic contents of the file came from the [Gitea documentation for Installation with Docker](https://docs.gitea.io/en-us/install-with-docker/), but I also included some (highlighted) additional environment variables based on the [Configuration Cheat Sheet](https://docs.gitea.io/en-us/config-cheat-sheet/):
|
The basic contents of the file came from the [Gitea documentation for Installation with Docker](https://docs.gitea.io/en-us/install-with-docker/), but I also included some (highlighted) additional environment variables based on the [Configuration Cheat Sheet](https://docs.gitea.io/en-us/config-cheat-sheet/):
|
||||||
|
|
||||||
`docker-compose.yaml`:
|
`docker-compose.yaml`:
|
||||||
```yaml {hl_lines=["12-13","19-31",38,43]}
|
```yaml {linenos=true,hl_lines=["12-13","19-31",38,43]}
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
version: "3"
|
version: "3"
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
|
@ -205,14 +206,14 @@ services:
|
||||||
image: gitea/gitea:latest
|
image: gitea/gitea:latest
|
||||||
container_name: gitea
|
container_name: gitea
|
||||||
environment:
|
environment:
|
||||||
- USER_UID=1003
|
- USER_UID=1003 # [tl! highlight:1]
|
||||||
- USER_GID=1003
|
- USER_GID=1003
|
||||||
- GITEA__database__DB_TYPE=postgres
|
- GITEA__database__DB_TYPE=postgres
|
||||||
- GITEA__database__HOST=db:5432
|
- GITEA__database__HOST=db:5432
|
||||||
- GITEA__database__NAME=gitea
|
- GITEA__database__NAME=gitea
|
||||||
- GITEA__database__USER=gitea
|
- GITEA__database__USER=gitea
|
||||||
- GITEA__database__PASSWD=gitea
|
- GITEA__database__PASSWD=gitea
|
||||||
- GITEA____APP_NAME=Gitea
|
- GITEA____APP_NAME=Gitea # [tl! highlight:start]
|
||||||
- GITEA__log__MODE=file
|
- GITEA__log__MODE=file
|
||||||
- GITEA__openid__ENABLE_OPENID_SIGNIN=false
|
- GITEA__openid__ENABLE_OPENID_SIGNIN=false
|
||||||
- GITEA__other__SHOW_FOOTER_VERSION=false
|
- GITEA__other__SHOW_FOOTER_VERSION=false
|
||||||
|
@ -224,19 +225,19 @@ services:
|
||||||
- GITEA__server__LANDING_PAGE=explore
|
- GITEA__server__LANDING_PAGE=explore
|
||||||
- GITEA__service__DISABLE_REGISTRATION=true
|
- GITEA__service__DISABLE_REGISTRATION=true
|
||||||
- GITEA__service_0X2E_explore__DISABLE_USERS_PAGE=true
|
- GITEA__service_0X2E_explore__DISABLE_USERS_PAGE=true
|
||||||
- GITEA__ui__DEFAULT_THEME=arc-green
|
- GITEA__ui__DEFAULT_THEME=arc-green # [tl! highlight:end]
|
||||||
|
|
||||||
restart: always
|
restart: always
|
||||||
networks:
|
networks:
|
||||||
- gitea
|
- gitea
|
||||||
volumes:
|
volumes:
|
||||||
- ./data:/data
|
- ./data:/data
|
||||||
- /home/git/.ssh/:/data/git/.ssh
|
- /home/git/.ssh/:/data/git/.ssh # [tl! highlight]
|
||||||
- /etc/timezone:/etc/timezone:ro
|
- /etc/timezone:/etc/timezone:ro
|
||||||
- /etc/localtime:/etc/localtime:ro
|
- /etc/localtime:/etc/localtime:ro
|
||||||
ports:
|
ports:
|
||||||
- "3000:3000"
|
- "3000:3000"
|
||||||
- "127.0.0.1:2222:22"
|
- "127.0.0.1:2222:22" # [tl! highlight]
|
||||||
depends_on:
|
depends_on:
|
||||||
- db
|
- db
|
||||||
|
|
||||||
|
@ -279,21 +280,22 @@ Let's go through the extra configs in a bit more detail:
|
||||||
Beyond the environment variables, I also defined a few additional options to allow the SSH passthrough to function. Mounting the `git` user's SSH config directory into the container will ensure that user keys defined in Gitea will also be reflected outside of the container, and setting the container to listen on local port `2222` will allow it to receive the forwarded SSH connections:
|
Beyond the environment variables, I also defined a few additional options to allow the SSH passthrough to function. Mounting the `git` user's SSH config directory into the container will ensure that user keys defined in Gitea will also be reflected outside of the container, and setting the container to listen on local port `2222` will allow it to receive the forwarded SSH connections:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
volumes:
|
volumes: # [tl! focus]
|
||||||
[...]
|
- ./data:/data
|
||||||
- /home/git/.ssh/:/data/git/.ssh
|
- /home/git/.ssh/:/data/git/.ssh # [tl! focus]
|
||||||
[...]
|
- /etc/timezone:/etc/timezone:ro
|
||||||
ports:
|
- /etc/localtime:/etc/localtime:ro
|
||||||
[...]
|
ports: # [tl! focus]
|
||||||
- "127.0.0.1:2222:22"
|
- "3000:3000"
|
||||||
|
- "127.0.0.1:2222:22" # [tl! focus]
|
||||||
```
|
```
|
||||||
|
|
||||||
With the config in place, I'm ready to fire it up:
|
With the config in place, I'm ready to fire it up:
|
||||||
|
|
||||||
#### Start containers
|
#### Start containers
|
||||||
Starting Gitea is as simple as
|
Starting Gitea is as simple as
|
||||||
```bash
|
```shell
|
||||||
sudo docker-compose up -d
|
sudo docker-compose up -d # [tl! .cmd]
|
||||||
```
|
```
|
||||||
which will spawn both the Gitea server as well as a `postgres` database to back it.
|
which will spawn both the Gitea server as well as a `postgres` database to back it.
|
||||||
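To make sure both containers actually came up, a quick status check from `/opt/gitea` should show each service as `Up`:
```shell
# the gitea and db services should both be running
sudo docker-compose ps
```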
|
|
||||||
|
@ -305,8 +307,8 @@ I've [written before](/federated-matrix-server-synapse-on-oracle-clouds-free-tie
|
||||||
#### Install Caddy
|
#### Install Caddy
|
||||||
So exactly how simple does Caddy make this? Well, let's start with installing Caddy on the system:
|
So exactly how simple does Caddy make this? Well, let's start with installing Caddy on the system:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https
|
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https # [tl! .cmd:4]
|
||||||
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
|
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
|
||||||
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
|
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
|
||||||
sudo apt update
|
sudo apt update
|
||||||
|
@ -315,14 +317,14 @@ sudo apt install caddy
|
||||||
|
|
||||||
#### Configure Caddy
|
#### Configure Caddy
|
||||||
Configuring Caddy is as simple as creating a Caddyfile:
|
Configuring Caddy is as simple as creating a Caddyfile:
|
||||||
```bash
|
```shell
|
||||||
sudo vi /etc/caddy/Caddyfile
|
sudo vi /etc/caddy/Caddyfile # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Within that file, I tell it which fully-qualified domain name(s) I'd like it to respond to (and manage SSL certificates for), as well as that I'd like it to function as a reverse proxy and send the incoming traffic to the same port `3000` that's used by the Docker container:
|
Within that file, I tell it which fully-qualified domain name(s) I'd like it to respond to (and manage SSL certificates for), as well as that I'd like it to function as a reverse proxy and send the incoming traffic to the same port `3000` that's used by the Docker container:
|
||||||
```
|
```text
|
||||||
git.bowdre.net {
|
git.bowdre.net {
|
||||||
reverse_proxy localhost:3000
|
reverse_proxy localhost:3000
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -330,8 +332,8 @@ That's it. I don't need to worry about headers or ACME configurations or anythin
|
||||||
|
|
||||||
#### Start Caddy
|
#### Start Caddy
|
||||||
All that's left at this point is to start up Caddy:
|
All that's left at this point is to start up Caddy:
|
||||||
```bash
|
```shell
|
||||||
sudo systemctl enable caddy
|
sudo systemctl enable caddy # [tl! .cmd:2]
|
||||||
sudo systemctl start caddy
|
sudo systemctl start caddy
|
||||||
sudo systemctl restart caddy
|
sudo systemctl restart caddy
|
||||||
```
|
```
|
||||||
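Once Caddy has had a moment to request a certificate, a header check against the domain from the Caddyfile makes for a reasonable smoke test (sketch only; exact output will vary):
```shell
# expect an HTTP success (or redirect) status once the cert is issued
curl -sI https://git.bowdre.net | head -1
```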
|
@ -358,25 +360,26 @@ And then I can log out and log back in with my new non-admin identity!
|
||||||
#### Add SSH public key
|
#### Add SSH public key
|
||||||
Associating a public key with my new Gitea account will allow me to easily authenticate my pushes from the command line. I can create a new SSH public/private keypair by following [GitHub's instructions](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent):
|
Associating a public key with my new Gitea account will allow me to easily authenticate my pushes from the command line. I can create a new SSH public/private keypair by following [GitHub's instructions](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent):
|
||||||
```shell
|
```shell
|
||||||
ssh-keygen -t ed25519 -C "user@example.com"
|
ssh-keygen -t ed25519 -C "user@example.com" # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll view the contents of the public key - and go ahead and copy the output for future use:
|
I'll view the contents of the public key - and go ahead and copy the output for future use:
|
||||||
```
|
```shell
|
||||||
; cat ~/.ssh/id_ed25519.pub
|
cat ~/.ssh/id_ed25519.pub # [tl! .cmd]
|
||||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5ExSsQfr6pAFBEZ7yx0oljSnpnOixvp8DS26STcx2J user@example.com
|
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5ExSsQfr6pAFBEZ7yx0oljSnpnOixvp8DS26STcx2J user@example.com # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
Back in the Gitea UI, I'll click the user menu up top and select **Settings**, then the *SSH / GPG Keys* tab, and click the **Add Key** button:
|
Back in the Gitea UI, I'll click the user menu up top and select **Settings**, then the *SSH / GPG Keys* tab, and click the **Add Key** button:
|
||||||
|
|
||||||
![User menu](user_menu.png)
|
![User menu](user_menu.png)
|
||||||
![Adding a public key](add_key.png)
|
![Adding a public key](add_key.png)
|
||||||
|
|
||||||
I can give the key a name and then paste in that public key, and then click the lower **Add Key** button to insert the new key.
|
I can give the key a name and then paste in that public key, and then click the lower **Add Key** button to insert the new key.
|
||||||
|
|
||||||
To verify that the SSH passthrough magic I [configured earlier](#prepare-git-user) is working, I can take a look at `git`'s `authorized_keys` file:
|
To verify that the SSH passthrough magic I [configured earlier](#prepare-git-user) is working, I can take a look at `git`'s `authorized_keys` file:
|
||||||
```shell{hl_lines=3}
|
```shell
|
||||||
; sudo tail -2 /home/git/.ssh/authorized_keys
|
sudo tail -2 /home/git/.ssh/authorized_keys # [tl! .cmd]
|
||||||
# gitea public key
|
# gitea public key [tl! .nocopy:1]
|
||||||
command="/usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-3",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,no-user-rc,restrict ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5ExSsQfr6pAFBEZ7yx0oljSnpnOixvp8DS26STcx2J user@example.com
|
command="/usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-3",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,no-user-rc,restrict ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5ExSsQfr6pAFBEZ7yx0oljSnpnOixvp8DS26STcx2J user@example.com
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -389,7 +392,7 @@ I'm already limiting this server's exposure by blocking inbound SSH (except for
|
||||||
|
|
||||||
Installing Fail2ban is simple:
|
Installing Fail2ban is simple:
|
||||||
```shell
|
```shell
|
||||||
sudo apt update
|
sudo apt update # [tl! .cmd:1]
|
||||||
sudo apt install fail2ban
|
sudo apt install fail2ban
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -405,10 +408,11 @@ Specifically, I'll want to watch `log/gitea.log` for messages like the following
|
||||||
|
|
||||||
So let's create that filter:
|
So let's create that filter:
|
||||||
```shell
|
```shell
|
||||||
sudo vi /etc/fail2ban/filter.d/gitea.conf
|
sudo vi /etc/fail2ban/filter.d/gitea.conf # [tl! .cmd]
|
||||||
```
|
|
||||||
`/etc/fail2ban/filter.d/gitea.conf`:
|
|
||||||
```
|
```
|
||||||
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# /etc/fail2ban/filter.d/gitea.conf
|
||||||
[Definition]
|
[Definition]
|
||||||
failregex = .*(Failed authentication attempt|invalid credentials).* from <HOST>
|
failregex = .*(Failed authentication attempt|invalid credentials).* from <HOST>
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
@ -416,10 +420,11 @@ ignoreregex =
|
||||||
|
|
||||||
Next I create the jail, which tells Fail2ban what to do:
|
Next I create the jail, which tells Fail2ban what to do:
|
||||||
```shell
|
```shell
|
||||||
sudo vi /etc/fail2ban/jail.d/gitea.conf
|
sudo vi /etc/fail2ban/jail.d/gitea.conf # [tl! .cmd]
|
||||||
```
|
|
||||||
`/etc/fail2ban/jail.d/gitea.conf`:
|
|
||||||
```
|
```
|
||||||
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# /etc/fail2ban/jail.d/gitea.conf
|
||||||
[gitea]
|
[gitea]
|
||||||
enabled = true
|
enabled = true
|
||||||
filter = gitea
|
filter = gitea
|
||||||
|
@ -434,14 +439,14 @@ This configures Fail2ban to watch the log file (`logpath`) inside the data volum
|
||||||
|
|
||||||
Then I just need to enable and start Fail2ban:
|
Then I just need to enable and start Fail2ban:
|
||||||
```shell
|
```shell
|
||||||
sudo systemctl enable fail2ban
|
sudo systemctl enable fail2ban # [tl! .cmd:1]
|
||||||
sudo systemctl start fail2ban
|
sudo systemctl start fail2ban
|
||||||
```
|
```
|
||||||
|
|
||||||
To verify that it's working, I can deliberately fail to log in to the web interface and watch `/var/log/fail2ban.log`:
|
To verify that it's working, I can deliberately fail to log in to the web interface and watch `/var/log/fail2ban.log`:
|
||||||
```shell
|
```shell
|
||||||
; sudo tail -f /var/log/fail2ban.log
|
sudo tail -f /var/log/fail2ban.log # [tl! .cmd]
|
||||||
2022-07-17 21:52:26,978 fail2ban.filter [36042]: INFO [gitea] Found ${MY_HOME_IP}| - 2022-07-17 21:52:26
|
2022-07-17 21:52:26,978 fail2ban.filter [36042]: INFO [gitea] Found ${MY_HOME_IP}| - 2022-07-17 21:52:26 # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
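The jail can also be queried directly with the `fail2ban-client` utility that ships with Fail2ban:
```shell
# shows the filter's hit count and any currently-banned addresses
sudo fail2ban-client status gitea
```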
|
|
||||||
Excellent, let's now move on to creating some content.
|
Excellent, let's now move on to creating some content.
|
||||||
|
@ -470,11 +475,11 @@ The real point of this whole exercise was to sync my Obsidian vault to a Git ser
|
||||||
Once it's created, the new-but-empty repository gives me instructions on how I can interact with it. Note that the SSH address uses the special `git.tadpole-jazz.ts.net` Tailscale domain name which is only accessible within my tailnet.
|
Once it's created, the new-but-empty repository gives me instructions on how I can interact with it. Note that the SSH address uses the special `git.tadpole-jazz.ts.net` Tailscale domain name which is only accessible within my tailnet.
|
||||||
|
|
||||||
|
|
||||||
![Emtpy repository](empty_repo.png)
|
![Empty repository](empty_repo.png)
|
||||||
|
|
||||||
Now I can follow the instructions to initialize my local Obsidian vault (stored at `~/obsidian-vault/`) as a git repository and perform my initial push to Gitea:
|
Now I can follow the instructions to initialize my local Obsidian vault (stored at `~/obsidian-vault/`) as a git repository and perform my initial push to Gitea:
|
||||||
```shell
|
```shell
|
||||||
cd ~/obsidian-vault/
|
cd ~/obsidian-vault/ # [tl! .cmd:5]
|
||||||
git init
|
git init
|
||||||
git add .
|
git add .
|
||||||
git commit -m "initial commit"
|
git commit -m "initial commit"
|
||||||
|
|
|
@ -24,12 +24,13 @@ Before even worrying about the SDK, I needed to [get a phpIPAM instance ready](h
|
||||||
|
|
||||||
Once phpIPAM was running and accessible via the web interface, I then used `openssl` to generate a self-signed certificate to be used for the SSL API connection:
|
Once phpIPAM was running and accessible via the web interface, I then used `openssl` to generate a self-signed certificate to be used for the SSL API connection:
|
||||||
```shell
|
```shell
|
||||||
sudo mkdir /etc/apache2/certificate
|
sudo mkdir /etc/apache2/certificate # [tl! .cmd:2]
|
||||||
cd /etc/apache2/certificate/
|
cd /etc/apache2/certificate/
|
||||||
sudo openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out apache-certificate.crt -keyout apache.key
|
sudo openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out apache-certificate.crt -keyout apache.key
|
||||||
```
|
```
|
||||||
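If you want to double-check what was generated, `openssl` can print the new certificate's subject and validity window (run from the same directory):
```shell
# inspect the self-signed certificate created above
sudo openssl x509 -in apache-certificate.crt -noout -subject -dates
```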
I edited the Apache config file to bind that new certificate on port 443, and to redirect requests on port 80 to port 443:
|
I edited the Apache config file to bind that new certificate on port 443, and to redirect requests on port 80 to port 443:
|
||||||
```xml
|
```text
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
<VirtualHost *:80>
|
<VirtualHost *:80>
|
||||||
ServerName ipam.lab.bowdre.net
|
ServerName ipam.lab.bowdre.net
|
||||||
Redirect permanent / https://ipam.lab.bowdre.net
|
Redirect permanent / https://ipam.lab.bowdre.net
|
||||||
|
@ -55,6 +56,8 @@ Remember how I've got a "Home" network as well as [several internal networks](/v
|
||||||
|
|
||||||
This is Ubuntu, so I edited `/etc/netplan/99-netcfg-vmware.yaml` to add the `routes` section at the bottom:
|
This is Ubuntu, so I edited `/etc/netplan/99-netcfg-vmware.yaml` to add the `routes` section at the bottom:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# /etc/netplan/99-netcfg-vmware.yaml
|
||||||
network:
|
network:
|
||||||
version: 2
|
version: 2
|
||||||
renderer: networkd
|
renderer: networkd
|
||||||
|
@ -70,20 +73,23 @@ network:
|
||||||
- lab.bowdre.net
|
- lab.bowdre.net
|
||||||
addresses:
|
addresses:
|
||||||
- 192.168.1.5
|
- 192.168.1.5
|
||||||
routes:
|
routes: # [tl! focus:3]
|
||||||
- to: 172.16.0.0/16
|
- to: 172.16.0.0/16
|
||||||
via: 192.168.1.100
|
via: 192.168.1.100
|
||||||
metric: 100
|
metric: 100
|
||||||
```
|
```
|
||||||
I then ran `sudo netplan apply` so the change would take immediate effect and confirmed the route was working by pinging the vCenter's interface on the `172.16.10.0/24` network:
|
I then ran `sudo netplan apply` so the change would take immediate effect and confirmed the route was working by pinging the vCenter's interface on the `172.16.10.0/24` network:
|
||||||
|
```shell
|
||||||
|
sudo netplan apply # [tl! .cmd]
|
||||||
```
|
```
|
||||||
john@ipam:~$ sudo netplan apply
|
```shell
|
||||||
john@ipam:~$ ip route
|
ip route # [tl! .cmd]
|
||||||
default via 192.168.1.1 dev ens160 proto static
|
default via 192.168.1.1 dev ens160 proto static # [tl! .nocopy:3]
|
||||||
172.16.0.0/16 via 192.168.1.100 dev ens160 proto static metric 100
|
172.16.0.0/16 via 192.168.1.100 dev ens160 proto static metric 100
|
||||||
192.168.1.0/24 dev ens160 proto kernel scope link src 192.168.1.14
|
192.168.1.0/24 dev ens160 proto kernel scope link src 192.168.1.14
|
||||||
john@ipam:~$ ping 172.16.10.12
|
|
||||||
PING 172.16.10.12 (172.16.10.12) 56(84) bytes of data.
|
ping 172.16.10.12 # [tl! .cmd]
|
||||||
|
PING 172.16.10.12 (172.16.10.12) 56(84) bytes of data. # [tl! .nocopy:7]
|
||||||
64 bytes from 172.16.10.12: icmp_seq=1 ttl=64 time=0.282 ms
|
64 bytes from 172.16.10.12: icmp_seq=1 ttl=64 time=0.282 ms
|
||||||
64 bytes from 172.16.10.12: icmp_seq=2 ttl=64 time=0.256 ms
|
64 bytes from 172.16.10.12: icmp_seq=2 ttl=64 time=0.256 ms
|
||||||
64 bytes from 172.16.10.12: icmp_seq=3 ttl=64 time=0.241 ms
|
64 bytes from 172.16.10.12: icmp_seq=3 ttl=64 time=0.241 ms
|
||||||
|
@ -94,7 +100,7 @@ rtt min/avg/max/mdev = 0.241/0.259/0.282/0.016 ms
|
||||||
```
|
```
|
||||||
|
|
||||||
Now would also be a good time to go ahead and enable cron jobs so that phpIPAM will automatically scan its defined subnets for changes in IP availability and device status. phpIPAM includes a pair of scripts in `INSTALL_DIR/functions/scripts/`: one for discovering new hosts, and the other for checking the status of previously discovered hosts. So I ran `sudo crontab -e` to edit root's crontab and pasted in these two lines to call both scripts every 15 minutes:
|
Now would also be a good time to go ahead and enable cron jobs so that phpIPAM will automatically scan its defined subnets for changes in IP availability and device status. phpIPAM includes a pair of scripts in `INSTALL_DIR/functions/scripts/`: one for discovering new hosts, and the other for checking the status of previously discovered hosts. So I ran `sudo crontab -e` to edit root's crontab and pasted in these two lines to call both scripts every 15 minutes:
|
||||||
```
|
```text
|
||||||
*/15 * * * * /usr/bin/php /var/www/html/phpipam/functions/scripts/discoveryCheck.php
|
*/15 * * * * /usr/bin/php /var/www/html/phpipam/functions/scripts/discoveryCheck.php
|
||||||
*/15 * * * * /usr/bin/php /var/www/html/phpipam/functions/scripts/pingCheck.php
|
*/15 * * * * /usr/bin/php /var/www/html/phpipam/functions/scripts/pingCheck.php
|
||||||
```
|
```
|
||||||
|
@ -201,8 +207,9 @@ I downloaded the SDK from [here](https://code.vmware.com/web/sdk/1.1.0/vmware-vr
|
||||||
|
|
||||||
The README tells you to extract the .zip and make a simple modification to the `pom.xml` file to "brand" the integration:
|
The README tells you to extract the .zip and make a simple modification to the `pom.xml` file to "brand" the integration:
|
||||||
```xml
|
```xml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
<properties>
|
<properties>
|
||||||
<provider.name>phpIPAM</provider.name>
|
<provider.name>phpIPAM</provider.name> <!-- [tl! focus:2] -->
|
||||||
<provider.description>phpIPAM integration for vRA</provider.description>
|
<provider.description>phpIPAM integration for vRA</provider.description>
|
||||||
<provider.version>1.0.3</provider.version>
|
<provider.version>1.0.3</provider.version>
|
||||||
|
|
||||||
|
@ -217,6 +224,7 @@ You can then kick off the build with `mvn package -PcollectDependencies -Duser.i
|
||||||
|
|
||||||
You'll notice that the form includes fields for Username, Password, and Hostname; we'll also need to specify the API app ID. This can be done by editing `./src/main/resources/endpoint-schema.json`. I added an `apiAppId` field:
|
You'll notice that the form includes fields for Username, Password, and Hostname; we'll also need to specify the API app ID. This can be done by editing `./src/main/resources/endpoint-schema.json`. I added an `apiAppId` field:
|
||||||
```json
|
```json
|
||||||
|
// torchlight! {"lineNumbers":true}
|
||||||
{
|
{
|
||||||
"layout":{
|
"layout":{
|
||||||
"pages":[
|
"pages":[
|
||||||
|
@ -228,7 +236,7 @@ You'll notice that the form includes fields for Username, Password, and Hostname
|
||||||
"id":"section_1",
|
"id":"section_1",
|
||||||
"fields":[
|
"fields":[
|
||||||
{
|
{
|
||||||
"id":"apiAppId",
|
"id":"apiAppId", // [tl! focus]
|
||||||
"display":"textField"
|
"display":"textField"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -254,7 +262,7 @@ You'll notice that the form includes fields for Username, Password, and Hostname
|
||||||
"type":{
|
"type":{
|
||||||
"dataType":"string"
|
"dataType":"string"
|
||||||
},
|
},
|
||||||
"label":"API App ID",
|
"label":"API App ID", // [tl! focus]
|
||||||
"constraints":{
|
"constraints":{
|
||||||
"required":true
|
"required":true
|
||||||
}
|
}
|
||||||
|
@ -317,6 +325,7 @@ Example payload:
|
||||||
|
|
||||||
The `do_validate_endpoint` function has a handy comment letting us know that's where we'll drop in our code:
|
The `do_validate_endpoint` function has a handy comment letting us know that's where we'll drop in our code:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def do_validate_endpoint(self, auth_credentials, cert):
|
def do_validate_endpoint(self, auth_credentials, cert):
|
||||||
# Your implementation goes here
|
# Your implementation goes here
|
||||||
|
|
||||||
|
@ -328,6 +337,7 @@ def do_validate_endpoint(self, auth_credentials, cert):
|
||||||
```
|
```
|
||||||
The example code gives us a nice start at how we'll get our inputs from vRA. So let's expand that a bit:
|
The example code gives us a nice start at how we'll get our inputs from vRA. So let's expand that a bit:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def do_validate_endpoint(self, auth_credentials, cert):
|
def do_validate_endpoint(self, auth_credentials, cert):
|
||||||
# Build variables
|
# Build variables
|
||||||
username = auth_credentials["privateKeyId"]
|
username = auth_credentials["privateKeyId"]
|
||||||
|
@ -337,11 +347,13 @@ def do_validate_endpoint(self, auth_credentials, cert):
|
||||||
```
|
```
|
||||||
As before, we'll construct the "base" URI by inserting the `hostname` and `apiAppId`, and we'll combine the `username` and `password` into our `auth` variable:
|
As before, we'll construct the "base" URI by inserting the `hostname` and `apiAppId`, and we'll combine the `username` and `password` into our `auth` variable:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
uri = f'https://{hostname}/api/{apiAppId}/'
|
uri = f'https://{hostname}/api/{apiAppId}/'
|
||||||
auth = (username, password)
|
auth = (username, password)
|
||||||
```
|
```
|
||||||
I realized that I'd be needing to do the same authentication steps for each one of these operations, so I created a new `auth_session()` function to do the heavy lifting. Other operations will also need to return the authorization token but for this run we really just need to know whether the authentication was successful, which we can do by checking `req.status_code`.
|
I realized that I'd be needing to do the same authentication steps for each one of these operations, so I created a new `auth_session()` function to do the heavy lifting. Other operations will also need to return the authorization token but for this run we really just need to know whether the authentication was successful, which we can do by checking `req.status_code`.
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def auth_session(uri, auth, cert):
|
def auth_session(uri, auth, cert):
|
||||||
auth_uri = f'{uri}/user/'
|
auth_uri = f'{uri}/user/'
|
||||||
req = requests.post(auth_uri, auth=auth, verify=cert)
|
req = requests.post(auth_uri, auth=auth, verify=cert)
|
||||||
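    # (hypothetical sketch of the truncated remainder: for validating the
    # endpoint we only care whether the login attempt succeeded)
    return req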
|
@ -349,6 +361,7 @@ def auth_session(uri, auth, cert):
|
||||||
```
|
```
|
||||||
And we'll call that function from `do_validate_endpoint()`:
|
And we'll call that function from `do_validate_endpoint()`:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# Test auth connection
|
# Test auth connection
|
||||||
try:
|
try:
|
||||||
response = auth_session(uri, auth, cert)
|
response = auth_session(uri, auth, cert)
|
||||||
|
@ -368,6 +381,7 @@ Confirm that everything worked correctly by hopping over to the **Extensibility*
|
||||||
![Extensibility action runs](e4PTJxfqH.png)
|
![Extensibility action runs](e4PTJxfqH.png)
|
||||||
Select the newest `phpIPAM_ValidateEndpoint` action and make sure it has a happy green *Completed* status. You can also review the Inputs to make sure they look like what you expected:
|
Select the newest `phpIPAM_ValidateEndpoint` action and make sure it has a happy green *Completed* status. You can also review the Inputs to make sure they look like what you expected:
|
||||||
```json
|
```json
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
{
|
{
|
||||||
"__metadata": {
|
"__metadata": {
|
||||||
"headers": {
|
"headers": {
|
||||||
|
@ -395,6 +409,7 @@ That's one operation in the bank!
|
||||||
### Step 6: 'Get IP Ranges' action
|
### Step 6: 'Get IP Ranges' action
|
||||||
So vRA can authenticate against phpIPAM; next, let's actually query to get a list of available IP ranges. This happens in `./src/main/python/get_ip_ranges/source.py`. We'll start by pulling over our `auth_session()` function and flesh it out a bit more to return the authorization token:
|
So vRA can authenticate against phpIPAM; next, let's actually query to get a list of available IP ranges. This happens in `./src/main/python/get_ip_ranges/source.py`. We'll start by pulling over our `auth_session()` function and flesh it out a bit more to return the authorization token:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def auth_session(uri, auth, cert):
|
def auth_session(uri, auth, cert):
|
||||||
auth_uri = f'{uri}/user/'
|
auth_uri = f'{uri}/user/'
|
||||||
req = requests.post(auth_uri, auth=auth, verify=cert)
|
req = requests.post(auth_uri, auth=auth, verify=cert)
|
||||||
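    # (hypothetical sketch of the truncated remainder: pull the token out of
    # the phpIPAM response and shape it as a header for later requests)
    if req.status_code == 200:
        return {"token": req.json()['data']['token']}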
|
@ -405,6 +420,7 @@ def auth_session(uri, auth, cert):
|
||||||
```
|
```
|
||||||
We'll then modify `do_get_ip_ranges()` with our needed variables, and then call `auth_session()` to get the necessary token:
|
We'll then modify `do_get_ip_ranges()` with our needed variables, and then call `auth_session()` to get the necessary token:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def do_get_ip_ranges(self, auth_credentials, cert):
|
def do_get_ip_ranges(self, auth_credentials, cert):
|
||||||
# Build variables
|
# Build variables
|
||||||
username = auth_credentials["privateKeyId"]
|
username = auth_credentials["privateKeyId"]
|
||||||
|
@ -419,6 +435,7 @@ def do_get_ip_ranges(self, auth_credentials, cert):
|
||||||
```
|
```
|
||||||
We can then query for the list of subnets, just like we did earlier:
|
We can then query for the list of subnets, just like we did earlier:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# Request list of subnets
|
# Request list of subnets
|
||||||
subnet_uri = f'{uri}/subnets/'
|
subnet_uri = f'{uri}/subnets/'
|
||||||
ipRanges = []
|
ipRanges = []
|
||||||
|
@ -430,6 +447,7 @@ I decided to add the extra `filter_by=isPool&filter_value=1` argument to the que
|
||||||
{{% notice note "Update" %}}
|
{{% notice note "Update" %}}
|
||||||
I now filter for networks identified by the designated custom field like so:
|
I now filter for networks identified by the designated custom field like so:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# Request list of subnets
|
# Request list of subnets
|
||||||
subnet_uri = f'{uri}/subnets/'
|
subnet_uri = f'{uri}/subnets/'
|
||||||
if enableFilter == "true":
|
if enableFilter == "true":
|
||||||
|
@ -448,6 +466,7 @@ Now is a good time to consult [that white paper](https://docs.vmware.com/en/VMwa
|
||||||
|
|
||||||
For instance, these are pretty direct matches:
|
For instance, these are pretty direct matches:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
ipRange['id'] = str(subnet['id'])
|
ipRange['id'] = str(subnet['id'])
|
||||||
ipRange['description'] = str(subnet['description'])
|
ipRange['description'] = str(subnet['description'])
|
||||||
ipRange['subnetPrefixLength'] = str(subnet['mask'])
|
ipRange['subnetPrefixLength'] = str(subnet['mask'])
|
||||||
|
@ -459,6 +478,7 @@ ipRange['name'] = f"{str(subnet['subnet'])}/{str(subnet['mask'])}"
|
||||||
|
|
||||||
Working with IP addresses in Python can be greatly simplified by use of the `ipaddress` module, so I added an `import ipaddress` statement near the top of the file. I also added it to `requirements.txt` to make sure it gets picked up by the Maven build. I can then use that to figure out the IP version as well as computing reasonable start and end IP addresses:
|
Working with IP addresses in Python can be greatly simplified by use of the `ipaddress` module, so I added an `import ipaddress` statement near the top of the file. I also added it to `requirements.txt` to make sure it gets picked up by the Maven build. I can then use that to figure out the IP version as well as computing reasonable start and end IP addresses:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
network = ipaddress.ip_network(str(subnet['subnet']) + '/' + str(subnet['mask']))
|
network = ipaddress.ip_network(str(subnet['subnet']) + '/' + str(subnet['mask']))
|
||||||
ipRange['ipVersion'] = 'IPv' + str(network.version)
|
ipRange['ipVersion'] = 'IPv' + str(network.version)
|
||||||
ipRange['startIPAddress'] = str(network[1])
|
ipRange['startIPAddress'] = str(network[1])
|
||||||
|
@ -466,6 +486,7 @@ ipRange['endIPAddress'] = str(network[-2])
|
||||||
```
|
```
|
||||||
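To make that concrete (a quick illustration of mine, not from the original post), here's what those indexing tricks yield for a /24:

```python
import ipaddress

network = ipaddress.ip_network('172.16.20.0/24')  # example subnet
print('IPv' + str(network.version))  # IPv4
print(network[1])                    # 172.16.20.1 (first usable address)
print(network[-2])                   # 172.16.20.254 (last usable address)
```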
I'd like to try to get the DNS servers from phpIPAM if they're defined, but I also don't want the whole thing to puke if a subnet doesn't have that defined. phpIPAM returns the DNS servers as a semicolon-delimited string; I need them to look like a Python list:
|
I'd like to try to get the DNS servers from phpIPAM if they're defined, but I also don't want the whole thing to puke if a subnet doesn't have that defined. phpIPAM returns the DNS servers as a semicolon-delimited string; I need them to look like a Python list:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
try:
|
try:
|
||||||
ipRange['dnsServerAddresses'] = [server.strip() for server in str(subnet['nameservers']['namesrv1']).split(';')]
|
ipRange['dnsServerAddresses'] = [server.strip() for server in str(subnet['nameservers']['namesrv1']).split(';')]
|
||||||
except:
|
except:
|
||||||
|
@ -473,6 +494,7 @@ except:
|
||||||
```
|
```
|
||||||
I can also nest another API request to find which address is marked as the gateway for a given subnet:
|
I can also nest another API request to find which address is marked as the gateway for a given subnet:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
gw_req = requests.get(f"{subnet_uri}/{subnet['id']}/addresses/?filter_by=is_gateway&filter_value=1", headers=token, verify=cert)
|
gw_req = requests.get(f"{subnet_uri}/{subnet['id']}/addresses/?filter_by=is_gateway&filter_value=1", headers=token, verify=cert)
|
||||||
if gw_req.status_code == 200:
|
if gw_req.status_code == 200:
|
||||||
gateway = gw_req.json()['data'][0]['ip']
|
gateway = gw_req.json()['data'][0]['ip']
|
||||||
|
@ -480,10 +502,12 @@ if gw_req.status_code == 200:
|
||||||
```
|
```
|
||||||
And then I merge each of these `ipRange` objects into the `ipRanges` list which will be returned to vRA:
|
And then I merge each of these `ipRange` objects into the `ipRanges` list which will be returned to vRA:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
ipRanges.append(ipRange)
|
ipRanges.append(ipRange)
|
||||||
```
|
```
|
||||||
After rearranging a bit and tossing in some logging, here's what I've got:
|
After rearranging a bit and tossing in some logging, here's what I've got:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
for subnet in subnets:
|
for subnet in subnets:
|
||||||
ipRange = {}
|
ipRange = {}
|
||||||
ipRange['id'] = str(subnet['id'])
|
ipRange['id'] = str(subnet['id'])
|
||||||
|
@ -518,7 +542,7 @@ The full code can be found [here](https://github.com/jbowdre/phpIPAM-for-vRA8/bl
|
||||||
In any case, it's time to once again use `mvn package -PcollectDependencies -Duser.id=${UID}` to fire off the build, and then import `phpIPAM.zip` into vRA.
|
In any case, it's time to once again use `mvn package -PcollectDependencies -Duser.id=${UID}` to fire off the build, and then import `phpIPAM.zip` into vRA.
|
||||||
|
|
||||||
vRA runs the `phpIPAM_GetIPRanges` action about every ten minutes so keep checking back on the **Extensibility > Action Runs** view until it shows up. You can then select the action and review the Log to see which IP ranges got picked up:
|
vRA runs the `phpIPAM_GetIPRanges` action about every ten minutes so keep checking back on the **Extensibility > Action Runs** view until it shows up. You can then select the action and review the Log to see which IP ranges got picked up:
|
||||||
```log
|
```
|
||||||
[2021-02-21 23:14:04,026] [INFO] - Querying for auth credentials
|
[2021-02-21 23:14:04,026] [INFO] - Querying for auth credentials
|
||||||
[2021-02-21 23:14:04,051] [INFO] - Credentials obtained successfully!
|
[2021-02-21 23:14:04,051] [INFO] - Credentials obtained successfully!
|
||||||
[2021-02-21 23:14:04,089] [INFO] - Found subnet: 172.16.10.0/24 - 1610-Management.
|
[2021-02-21 23:14:04,089] [INFO] - Found subnet: 172.16.10.0/24 - 1610-Management.
|
||||||
|
@ -540,6 +564,7 @@ Next, we need to figure out how to allocate an IP.
|
||||||
### Step 7: 'Allocate IP' action
|
### Step 7: 'Allocate IP' action
|
||||||
I think we've got a rhythm going now. So we'll dive into `./src/main/python/allocate_ip/source.py`, create our `auth_session()` function, and add our variables to the `do_allocate_ip()` function. I also created a new `bundle` object to hold the `uri`, `token`, and `cert` items so that I don't have to keep typing those over and over and over.
|
I think we've got a rhythm going now. So we'll dive into `./src/main/python/allocate_ip/source.py`, create our `auth_session()` function, and add our variables to the `do_allocate_ip()` function. I also created a new `bundle` object to hold the `uri`, `token`, and `cert` items so that I don't have to keep typing those over and over and over.
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def auth_session(uri, auth, cert):
|
def auth_session(uri, auth, cert):
|
||||||
auth_uri = f'{uri}/user/'
|
auth_uri = f'{uri}/user/'
|
||||||
req = requests.post(auth_uri, auth=auth, verify=cert)
|
req = requests.post(auth_uri, auth=auth, verify=cert)
|
||||||
|
@ -567,6 +592,7 @@ def do_allocate_ip(self, auth_credentials, cert):
|
||||||
```
|
```
|
||||||
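For clarity, that `bundle` is just a plain dict (a sketch of mine; its exact construction is truncated above):

```python
# hypothetical sketch: one dict to carry the values every helper call needs
bundle = {
    'uri': uri,      # base API URI built from hostname and apiAppId
    'token': token,  # auth header dict returned by auth_session()
    'cert': cert     # certificate/verify value passed to requests
}
```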
I left the remainder of `do_allocate_ip()` intact but modified its calls to other functions so that my new `bundle` would be included:
|
I left the remainder of `do_allocate_ip()` intact but modified its calls to other functions so that my new `bundle` would be included:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
allocation_result = []
|
allocation_result = []
|
||||||
try:
|
try:
|
||||||
resource = self.inputs["resourceInfo"]
|
resource = self.inputs["resourceInfo"]
|
||||||
|
@ -582,6 +608,7 @@ except Exception as e:
|
||||||
```
|
```
|
||||||
I also added `bundle` to the `allocate()` function:
|
I also added `bundle` to the `allocate()` function:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def allocate(resource, allocation, context, endpoint, bundle):
|
def allocate(resource, allocation, context, endpoint, bundle):
|
||||||
|
|
||||||
last_error = None
|
last_error = None
|
||||||
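    # (hypothetical sketch of the truncated loop, following the SDK skeleton:
    # try each candidate range and remember the last error if none succeed)
    for range_id in allocation['ipRangeIds']:
        try:
            return allocate_in_range(range_id, resource, allocation, context, endpoint, bundle)
        except Exception as error:
            last_error = error
    raise last_error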
|
@ -599,6 +626,7 @@ def allocate(resource, allocation, context, endpoint, bundle):
|
||||||
```
|
```
|
||||||
The heavy lifting is actually handled in `allocate_in_range()`. Right now, my implementation only supports doing a single allocation so I added an escape in case someone asks to do something crazy like allocate *2* IPs. I then set up my variables:
|
The heavy lifting is actually handled in `allocate_in_range()`. Right now, my implementation only supports doing a single allocation so I added an escape in case someone asks to do something crazy like allocate *2* IPs. I then set up my variables:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def allocate_in_range(range_id, resource, allocation, context, endpoint, bundle):
|
def allocate_in_range(range_id, resource, allocation, context, endpoint, bundle):
|
||||||
if int(allocation['size']) == 1:
|
if int(allocation['size']) == 1:
|
||||||
vmName = resource['name']
|
vmName = resource['name']
|
||||||
|
@ -612,7 +640,7 @@ def allocate_in_range(range_id, resource, allocation, context, endpoint, bundle)
|
||||||
raise Exception("Not implemented")
|
raise Exception("Not implemented")
|
||||||
```
|
```
|
||||||
I construct a `payload` that will be passed to the phpIPAM API when an IP gets allocated to a VM:
|
I construct a `payload` that will be passed to the phpIPAM API when an IP gets allocated to a VM:
|
||||||
```python
|
```python {linenos=true}
|
||||||
payload = {
|
payload = {
|
||||||
'hostname': vmName,
|
'hostname': vmName,
|
||||||
'description': f'Reserved by vRA for {owner} at {datetime.now()}'
|
'description': f'Reserved by vRA for {owner} at {datetime.now()}'
|
||||||
|
@ -622,12 +650,14 @@ That timestamp will be handy when reviewing the reservations from the phpIPAM si
|
||||||
|
|
||||||
So now we'll construct the URI and post the allocation request to phpIPAM. We tell it which `range_id` to use and it will return the first available IP.
|
So now we'll construct the URI and post the allocation request to phpIPAM. We tell it which `range_id` to use and it will return the first available IP.
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
allocate_uri = f'{uri}/addresses/first_free/{str(range_id)}/'
|
allocate_uri = f'{uri}/addresses/first_free/{str(range_id)}/'
|
||||||
allocate_req = requests.post(allocate_uri, data=payload, headers=token, verify=cert)
|
allocate_req = requests.post(allocate_uri, data=payload, headers=token, verify=cert)
|
||||||
allocate_req = allocate_req.json()
|
allocate_req = allocate_req.json()
|
||||||
```
|
```
|
||||||
Per the white paper, we'll need to return `ipAllocationId`, `ipAddresses`, `ipRangeId`, and `ipVersion` to vRA in an `AllocationResult`. Once again, I'll leverage the `ipaddress` module for figuring the version (and, once again, I'll add it as an import and to the `requirements.txt` file).
|
Per the white paper, we'll need to return `ipAllocationId`, `ipAddresses`, `ipRangeId`, and `ipVersion` to vRA in an `AllocationResult`. Once again, I'll leverage the `ipaddress` module for figuring the version (and, once again, I'll add it as an import and to the `requirements.txt` file).
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
if allocate_req['success']:
|
if allocate_req['success']:
|
||||||
version = ipaddress.ip_address(allocate_req['data']).version
|
version = ipaddress.ip_address(allocate_req['data']).version
|
||||||
result = {
|
result = {
|
||||||
|
@ -644,6 +674,7 @@ return result
|
||||||
```
|
```
|
||||||
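For reference, here's a sketch of how that result plausibly comes together (the field names are from the white paper; the exact assembly is truncated in the block above):

```python
# hypothetical sketch of the tail of allocate_in_range()
result = {
    "ipAllocationId": allocation['id'],     # assumption: echo back the request's allocation id
    "ipRangeId": range_id,
    "ipVersion": 'IPv' + str(version),
    "ipAddresses": [allocate_req['data']]   # phpIPAM returns the newly-allocated IP in 'data'
}
```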
I also implemented a hasty `rollback()` in case something goes wrong and we need to undo the allocation:
|
I also implemented a hasty `rollback()` in case something goes wrong and we need to undo the allocation:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def rollback(allocation_result, bundle):
|
def rollback(allocation_result, bundle):
|
||||||
uri = bundle['uri']
|
uri = bundle['uri']
|
||||||
token = bundle['token']
|
token = bundle['token']
|
||||||
|
@ -658,7 +689,7 @@ def rollback(allocation_result, bundle):
|
||||||
return
|
return
|
||||||
```
|
```
|
||||||
The full `allocate_ip` code is [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/allocate_ip/source.py). Once more, run `mvn package -PcollectDependencies -Duser.id=${UID}` and import the new `phpIPAM.zip` package into vRA. You can then open a Cloud Assembly Cloud Template associated with one of the specified networks and hit the "Test" button to see if it works. You should see a new `phpIPAM_AllocateIP` action run appear on the **Extensibility > Action runs** tab. Check the Log for something like this:
|
The full `allocate_ip` code is [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/allocate_ip/source.py). Once more, run `mvn package -PcollectDependencies -Duser.id=${UID}` and import the new `phpIPAM.zip` package into vRA. You can then open a Cloud Assembly Cloud Template associated with one of the specified networks and hit the "Test" button to see if it works. You should see a new `phpIPAM_AllocateIP` action run appear on the **Extensibility > Action runs** tab. Check the Log for something like this:
|
||||||
```log
|
```
|
||||||
[2021-02-22 01:31:41,729] [INFO] - Querying for auth credentials
|
[2021-02-22 01:31:41,729] [INFO] - Querying for auth credentials
|
||||||
[2021-02-22 01:31:41,757] [INFO] - Credentials obtained successfully!
|
[2021-02-22 01:31:41,757] [INFO] - Credentials obtained successfully!
|
||||||
[2021-02-22 01:31:41,773] [INFO] - Allocating from range 12
|
[2021-02-22 01:31:41,773] [INFO] - Allocating from range 12
|
||||||
|
@ -672,6 +703,7 @@ Almost done!
|
||||||
### Step 8: 'Deallocate IP' action
|
### Step 8: 'Deallocate IP' action
|
||||||
The last step is to remove the IP allocation when a vRA deployment gets destroyed. It starts just like the `allocate_ip` action with our `auth_session()` function and variable initialization:
|
The last step is to remove the IP allocation when a vRA deployment gets destroyed. It starts just like the `allocate_ip` action with our `auth_session()` function and variable initialization:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def auth_session(uri, auth, cert):
|
def auth_session(uri, auth, cert):
|
||||||
auth_uri = f'{uri}/user/'
|
auth_uri = f'{uri}/user/'
|
||||||
req = requests.post(auth_uri, auth=auth, verify=cert)
|
req = requests.post(auth_uri, auth=auth, verify=cert)
|
||||||
|
@ -708,6 +740,7 @@ def do_deallocate_ip(self, auth_credentials, cert):
|
||||||
```
|
```
|
||||||
And the `deallocate()` function is basically a prettier version of the `rollback()` function from the `allocate_ip` action:
|
And the `deallocate()` function is basically a prettier version of the `rollback()` function from the `allocate_ip` action:
|
||||||
```python
|
```python
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
def deallocate(resource, deallocation, bundle):
|
def deallocate(resource, deallocation, bundle):
|
||||||
uri = bundle['uri']
|
uri = bundle['uri']
|
||||||
token = bundle['token']
|
token = bundle['token']
|
||||||
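    cert = bundle['cert']
    # (hypothetical sketch of the truncated middle, matching the log output
    # shown below: delete the reservation by IP address and range id)
    ip = deallocation['ipAddress']
    range_id = deallocation['ipRangeId']
    requests.delete(f'{uri}/addresses/{ip}/{range_id}/', headers=token, verify=cert)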
|
@ -725,13 +758,14 @@ def deallocate(resource, deallocation, bundle):
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
You can review the full code [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/deallocate_ip/source.py). Build the package with Maven, import to vRA, and run another test deployment. The `phpIPAM_DeallocateIP` action should complete successfully. Something like this will be in the log:
|
You can review the full code [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/deallocate_ip/source.py). Build the package with Maven, import to vRA, and run another test deployment. The `phpIPAM_DeallocateIP` action should complete successfully. Something like this will be in the log:
|
||||||
```log
|
```
|
||||||
[2021-02-22 01:36:29,438] [INFO] - Querying for auth credentials
|
[2021-02-22 01:36:29,438] [INFO] - Querying for auth credentials
|
||||||
[2021-02-22 01:36:29,461] [INFO] - Credentials obtained successfully!
|
[2021-02-22 01:36:29,461] [INFO] - Credentials obtained successfully!
|
||||||
[2021-02-22 01:36:29,476] [INFO] - Deallocating ip 172.16.40.3 from range 12
|
[2021-02-22 01:36:29,476] [INFO] - Deallocating ip 172.16.40.3 from range 12
|
||||||
```
|
```
|
||||||
And the Outputs section of the Details tab will show:
|
And the Outputs section of the Details tab will show:
|
||||||
```json
|
```json
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
{
|
{
|
||||||
"ipDeallocations": [
|
"ipDeallocations": [
|
||||||
{
|
{
|
||||||
|
|
|
@ -12,7 +12,7 @@ tags:
|
||||||
- windows
|
- windows
|
||||||
title: Joining VMs to Active Directory in site-specific OUs with vRA8
|
title: Joining VMs to Active Directory in site-specific OUs with vRA8
|
||||||
---
|
---
|
||||||
Connecting a deployed Windows VM to an Active Directory domain is pretty easy; just apply an appropriately-configured [customization spec](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-CAEB6A70-D1CF-446E-BC64-EC42CDB47117.html) and vCenter will take care of it for you. Of course, you'll likely then need to move the newly-created computer object to the correct Organizational Unit so that it gets all the right policies and such.
|
Connecting a deployed Windows VM to an Active Directory domain is pretty easy; just apply an appropriately-configured [customization spec](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-CAEB6A70-D1CF-446E-BC64-EC42CDB47117.html) and vCenter will take care of it for you. Of course, you'll likely then need to move the newly-created computer object to the correct Organizational Unit so that it gets all the right policies and such.
|
||||||
|
|
||||||
Fortunately, vRA 8 supports adding an Active Directory integration to handle staging computer objects in a designated OU. And vRA 8.3 even [introduced the ability](https://blogs.vmware.com/management/2021/02/whats-new-with-vrealize-automation-8-3-technical-overview.html#:~:text=New%20Active%20Directory%20Cloud%20Template%20Properties) to let blueprints override the relative DN path. That will be helpful in my case since I'll want the servers to be placed in different OUs depending on which site they get deployed to:
|
Fortunately, vRA 8 supports adding an Active Directory integration to handle staging computer objects in a designated OU. And vRA 8.3 even [introduced the ability](https://blogs.vmware.com/management/2021/02/whats-new-with-vrealize-automation-8-3-technical-overview.html#:~:text=New%20Active%20Directory%20Cloud%20Template%20Properties) to let blueprints override the relative DN path. That will be helpful in my case since I'll want the servers to be placed in different OUs depending on which site they get deployed to:
|
||||||
|
|
||||||
|
@ -42,17 +42,18 @@ As mentioned above, I'll leverage the customization specs in vCenter to handle t
|
||||||
First, the workgroup spec, appropriately called `vra-win-workgroup`:
|
First, the workgroup spec, appropriately called `vra-win-workgroup`:
|
||||||
![Workgroup spec](AzAna5Dda.png)
|
![Workgroup spec](AzAna5Dda.png)
|
||||||
|
|
||||||
It's about as basic as can be, including using DHCP for the network configuration (which doesn't really matter since the VM will eventually get a [static IP assigned from {php}IPAM](integrating-phpipam-with-vrealize-automation-8)).
|
It's about as basic as can be, including using DHCP for the network configuration (which doesn't really matter since the VM will eventually get a [static IP assigned from {php}IPAM](integrating-phpipam-with-vrealize-automation-8)).
|
||||||
|
|
||||||
`vra-win-domain` is basically the same, with one difference:
|
`vra-win-domain` is basically the same, with one difference:
|
||||||
![Domain spec](0ZYcORuiU.png)
|
![Domain spec](0ZYcORuiU.png)
|
||||||
|
|
||||||
Now to reference these specs from a cloud template...
|
Now to reference these specs from a cloud template...
|
||||||
|
|
||||||
### Cloud template
|
### Cloud template
|
||||||
I want to make sure that users requesting a deployment are able to pick whether or not a system should be joined to the domain, so I'm going to add that as an input option on the template:
|
I want to make sure that users requesting a deployment are able to pick whether or not a system should be joined to the domain, so I'm going to add that as an input option on the template:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
inputs:
|
inputs:
|
||||||
[...]
|
[...]
|
||||||
adJoin:
|
adJoin:
|
||||||
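    # (hypothetical fill-in of the elided properties, per the prose below)
    type: boolean
    title: Join to AD domain?
    default: true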
|
@ -62,11 +63,12 @@ inputs:
|
||||||
[...]
|
[...]
|
||||||
```
|
```
|
||||||
|
|
||||||
This new `adJoin` input is a boolean so it will appear on the request form as a checkbox, and it will default to `true`; we'll assume that any Windows deployment should be automatically joined to AD unless this option gets unchecked.
|
This new `adJoin` input is a boolean so it will appear on the request form as a checkbox, and it will default to `true`; we'll assume that any Windows deployment should be automatically joined to AD unless this option gets unchecked.
|
||||||
|
|
||||||
In the `resources` section of the template, I'll set a new property called `ignoreActiveDirectory` to be the inverse of the `adJoin` input; that will tell the AD integration not to do anything if the box to join the VM to the domain is unchecked. I'll also use `activeDirectory: relativeDN` to insert the appropriate site code into the DN where the computer object will be created. And, finally, I'll reference the `customizationSpec` and use [cloud template conditional syntax](https://docs.vmware.com/en/vRealize-Automation/8.4/Using-and-Managing-Cloud-Assembly/GUID-12F0BC64-6391-4E5F-AA48-C5959024F3EB.html#conditions-4) to apply the correct spec based on whether it's a domain or workgroup deployment. (These conditionals take the pattern `'${conditional-expression ? true-value : false-value}'`).
|
In the `resources` section of the template, I'll set a new property called `ignoreActiveDirectory` to be the inverse of the `adJoin` input; that will tell the AD integration not to do anything if the box to join the VM to the domain is unchecked. I'll also use `activeDirectory: relativeDN` to insert the appropriate site code into the DN where the computer object will be created. And, finally, I'll reference the `customizationSpec` and use [cloud template conditional syntax](https://docs.vmware.com/en/vRealize-Automation/8.4/Using-and-Managing-Cloud-Assembly/GUID-12F0BC64-6391-4E5F-AA48-C5959024F3EB.html#conditions-4) to apply the correct spec based on whether it's a domain or workgroup deployment. (These conditionals take the pattern `'${conditional-expression ? true-value : false-value}'`).
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
resources:
|
resources:
|
||||||
Cloud_vSphere_Machine_1:
|
Cloud_vSphere_Machine_1:
|
||||||
type: Cloud.vSphere.Machine
|
type: Cloud.vSphere.Machine
|
||||||
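      properties:
        # (hypothetical sketch of the three properties described above; the
        # post's exact lines are truncated here)
        ignoreActiveDirectory: '${!input.adJoin}'
        activeDirectory:
          relativeDN: '${"OU=Servers,OU=" + input.site}'
        customizationSpec: '${input.adJoin ? "vra-win-domain" : "vra-win-workgroup"}'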
|
@ -82,6 +84,7 @@ resources:
|
||||||
Here's the current cloud template in its entirety:
|
Here's the current cloud template in its entirety:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
formatVersion: 1
|
formatVersion: 1
|
||||||
inputs:
|
inputs:
|
||||||
site:
|
site:
|
||||||
|
@ -214,7 +217,7 @@ I don't need to do anything else here since I'm not trying to do any fancy logic
|
||||||
Now to submit the request through Service Broker to see if this actually works:
|
Now to submit the request through Service Broker to see if this actually works:
|
||||||
![Submitting the request](20210721-test-deploy-request.png)
|
![Submitting the request](20210721-test-deploy-request.png)
|
||||||
|
|
||||||
After a few minutes, I can go into Cloud Assembly and navigate to **Extensibility > Activity > Actions Runs** and look at the **Integration Runs** to see if the `ad_machine` action has completed yet.
|
After a few minutes, I can go into Cloud Assembly and navigate to **Extensibility > Activity > Actions Runs** and look at the **Integration Runs** to see if the `ad_machine` action has completed yet.
|
||||||
![Successful ad_machine action](20210721-successful-ad_machine.png)
|
![Successful ad_machine action](20210721-successful-ad_machine.png)
|
||||||
|
|
||||||
Looking good! And once the deployment completes, I can look at the VM in vCenter to see that it has registered a fully-qualified DNS name since it was automatically joined to the domain:
|
Looking good! And once the deployment completes, I can look at the VM in vCenter to see that it has registered a fully-qualified DNS name since it was automatically joined to the domain:
|
||||||
|
@ -224,9 +227,9 @@ I can also repeat the test for a VM deployed to the `DRE` site just to confirm t
|
||||||
![Another domain-joined VM](20210721-vm-joined-2.png)
|
![Another domain-joined VM](20210721-vm-joined-2.png)
|
||||||
|
|
||||||
And I'll fire off another deployment with the `adJoin` box *unchecked* to test that I can also skip the AD configuration completely:
|
And I'll fire off another deployment with the `adJoin` box *unchecked* to test that I can also skip the AD configuration completely:
|
||||||
![VM not joined to the domain](20210721-vm-not-joined.png)
|
![VM not joined to the domain](20210721-vm-not-joined.png)
|
||||||
|
|
||||||
### Conclusion
|
### Conclusion
|
||||||
Confession time: I had actually started writing this post weeks ago. At that point, my efforts to bend the built-in AD integration to my will had been fairly unsuccessful, so I was instead working on a custom vRO workflow to accomplish the same basic thing. I circled back to try the AD integration again after upgrading the vRA environment to the latest 8.4.2 release, and found that it actually works quite well now. So I happily scrapped my ~50 lines of messy vRO JavaScript in favor of *just three lines* of YAML in the cloud template.
|
Confession time: I had actually started writing this post weeks ago. At that point, my efforts to bend the built-in AD integration to my will had been fairly unsuccessful, so I was instead working on a custom vRO workflow to accomplish the same basic thing. I circled back to try the AD integration again after upgrading the vRA environment to the latest 8.4.2 release, and found that it actually works quite well now. So I happily scrapped my ~50 lines of messy vRO JavaScript in favor of *just three lines* of YAML in the cloud template.
|
||||||
|
|
||||||
I love it when things work out!
|
I love it when things work out!
|
|
@ -55,7 +55,7 @@ Sounds pretty cool, right? I'm not going to go too deep into "how to Packer" in
|
||||||
### Install Packer
|
### Install Packer
|
||||||
Before being able to *use* Packer, you have to install it. On Debian/Ubuntu Linux, this process consists of adding the HashiCorp GPG key and software repository, and then simply installing the package:
|
Before being able to *use* Packer, you have to install it. On Debian/Ubuntu Linux, this process consists of adding the HashiCorp GPG key and software repository, and then simply installing the package:
|
||||||
```shell
|
```shell
|
||||||
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
|
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - # [tl! .cmd:2]
|
||||||
sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
|
sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
|
||||||
sudo apt-get update && sudo apt-get install packer
|
sudo apt-get update && sudo apt-get install packer
|
||||||
```
|
```
|
||||||
|
@ -113,7 +113,8 @@ Let's quickly run through that build process, and then I'll back up and examine
|
||||||
### `ubuntu-k8s.pkr.hcl`
|
### `ubuntu-k8s.pkr.hcl`
|
||||||
#### `packer` block
|
#### `packer` block
|
||||||
The first block in the file tells Packer about the minimum version requirements for Packer as well as the external plugins used for the build:
|
The first block in the file tells Packer about the minimum version requirements for Packer as well as the external plugins used for the build:
|
||||||
```
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// BLOCK: packer
|
// BLOCK: packer
|
||||||
// The Packer configuration.
|
// The Packer configuration.
|
||||||
packer {
|
packer {
|
||||||
|
@ -134,7 +135,8 @@ As I mentioned above, I'll be using the official [`vsphere` plugin](https://gith
|
||||||
|
|
||||||
#### `data` block
|
#### `data` block
|
||||||
This section would be used for loading information from various data sources, but I'm only using it for the `sshkey` plugin (as mentioned above).
|
This section would be used for loading information from various data sources, but I'm only using it for the `sshkey` plugin (as mentioned above).
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// BLOCK: data
|
// BLOCK: data
|
||||||
// Defines data sources.
|
// Defines data sources.
|
||||||
data "sshkey" "install" {
|
data "sshkey" "install" {
|
||||||
|
@ -147,7 +149,8 @@ This will generate an ECDSA keypair, and the public key will include the identif
|
||||||
|
|
||||||
#### `locals` block
|
#### `locals` block
|
||||||
Locals are a type of Packer variable that isn't explicitly declared in the `variables.pkr.hcl` file. They only exist within the context of a single build (hence the "local" name). Typical Packer variables are static and don't support string manipulation; locals, however, do support expressions that can be used to change their value on the fly. This makes them very useful when you need to combine variables into a single string or concatenate lists of SSH public keys (such as in the highlighted lines):
|
Locals are a type of Packer variable that isn't explicitly declared in the `variables.pkr.hcl` file. They only exist within the context of a single build (hence the "local" name). Typical Packer variables are static and don't support string manipulation; locals, however, do support expressions that can be used to change their value on the fly. This makes them very useful when you need to combine variables into a single string or concatenate lists of SSH public keys (such as in the highlighted lines):
|
||||||
```text {hl_lines=[10,17]}
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// BLOCK: locals
|
// BLOCK: locals
|
||||||
// Defines local variables.
|
// Defines local variables.
|
||||||
locals {
|
locals {
|
||||||
|
@ -182,7 +185,8 @@ The `source` block tells the `vsphere-iso` builder how to connect to vSphere, wh
|
||||||
|
|
||||||
You'll notice that most of this is just mapping user-defined variables (with the `var.` prefix) to properties used by `vsphere-iso`:
|
You'll notice that most of this is just mapping user-defined variables (with the `var.` prefix) to properties used by `vsphere-iso`:
|
||||||
|
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// BLOCK: source
|
// BLOCK: source
|
||||||
// Defines the builder configuration blocks.
|
// Defines the builder configuration blocks.
|
||||||
source "vsphere-iso" "ubuntu-k8s" {
|
source "vsphere-iso" "ubuntu-k8s" {
|
||||||
|
@ -284,7 +288,8 @@ source "vsphere-iso" "ubuntu-k8s" {
|
||||||
#### `build` block
|
#### `build` block
|
||||||
This block brings everything together and executes the build. It calls the `source.vsphere-iso.ubuntu-k8s` block defined above, and also ties in a `file` and a few `shell` provisioners. `file` provisioners are used to copy files (like SSL CA certificates) into the VM, while the `shell` provisioners run commands and execute scripts. Those will be handy for the post-deployment configuration tasks, like updating and installing packages.
|
This block brings everything together and executes the build. It calls the `source.vsphere-iso.ubuntu-k8s` block defined above, and also ties in a `file` and a few `shell` provisioners. `file` provisioners are used to copy files (like SSL CA certificates) into the VM, while the `shell` provisioners run commands and execute scripts. Those will be handy for the post-deployment configuration tasks, like updating and installing packages.
|
||||||
|
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// BLOCK: build
|
// BLOCK: build
|
||||||
// Defines the builders to run, provisioners, and post-processors.
|
// Defines the builders to run, provisioners, and post-processors.
|
||||||
build {
|
build {
|
||||||
|
@ -323,7 +328,8 @@ Before looking at the build-specific variable definitions, let's take a quick lo
|
||||||
|
|
||||||
Most of these carry descriptions with them so I won't restate them outside of the code block here:
|
Most of these carry descriptions with them so I won't restate them outside of the code block here:
|
||||||
|
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
Ubuntu Server 20.04 LTS variables using the Packer Builder for VMware vSphere (vsphere-iso).
|
Ubuntu Server 20.04 LTS variables using the Packer Builder for VMware vSphere (vsphere-iso).
|
||||||
|
@ -724,7 +730,8 @@ The full `variables.pkr.hcl` can be viewed [here](https://github.com/jbowdre/vsp
|
||||||
Packer automatically knows to load variables defined in files ending in `*.auto.pkrvars.hcl`. Storing the variable values separately from the declarations in `variables.pkr.hcl` makes it easier to protect sensitive values.
|
Packer automatically knows to load variables defined in files ending in `*.auto.pkrvars.hcl`. Storing the variable values separately from the declarations in `variables.pkr.hcl` makes it easier to protect sensitive values.
|
||||||
|
|
||||||
So I'll start by telling Packer what credentials to use for connecting to vSphere, and what vSphere resources to deploy to:
|
So I'll start by telling Packer what credentials to use for connecting to vSphere, and what vSphere resources to deploy to:
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
/*
|
/*
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
Ubuntu Server 20.04 LTS Kubernetes node variables used by the Packer Plugin for VMware vSphere (vsphere-iso).
|
Ubuntu Server 20.04 LTS Kubernetes node variables used by the Packer Plugin for VMware vSphere (vsphere-iso).
|
||||||
|
@ -745,7 +752,8 @@ vsphere_folder = "_Templates"
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll then describe the properties of the VM itself:
|
I'll then describe the properties of the VM itself:
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Guest Operating System Settings
|
// Guest Operating System Settings
|
||||||
vm_guest_os_language = "en_US"
|
vm_guest_os_language = "en_US"
|
||||||
vm_guest_os_keyboard = "us"
|
vm_guest_os_keyboard = "us"
|
||||||
|
@ -771,7 +779,8 @@ common_remove_cdrom = true
|
||||||
```
|
```
|
||||||
|
|
||||||
Then I'll configure Packer to convert the VM to a template once the build is finished:
|
Then I'll configure Packer to convert the VM to a template once the build is finished:
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Template and Content Library Settings
|
// Template and Content Library Settings
|
||||||
common_template_conversion = true
|
common_template_conversion = true
|
||||||
common_content_library_name = null
|
common_content_library_name = null
|
||||||
|
@ -786,7 +795,8 @@ common_ovf_export_path = ""
|
||||||
```
|
```
|
||||||
|
|
||||||
Next, I'll tell it where to find the Ubuntu 20.04 ISO I downloaded and placed on a datastore, along with the SHA256 checksum to confirm its integrity:
|
Next, I'll tell it where to find the Ubuntu 20.04 ISO I downloaded and placed on a datastore, along with the SHA256 checksum to confirm its integrity:
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Removable Media Settings
|
// Removable Media Settings
|
||||||
common_iso_datastore = "nuchost-local"
|
common_iso_datastore = "nuchost-local"
|
||||||
iso_url = null
|
iso_url = null
|
||||||
|
@ -797,7 +807,8 @@ iso_checksum_value = "5035be37a7e9abbdc09f0d257f3e33416c1a0fb322ba860d42d74
|
||||||
```
|
```
|
||||||
|
|
||||||
And then I'll specify the VM's boot device order, as well as the boot command that will be used for loading the `cloud-init` configuration into the Ubuntu installer:
|
And then I'll specify the VM's boot device order, as well as the boot command that will be used for loading the `cloud-init` configuration into the Ubuntu installer:
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Boot Settings
|
// Boot Settings
|
||||||
vm_boot_order = "disk,cdrom"
|
vm_boot_order = "disk,cdrom"
|
||||||
vm_boot_wait = "4s"
|
vm_boot_wait = "4s"
|
||||||
|
@ -814,7 +825,8 @@ vm_boot_command = [
|
||||||
|
|
||||||
Once the installer is booted and running, Packer will wait until the VM is available via SSH and then use these credentials to log in. (How will it be able to log in with those creds? We'll take a look at the `cloud-init` configuration in just a minute...)
|
Once the installer is booted and running, Packer will wait until the VM is available via SSH and then use these credentials to log in. (How will it be able to log in with those creds? We'll take a look at the `cloud-init` configuration in just a minute...)
|
||||||
|
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Communicator Settings
|
// Communicator Settings
|
||||||
communicator_port = 22
|
communicator_port = 22
|
||||||
communicator_timeout = "20m"
|
communicator_timeout = "20m"
|
||||||
|
@ -832,7 +844,8 @@ ssh_keys = [
|
||||||
Finally, I'll create two lists of scripts that will be run on the VM once the OS install is complete. The `post_install_scripts` will be run immediately after the operating system installation. The `update-packages.sh` script will cause a reboot, and then the set of `pre_final_scripts` will do some cleanup and prepare the VM to be converted to a template.
|
Finally, I'll create two lists of scripts that will be run on the VM once the OS install is complete. The `post_install_scripts` will be run immediately after the operating system installation. The `update-packages.sh` script will cause a reboot, and then the set of `pre_final_scripts` will do some cleanup and prepare the VM to be converted to a template.
|
||||||
|
|
||||||
The last bit of this file also designates the desired version of Kubernetes to be installed.
|
The last bit of this file also designates the desired version of Kubernetes to be installed.
|
||||||
```text
|
```hcl
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// Provisioner Settings
|
// Provisioner Settings
|
||||||
post_install_scripts = [
|
post_install_scripts = [
|
||||||
"scripts/wait-for-cloud-init.sh",
|
"scripts/wait-for-cloud-init.sh",
|
||||||
|
@ -865,6 +878,7 @@ Okay, so we've covered the Packer framework that creates the VM; now let's take
|
||||||
See the bits that look `${ like_this }`? Those place-holders will take input from the [`locals` block of `ubuntu-k8s.pkr.hcl`](#locals-block) mentioned above. So that's how all the OS properties will get set, including the hostname, locale, LVM partition layout, username, password, and SSH keys.
|
See the bits that look `${ like_this }`? Those place-holders will take input from the [`locals` block of `ubuntu-k8s.pkr.hcl`](#locals-block) mentioned above. So that's how all the OS properties will get set, including the hostname, locale, LVM partition layout, username, password, and SSH keys.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#cloud-config
|
#cloud-config
|
||||||
autoinstall:
|
autoinstall:
|
||||||
version: 1
|
version: 1
|
||||||
|
@ -899,7 +913,7 @@ autoinstall:
|
||||||
%{ endfor ~}
|
%{ endfor ~}
|
||||||
%{ endif ~}
|
%{ endif ~}
|
||||||
storage:
|
storage:
|
||||||
config:
|
config: # [tl! collapse:start]
|
||||||
- ptable: gpt
|
- ptable: gpt
|
||||||
path: /dev/sda
|
path: /dev/sda
|
||||||
wipe: superblock
|
wipe: superblock
|
||||||
|
@ -1037,7 +1051,7 @@ autoinstall:
|
||||||
- path: /var/log/audit
|
- path: /var/log/audit
|
||||||
device: format-audit
|
device: format-audit
|
||||||
type: mount
|
type: mount
|
||||||
id: mount-audit
|
id: mount-audit # [tl! collapse:end]
|
||||||
user-data:
|
user-data:
|
||||||
package_upgrade: true
|
package_upgrade: true
|
||||||
disable_root: true
|
disable_root: true
|
||||||
|
@ -1069,6 +1083,7 @@ You can find all of the scripts [here](https://github.com/jbowdre/vsphere-k8s/tr
|
||||||
#### `wait-for-cloud-init.sh`
|
#### `wait-for-cloud-init.sh`
|
||||||
This simply holds up the process until the `/var/lib/cloud/instance/boot-finished` file has been created, signifying the completion of the `cloud-init` process:
|
This simply holds up the process until the `/var/lib/cloud/instance/boot-finished` file has been created, signifying the completion of the `cloud-init` process:
|
||||||
```shell
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#!/bin/bash -eu
|
#!/bin/bash -eu
|
||||||
echo '>> Waiting for cloud-init...'
|
echo '>> Waiting for cloud-init...'
|
||||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||||
|
@ -1079,6 +1094,7 @@ done
|
||||||
#### `cleanup-subiquity.sh`
|
#### `cleanup-subiquity.sh`
|
||||||
Next I clean up any network configs that may have been created during the install process:
|
Next I clean up any network configs that may have been created during the install process:
|
||||||
```shell
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#!/bin/bash -eu
|
#!/bin/bash -eu
|
||||||
if [ -f /etc/cloud/cloud.cfg.d/99-installer.cfg ]; then
|
if [ -f /etc/cloud/cloud.cfg.d/99-installer.cfg ]; then
|
||||||
sudo rm /etc/cloud/cloud.cfg.d/99-installer.cfg
|
sudo rm /etc/cloud/cloud.cfg.d/99-installer.cfg
|
||||||
|
@ -1094,6 +1110,7 @@ fi
|
||||||
#### `install-ca-certs.sh`
|
#### `install-ca-certs.sh`
|
||||||
The [`file` provisioner](#build-block) mentioned above helpfully copied my custom CA certs to the `/tmp/certs/` folder on the VM; this script will install them into the certificate store:
|
The [`file` provisioner](#build-block) mentioned above helpfully copied my custom CA certs to the `/tmp/certs/` folder on the VM; this script will install them into the certificate store:
|
||||||
```shell
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#!/bin/bash -eu
|
#!/bin/bash -eu
|
||||||
echo '>> Installing custom certificates...'
|
echo '>> Installing custom certificates...'
|
||||||
sudo cp /tmp/certs/* /usr/local/share/ca-certificates/
|
sudo cp /tmp/certs/* /usr/local/share/ca-certificates/
|
||||||
|
@ -1107,6 +1124,7 @@ sudo /usr/sbin/update-ca-certificates
|
||||||
#### `disable-multipathd.sh`
|
#### `disable-multipathd.sh`
|
||||||
This disables `multipathd`:
|
This disables `multipathd`:
|
||||||
```shell
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#!/bin/bash -eu
|
#!/bin/bash -eu
|
||||||
sudo systemctl disable multipathd
|
sudo systemctl disable multipathd
|
||||||
echo 'Disabling multipathd'
|
echo 'Disabling multipathd'
|
||||||
|
@ -1115,6 +1133,7 @@ echo 'Disabling multipathd'
|
||||||
#### `disable-release-upgrade-motd.sh`
|
#### `disable-release-upgrade-motd.sh`
|
||||||
And this one disables the release upgrade notices that would otherwise be displayed upon each login:
|
And this one disables the release upgrade notices that would otherwise be displayed upon each login:
|
||||||
```shell
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
#!/bin/bash -eu
|
#!/bin/bash -eu
|
||||||
echo '>> Disabling release update MOTD...'
|
echo '>> Disabling release update MOTD...'
|
||||||
sudo chmod -x /etc/update-motd.d/91-release-upgrade
|
sudo chmod -x /etc/update-motd.d/91-release-upgrade
|
||||||
|
@@ -1123,6 +1142,7 @@ sudo chmod -x /etc/update-motd.d/91-release-upgrade

#### `persist-cloud-init-net.sh`

I want to make sure that this VM keeps the same IP address following the reboot that will come in a few minutes, so I'll set a quick `cloud-init` option to help make sure that happens:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/sh -eu
echo '>> Preserving network settings...'
echo 'manual_cache_clean: True' | sudo tee -a /etc/cloud/cloud.cfg
@@ -1132,6 +1152,7 @@ echo 'manual_cache_clean: True' | sudo tee -a /etc/cloud/cloud.cfg

Then I just set a few options for the `sshd` configuration, like disabling root login:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
echo '>> Configuring SSH'
sudo sed -i 's/.*PermitRootLogin.*/PermitRootLogin no/' /etc/ssh/sshd_config
@@ -1144,6 +1165,7 @@ This script is a little longer and takes care of all the Kubernetes-specific set

First I enable the required `overlay` and `br_netfilter` modules:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
echo ">> Installing Kubernetes components..."

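sudo modprobe overlay      # assumed here; the next hunk's context confirms br_netfilter
sudo modprobe br_netfilter
```
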
@@ -1160,6 +1182,7 @@ sudo modprobe br_netfilter

Then I'll make some networking tweaks to enable forwarding and bridging:

```shell
# torchlight! {"lineNumbers": true}
# Configure networking
echo ".. configure networking"
cat << EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
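# the hunk ends mid-heredoc; these are the standard Kubernetes settings assumed here
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system
```
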
@@ -1173,6 +1196,7 @@ sudo sysctl --system

Next, set up `containerd` as the container runtime:

```shell
# torchlight! {"lineNumbers": true}
# Setup containerd
echo ".. setup containerd"
sudo apt-get update && sudo apt-get install -y containerd apt-transport-https jq
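# assumed continuation: generate a default containerd config; the restart
# is confirmed by the next hunk's context line
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo systemctl restart containerd
```
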
@@ -1183,6 +1207,7 @@ sudo systemctl restart containerd

Then disable swap:

```shell
# torchlight! {"lineNumbers": true}
# Disable swap
echo ".. disable swap"
sudo sed -i '/[[:space:]]swap[[:space:]]/ s/^\(.*\)$/#\1/g' /etc/fstab
@@ -1191,6 +1216,7 @@ sudo swapoff -a

Next I'll install the Kubernetes components and (crucially) `apt-mark hold` them so they won't be automatically upgraded without it being a coordinated change:

```shell
# torchlight! {"lineNumbers": true}
# Install Kubernetes
echo ".. install kubernetes version ${KUBEVERSION}"
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
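# assumed continuation: add the apt repo, then install and hold the components;
# the next hunk's context confirms the apt-mark hold step
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" \
  | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && sudo apt-get install -y kubelet=${KUBEVERSION} kubeadm=${KUBEVERSION} kubectl=${KUBEVERSION}
sudo apt-mark hold kubelet kubeadm kubectl
```
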
@@ -1202,6 +1228,7 @@ sudo apt-mark hold kubelet kubeadm kubectl

#### `update-packages.sh`

Lastly, I'll be sure to update all installed packages (excepting the Kubernetes ones, of course), and then perform a reboot to make sure that any new kernel modules get loaded:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
echo '>> Checking for and installing updates...'
sudo apt-get update && sudo apt-get -y upgrade
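# assumed final step: the reboot described in the prose above
sudo reboot
```
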
@@ -1215,6 +1242,7 @@ After the reboot, all that's left are some cleanup tasks to get the VM ready to

#### `cleanup-cloud-init.sh`

I'll start with cleaning up the `cloud-init` state:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
echo '>> Cleaning up cloud-init state...'
sudo cloud-init clean -l
@@ -1223,6 +1251,7 @@ sudo cloud-init clean -l

#### `enable-vmware-customization.sh`

And then (re)enable the ability for VMware to customize the guest successfully:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
echo '>> Enabling legacy VMware Guest Customization...'
echo 'disable_vmware_customization: true' | sudo tee -a /etc/cloud/cloud.cfg
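# the next hunk's context confirms this final step:
sudo vmware-toolbox-cmd config set deployPkg enable-custom-scripts true
```
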
@@ -1232,6 +1261,7 @@ sudo vmware-toolbox-cmd config set deployPkg enable-custom-scripts true

#### `zero-disk.sh`

I'll also execute this handy script to free up unused space on the virtual disk. It works by creating a file which completely fills up the disk, and then deleting that file:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
echo '>> Zeroing free space to reduce disk size'
sudo sh -c 'dd if=/dev/zero of=/EMPTY bs=1M || true; sync; sleep 1; sync'
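# the next hunk's context confirms the follow-up that deletes the file:
sudo sh -c 'rm -f /EMPTY; sync; sleep 1; sync'
```
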
@@ -1241,6 +1271,7 @@ sudo sh -c 'rm -f /EMPTY; sync; sleep 1; sync'

#### `generalize.sh`

Lastly, let's do a final run of cleaning up logs, temporary files, and unique identifiers that don't need to exist in a template. This script will also remove the SSH key with the `packer_key` identifier since that won't be needed anymore.

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash -eu
# Prepare a VM to become a template.

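# assumed sketch of the elided cleanup described above: drop the Packer
# bootstrap key, clear temp files, and reset unique identifiers
sudo sed -i '/packer_key/d' /home/*/.ssh/authorized_keys
sudo rm -rf /tmp/* /var/tmp/*
sudo truncate -s 0 /etc/machine-id
```
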
@@ -1294,7 +1325,7 @@ sudo rm -f /root/.bash_history

Now that all the ducks are nicely lined up, let's give them some marching orders and see what happens. All I have to do is open a terminal session to the folder containing the `.pkr.hcl` files, and then run the Packer build command:

```shell
packer build -on-error=abort -force . # [tl! .cmd]
```

{{% notice note "Flags" %}}

@@ -77,6 +77,7 @@ I can then click through the rest of the wizard but (as before) I'll stop on the

#### Editing the cluster spec

Remember that awkward `member:1.2.840.113556.1.4.1941:` attribute from earlier? Here's how it looks within the TCE cluster-defining YAML:

```yaml
# torchlight! {"lineNumbers": true}
LDAP_GROUP_SEARCH_BASE_DN: OU=LAB,DC=lab,DC=bowdre,DC=net
LDAP_GROUP_SEARCH_FILTER: (objectClass=group)
LDAP_GROUP_SEARCH_GROUP_ATTRIBUTE: 'member:1.2.840.113556.1.4.1941:'
@@ -86,24 +87,27 @@ LDAP_GROUP_SEARCH_USER_ATTRIBUTE: DN

That `:` at the end of the line will cause problems down the road - specifically when the deployment process creates the `dex` app which handles the actual LDAPS authentication piece. Cumulative hours of [troubleshooting](#troubleshooting-notes) (and learning!) eventually revealed to me that something along the way had choked on that trailing colon and inserted this into the `dex` configuration:

```yaml
# torchlight! {"lineNumbers": true}
userMatchers:
- userAttr: DN
  groupAttr:
    member:1.2.840.113556.1.4.1941: null # [tl! focus]
```

It *should* look like this instead:

```yaml
# torchlight! {"lineNumbers": true}
userMatchers:
- userAttr: DN
  groupAttr: 'member:1.2.840.113556.1.4.1941:' # [tl! focus]
```

That error prevents `dex` from starting correctly so the authentication would never work. I eventually figured out that using the `|` character to define the attribute as a [literal scalar](https://yaml.org/spec/1.2.2/#812-literal-style) would help to get around this issue so I changed the cluster YAML to look like this:

```yaml
# torchlight! {"lineNumbers": true}
LDAP_GROUP_SEARCH_BASE_DN: OU=LAB,DC=lab,DC=bowdre,DC=net
LDAP_GROUP_SEARCH_FILTER: (objectClass=group)
LDAP_GROUP_SEARCH_GROUP_ATTRIBUTE: | # [tl! focus:1]
  'member:1.2.840.113556.1.4.1941:'
LDAP_GROUP_SEARCH_NAME_ATTRIBUTE: cn
LDAP_GROUP_SEARCH_USER_ATTRIBUTE: DN
@@ -113,8 +117,8 @@ LDAP_GROUP_SEARCH_USER_ATTRIBUTE: DN

#### Deploying the cluster

That's the only thing I need to manually edit so now I can go ahead and create the cluster with:

```shell
tanzu management-cluster create tce-mgmt -f tce-mgmt-deploy.yaml # [tl! .cmd]
```

This will probably take 10-15 minutes to deploy so it's a great time to go top off my coffee.

@@ -136,19 +140,19 @@ Some addons might be getting installed! Check their status by running the follow
```

I obediently follow the instructions to switch to the correct context and verify that the addons are all running:

```shell
kubectl config use-context tce-mgmt-admin@tce-mgmt # [tl! .cmd]
Switched to context "tce-mgmt-admin@tce-mgmt". # [tl! .nocopy:1]

kubectl get apps -A # [tl! .cmd]
NAMESPACE    NAME                   DESCRIPTION           SINCE-DEPLOY   AGE # [tl! .nocopy:start]
tkg-system   antrea                 Reconcile succeeded   5m2s           11m
tkg-system   metrics-server         Reconcile succeeded   39s            11m
tkg-system   pinniped               Reconcile succeeded   4m55s          11m
tkg-system   secretgen-controller   Reconcile succeeded   65s            11m
tkg-system   tanzu-addons-manager   Reconcile succeeded   70s            11m
tkg-system   vsphere-cpi            Reconcile succeeded   32s            11m
tkg-system   vsphere-csi            Reconcile succeeded   66s            11m # [tl! .nocopy:end]
```

### Post-deployment tasks

@@ -158,21 +162,24 @@ I've got a TCE cluster now but it's not quite ready for me to authenticate with

#### Load Balancer deployment

The [guide I'm following from the TCE site](https://tanzucommunityedition.io/docs/latest/vsphere-ldap-config/) assumes that I'm using NSX-ALB in my environment, but I'm not. So, [as before](/tanzu-community-edition-k8s-homelab/#deploying-kube-vip-as-a-load-balancer), I'll need to deploy [Scott Rosenberg's `kube-vip` Carvel package](https://github.com/vrabbi/tkgm-customizations):

```shell
git clone https://github.com/vrabbi/tkgm-customizations.git # [tl! .cmd:3]
cd tkgm-customizations/carvel-packages/kube-vip-package
kubectl apply -n tanzu-package-repo-global -f metadata.yml
kubectl apply -n tanzu-package-repo-global -f package.yaml

cat << EOF > values.yaml # [tl! .cmd]
vip_range: 192.168.1.64-192.168.1.70
EOF

tanzu package install kubevip -p kubevip.terasky.com -v 0.3.9 -f values.yaml # [tl! .cmd]
```

#### Modifying services to use the Load Balancer

With the load balancer in place, I can follow the TCE instructions to modify the Pinniped and Dex services to switch from the `NodePort` type to the `LoadBalancer` type so they can be easily accessed from outside of the cluster. This process starts by creating a file called `pinniped-supervisor-svc-overlay.yaml` and pasting in the following overlay manifest:

```yaml
# torchlight! {"lineNumbers": true}
#@ load("@ytt:overlay", "overlay")
#@overlay/match by=overlay.subset({"kind": "Service", "metadata": {"name": "pinniped-supervisor", "namespace": "pinniped-supervisor"}})
---
@@ -203,40 +210,42 @@ spec:
```

This overlay will need to be inserted into the `pinniped-addon` secret which means that the contents need to be converted to a base64-encoded string:

```shell
base64 -w 0 pinniped-supervisor-svc-overlay.yaml # [tl! .cmd]
I0AgbG9hZCgi[...]== # [tl! .nocopy]
```

{{% notice note "Avoid newlines" %}}
The `-w 0` / `--wrap=0` argument tells `base64` to *not* wrap the encoded lines after a certain number of characters. If you leave this off, the string will get a newline inserted every 76 characters, and those linebreaks would make the string a bit more tricky to work with. Avoid having to clean up the output afterwards by being more specific with the request up front!
{{% /notice %}}
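
For instance, a quick way to see the difference is to count the newlines in each form (illustrative, using the same overlay file):

```shell
base64 pinniped-supervisor-svc-overlay.yaml | wc -l      # wrapped every 76 characters: several lines
base64 -w 0 pinniped-supervisor-svc-overlay.yaml | wc -l # 0 - the output contains no newlines at all
```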

I'll copy the resulting base64 string (which is much longer than the truncated form I'm using here), and paste it into the following command to patch the secret (which will be named after the management cluster name so replace the `tce-mgmt` part as appropriate):

```shell
kubectl -n tkg-system patch secret tce-mgmt-pinniped-addon -p '{"data": {"overlays.yaml": "I0AgbG9hZCgi[...]=="}}' # [tl! .cmd]
secret/tce-mgmt-pinniped-addon patched # [tl! .nocopy]
```

I can watch as the `pinniped-supervisor` and `dexsvc` services get updated with the new service type:

```shell
kubectl get svc -A -w # [tl! .cmd]
NAMESPACE             NAME                  TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S) # [tl! .nocopy:start]
pinniped-supervisor   pinniped-supervisor   NodePort       100.65.185.82    <none>         443:31234/TCP
tanzu-system-auth     dexsvc                NodePort       100.70.238.106   <none>         5556:30167/TCP
tkg-system            packaging-api         ClusterIP      100.65.185.94    <none>         443/TCP
tanzu-system-auth     dexsvc                LoadBalancer   100.70.238.106   <pending>      443:30167/TCP
pinniped-supervisor   pinniped-supervisor   LoadBalancer   100.65.185.82    <pending>      443:31234/TCP
pinniped-supervisor   pinniped-supervisor   LoadBalancer   100.65.185.82    192.168.1.70   443:31234/TCP
tanzu-system-auth     dexsvc                LoadBalancer   100.70.238.106   192.168.1.64   443:30167/TCP # [tl! .nocopy:end]
```

I'll also need to restart the `pinniped-post-deploy-job` job to account for the changes I just made; that's accomplished by simply deleting the existing job. After a few minutes a new job will be spawned automagically. I'll just watch for the new job to be created:

```shell
kubectl -n pinniped-supervisor delete jobs pinniped-post-deploy-job # [tl! .cmd]
job.batch "pinniped-post-deploy-job" deleted # [tl! .nocopy]
```

```shell
kubectl get jobs -A -w # [tl! .cmd]
NAMESPACE             NAME                       COMPLETIONS   DURATION   AGE # [tl! .nocopy:4]
pinniped-supervisor   pinniped-post-deploy-job   0/1                      0s
pinniped-supervisor   pinniped-post-deploy-job   0/1                      0s
pinniped-supervisor   pinniped-post-deploy-job   0/1           0s         0s
@@ -248,6 +257,7 @@ Right now, I've got all the necessary components to support LDAPS authentication

I'll toss this into a file I'll call `tanzu-admins-crb.yaml`:

```yaml
# torchlight! {"lineNumbers": true}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -267,24 +277,24 @@ I have a group in Active Directory called `Tanzu-Admins` which contains a group

Once applied, users within that group will be granted the `cluster-admin` role[^roles].

Let's do it:

```shell
kubectl apply -f tanzu-admins-crb.yaml # [tl! .cmd]
clusterrolebinding.rbac.authorization.k8s.io/tanzu-admins created # [tl! .nocopy]
```

Thus far, I've been using the default administrator context to interact with the cluster. Now it's time to switch to the non-admin context:

```shell
tanzu management-cluster kubeconfig get # [tl! .cmd]
You can now access the cluster by running 'kubectl config use-context tanzu-cli-tce-mgmt@tce-mgmt' # [tl! .nocopy:1]

kubectl config use-context tanzu-cli-tce-mgmt@tce-mgmt # [tl! .cmd]
Switched to context "tanzu-cli-tce-mgmt@tce-mgmt". # [tl! .nocopy]
```

After assuming the non-admin context, the next time I try to interact with the cluster it should kick off the LDAPS authentication process. It won't look like anything is happening in the terminal:

```shell
kubectl get nodes # [tl! .cmd]
# [tl! .nocopy]
```

But it will shortly spawn a browser page prompting me to log in:

@@ -294,9 +304,9 @@ Doing so successfully will yield:

![Dex login success!](dex_login_success.png)

And the `kubectl` command will return the expected details:

```shell
kubectl get nodes # [tl! .cmd]
NAME                            STATUS   ROLES                  AGE   VERSION # [tl! .nocopy:2]
tce-mgmt-control-plane-v8l8r    Ready    control-plane,master   29h   v1.21.5+vmware.1
tce-mgmt-md-0-847db9ddc-5bwjs   Ready    <none>                 28h   v1.21.5+vmware.1
```
@@ -318,9 +328,9 @@ Other users hoping to work with a Tanzu Community Edition cluster will also need

At this point, I've only configured authentication for the management cluster - not the workload cluster. The TCE community docs cover what's needed to make this configuration available in the workload cluster as well [here](https://tanzucommunityedition.io/docs/latest/vsphere-ldap-config/#configuration-steps-on-the-workload-cluster). [As before](/tanzu-community-edition-k8s-homelab/#workload-cluster), I created the deployment YAML for the workload cluster by copying the management cluster's deployment YAML and changing the `CLUSTER_NAME` and `VSPHERE_CONTROL_PLANE_ENDPOINT` values accordingly. This time I also deleted all of the `LDAP_*` and `OIDC_*` lines, but made sure to preserve the `IDENTITY_MANAGEMENT_TYPE: ldap` one.
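
Sketched as shell commands, that editing process might look something like this (the endpoint IP here is hypothetical, and the `sed` patterns assume the flat key layout shown earlier):

```shell
cp tce-mgmt-deploy.yaml tce-work-deploy.yaml
sed -i 's/^CLUSTER_NAME: .*/CLUSTER_NAME: tce-work/' tce-work-deploy.yaml
sed -i 's/^VSPHERE_CONTROL_PLANE_ENDPOINT: .*/VSPHERE_CONTROL_PLANE_ENDPOINT: 192.168.1.61/' tce-work-deploy.yaml
sed -i '/^LDAP_/d; /^OIDC_/d' tce-work-deploy.yaml # keep IDENTITY_MANAGEMENT_TYPE: ldap intact
```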

I was then able to deploy the workload cluster with:

```shell
tanzu cluster create --file tce-work-deploy.yaml # [tl! .cmd]
Validating configuration... # [tl! .nocopy:start]
Creating workload cluster 'tce-work'...
Waiting for cluster to be initialized...
cluster control plane is still being initialized: WaitingForControlPlane
@@ -329,35 +339,35 @@ Waiting for cluster nodes to be available...
Waiting for addons installation...
Waiting for packages to be up and running...

Workload cluster 'tce-work' created # [tl! .nocopy:end]
```

Access the admin context:

```shell
tanzu cluster kubeconfig get --admin tce-work # [tl! .cmd]
Credentials of cluster 'tce-work' have been saved # [tl! .nocopy:2]
You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work'

kubectl config use-context tce-work-admin@tce-work # [tl! .cmd]
Switched to context "tce-work-admin@tce-work". # [tl! .nocopy]
```

Apply the same ClusterRoleBinding from before[^crb]:

```shell
kubectl apply -f tanzu-admins-crb.yaml # [tl! .cmd]
clusterrolebinding.rbac.authorization.k8s.io/tanzu-admins created # [tl! .nocopy]
```

And finally switch to the non-admin context and log in with my AD account:

```shell
tanzu cluster kubeconfig get tce-work # [tl! .cmd]
ℹ  You can now access the cluster by running 'kubectl config use-context tanzu-cli-tce-work@tce-work' # [tl! .nocopy:1]

kubectl config use-context tanzu-cli-tce-work@tce-work # [tl! .cmd]
Switched to context "tanzu-cli-tce-work@tce-work". # [tl! .nocopy:1]

kubectl get nodes # [tl! .cmd]
NAME                            STATUS   ROLES                  AGE   VERSION # [tl! .nocopy:2]
tce-work-control-plane-zts6r    Ready    control-plane,master   12m   v1.21.5+vmware.1
tce-work-md-0-bcfdc4d79-vn9xb   Ready    <none>                 11m   v1.21.5+vmware.1
```
@@ -376,9 +386,9 @@ It took me quite a bit of trial and error to get this far and (being a k8s novic

#### Checking and modifying `dex` configuration

I had a lot of trouble figuring out how to correctly format the `member:1.2.840.113556.1.4.1941:` attribute in the LDAPS config so that it wouldn't get split into multiple attributes due to the trailing colon - and it took me forever to discover that was even the issue. What eventually did the trick for me was learning that I could look at (and modify!) the configuration for the `dex` app with:

```shell
kubectl -n tanzu-system-auth edit configmaps dex # [tl! .cmd]
[...] # [tl! .nocopy:start]
groupSearch:
  baseDN: OU=LAB,DC=lab,DC=bowdre,DC=net
  filter: (objectClass=group)
@@ -388,7 +398,7 @@ I had a lot of trouble figuring out how to correctly format the `member:1.2.840
- userAttr: DN
  groupAttr: 'member:1.2.840.113556.1.4.1941:'
host: win01.lab.bowdre.net:636
[...] # [tl! .nocopy:end]
```

This let me make changes on the fly until I got a working configuration and then work backwards from there to format the initial input correctly.

@@ -396,13 +406,13 @@ This let me make changes on the fly until I got a working configuration and then

#### Reviewing `dex` logs

Authentication attempts (at least on the LDAPS side of things) will show up in the logs for the `dex` pod running in the `tanzu-system-auth` namespace. This is a great place to look to see if the user isn't being found, credentials are invalid, or the groups aren't being enumerated correctly:

```shell
kubectl -n tanzu-system-auth get pods # [tl! .cmd]
NAME                   READY   STATUS    RESTARTS   AGE # [tl! .nocopy:2]
dex-7bf4f5d4d9-k4jfl   1/1     Running   0          40h

kubectl -n tanzu-system-auth logs dex-7bf4f5d4d9-k4jfl # [tl! .cmd]
# no such user # [tl! .nocopy:start]
{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=person)(sAMAccountName=johnny))","time":"2022-03-06T22:29:57Z"}
{"level":"error","msg":"ldap: no results returned for filter: \"(\u0026(objectClass=person)(sAMAccountName=johnny))\"","time":"2022-03-06T22:29:57Z"}
# invalid password
@@ -413,15 +423,15 @@ dex-7bf4f5d4d9-k4jfl   1/1     Running   0          40h
{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=person)(sAMAccountName=john))","time":"2022-03-06T22:31:21Z"}
{"level":"info","msg":"username \"john\" mapped to entry CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net","time":"2022-03-06T22:31:21Z"}
{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:=CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net))","time":"2022-03-06T22:31:21Z"}
{"level":"info","msg":"login successful: connector \"ldap\", username=\"john\", preferred_username=\"\", email=\"CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net\", groups=[\"vRA-Admins\" \"Tanzu-Admins\"]","time":"2022-03-06T22:31:21Z"} # [tl! .nocopy:end]
```

#### Clearing pinniped sessions

I couldn't figure out an elegant way to log out so that I could try authenticating as a different user, but I did discover that information about authenticated sessions gets stored in `~/.config/tanzu/pinniped/sessions.yaml`. The sessions expire after a while, but until that happens I'm able to keep on interacting with `kubectl` - and I'm not given an option to re-authenticate even if I wanted to.

So in lieu of a handy logout option, I was able to remove the cached sessions by deleting the file:

```shell
rm ~/.config/tanzu/pinniped/sessions.yaml # [tl! .cmd]
```

That let me use `kubectl get nodes` to trigger the authentication prompt again.

@@ -25,7 +25,7 @@ When I [set up my Tanzu Community Edition environment](/tanzu-community-edition-

The Tanzu CLI actually makes that pretty easy - once I figured out the appropriate incantation. I just needed to use the `tanzu management-cluster kubeconfig get` command on my Linux VM to export the `kubeconfig` of my management (`tce-mgmt`) cluster to a file:

```shell
tanzu management-cluster kubeconfig get --admin --export-file tce-mgmt-kubeconfig.yaml # [tl! .cmd]
```

I then used `scp` to pull the file from the VM into my local Linux environment, and proceeded to [install `kubectl`](/tanzu-community-edition-k8s-homelab/#kubectl-binary) and the [`tanzu` CLI](/tanzu-community-edition-k8s-homelab/#tanzu-cli) (making sure to also [enable shell auto-completion](/enable-tanzu-cli-auto-completion-bash-zsh/) along the way!).
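
That `scp` step might look something like this (the VM hostname here is hypothetical; the destination matches the path used below):

```shell
scp john@tanzu-vm.lab.bowdre.net:tce-mgmt-kubeconfig.yaml ~/projects/tanzu-homelab/tanzu-setup/
```
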
@@ -33,8 +33,9 @@ I then used scp to pull the file from the VM into my local Linux environment,

Now I'm ready to import the configuration locally with `tanzu login` on my Chromebook:

```shell
tanzu login --kubeconfig ~/projects/tanzu-homelab/tanzu-setup/tce-mgmt-kubeconfig.yaml \ # [tl! .cmd]
  --context tce-mgmt-admin@tce-mgmt --name tce-mgmt
✔  successfully logged in to management cluster using the kubeconfig tce-mgmt # [tl! .nocopy]
```

{{% notice tip "Use the absolute path" %}}

@@ -43,12 +44,12 @@ Pass in the full path to the exported kubeconfig file. This will help the Tanzu

Even though that's just importing the management cluster it actually grants access to both the management and workload clusters:

```shell
tanzu cluster list # [tl! .cmd]
  NAME      NAMESPACE  STATUS   CONTROLPLANE  WORKERS  KUBERNETES        ROLES   PLAN # [tl! .nocopy:2]
  tce-work  default    running  1/1           1/1      v1.21.2+vmware.1  <none>  dev

tanzu cluster get tce-work # [tl! .cmd]
  NAME      NAMESPACE  STATUS   CONTROLPLANE  WORKERS  KUBERNETES        ROLES # [tl! .nocopy:start]
  tce-work  default    running  1/1           1/1      v1.21.2+vmware.1  <none>
ℹ

@@ -62,9 +63,9 @@ NAME READY SEVERITY RE
  └─Workers
    └─MachineDeployment/tce-work-md-0
      └─Machine/tce-work-md-0-687444b744-crc9q   True   24h
# [tl! .nocopy:end]
tanzu management-cluster get # [tl! .cmd]
  NAME      NAMESPACE   STATUS   CONTROLPLANE  WORKERS  KUBERNETES        ROLES # [tl! .nocopy:start]
  tce-mgmt  tkg-system  running  1/1           1/1      v1.21.2+vmware.1  management

@@ -86,29 +87,29 @@ Providers:
  capi-kubeadm-bootstrap-system      bootstrap-kubeadm       BootstrapProvider       kubeadm       v0.3.23
  capi-kubeadm-control-plane-system  control-plane-kubeadm   ControlPlaneProvider    kubeadm       v0.3.23
  capi-system                        cluster-api             CoreProvider            cluster-api   v0.3.23
  capv-system                        infrastructure-vsphere  InfrastructureProvider  vsphere       v0.7.10 # [tl! .nocopy:end]
```

And I can then tell `kubectl` about the two clusters:

```shell
tanzu management-cluster kubeconfig get tce-mgmt --admin # [tl! .cmd]
Credentials of cluster 'tce-mgmt' have been saved # [tl! .nocopy:2]
You can now access the cluster by running 'kubectl config use-context tce-mgmt-admin@tce-mgmt'

tanzu cluster kubeconfig get tce-work --admin # [tl! .cmd]
Credentials of cluster 'tce-work' have been saved # [tl! .nocopy:1]
You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work'
```

And sure enough, there are my contexts:

```shell
kubectl config get-contexts # [tl! .cmd]
CURRENT   NAME                      CLUSTER    AUTHINFO         NAMESPACE # [tl! .nocopy:3]
          tce-mgmt-admin@tce-mgmt   tce-mgmt   tce-mgmt-admin
*         tce-work-admin@tce-work   tce-work   tce-work-admin

kubectl get nodes -o wide # [tl! .cmd]
NAME                             STATUS   ROLES                  AGE   VERSION            INTERNAL-IP     EXTERNAL-IP     OS-IMAGE                 KERNEL-VERSION   CONTAINER-RUNTIME # [tl! .nocopy:2]
tce-work-control-plane-vc2pb     Ready    control-plane,master   23h   v1.21.2+vmware.1   192.168.1.132   192.168.1.132   VMware Photon OS/Linux   4.19.198-1.ph3   containerd://1.4.6
tce-work-md-0-687444b744-crc9q   Ready    <none>                 23h   v1.21.2+vmware.1   192.168.1.133   192.168.1.133   VMware Photon OS/Linux   4.19.198-1.ph3   containerd://1.4.6
```

@@ -18,11 +18,12 @@ I can, and here's how I do it.

### The Script

The following PowerShell script will let you define a list of vCenters to be accessed, securely store your credentials for each vCenter, log in to every vCenter with a single command, and also close the connections when they're no longer needed. It's also a great starting point for any other custom functions you'd like to incorporate into your PowerCLI sessions.

```powershell
# torchlight! {"lineNumbers": true}
# PowerCLI_Custom_Functions.ps1
# Usage:
#   0) Edit $vCenterList to reference the vCenters in your environment.
#   1) Call 'Update-Credentials' to create/update a ViCredentialStoreItem to securely store your username and password.
#   2) Call 'Connect-vCenters' to simultaneously open connections to all the vCenters in your environment.
#   3) Do PowerCLI things.
#   4) Call 'Disconnect-vCenters' to cleanly close all ViServer connections because housekeeping.
Import-Module VMware.PowerCLI
@@ -54,6 +55,6 @@ powershell.exe -NoExit -Command ". C:\Scripts\PowerCLI_Custom_Functions.ps1"

### The Usage

Now just use that shortcut to open up PowerCLI when you wish to do things. The custom functions will be loaded and waiting for you.
1. Start by running `Update-Credentials`. It will prompt you for the username+password needed to log into each vCenter listed in `$vCenterList`. These can be the same or different accounts, but you will need to enter the credentials for each vCenter since they get stored in a separate `ViCredentialStoreItem`. You'll also run this function again if you need to change the password(s) in the future.
2. Log in to all the things by running `Connect-vCenters`.
3. Do your work.
4. When you're finished, be sure to call `Disconnect-vCenters` so you don't leave sessions open in the background.

@@ -29,10 +29,10 @@ I start off by heading to [tenable.com/products/nessus/nessus-essentials](https:

Tenable provides an [example `docker-compose.yml`](https://community.tenable.com/s/article/Deploy-Nessus-docker-image-with-docker-compose) to make it easy to get started:

```yaml
# torchlight! {"lineNumbers": true}
version: '3.1'

services:

  nessus:
    image: tenableofficial/nessus
    restart: always
@@ -47,6 +47,7 @@ services:

I can use that knowledge to craft something I can deploy on Kubernetes:

```yaml
# torchlight! {"lineNumbers": true}
apiVersion: v1
kind: Service
metadata:
@@ -92,19 +93,19 @@ spec:
        containerPort: 8834
```

Note that I'm configuring the `LoadBalancer` to listen on port `443` and route traffic to the pod on port `8834` so that I don't have to remember to enter an oddball port number when I want to connect to the web interface.

And now I can just apply the file:

```shell
kubectl apply -f nessus.yaml # [tl! .cmd]
service/nessus created # [tl! .nocopy:1]
deployment.apps/nessus created
```

I'll give it a moment or two to deploy and then check on the service to figure out what IP I need to use to connect:

```shell
kubectl get svc/nessus # [tl! .cmd]
NAME     TYPE           CLUSTER-IP     EXTERNAL-IP    PORT(S)         AGE # [tl! .nocopy:1]
nessus   LoadBalancer   100.67.16.51   192.168.1.79   443:31260/TCP   57s
```

@@ -114,7 +115,7 @@ I point my browser to `https://192.168.1.79` and see that it's a great time for
Eventually that gets replaced with a login screen, where I can authenticate using the username and password specified earlier in the YAML.

![Nessus login screen](nessus_login.png)

After logging in, I get prompted to run a discovery scan to identify hosts on the network. There's a note that hosts revealed by the discovery scan will *not* count against my 16-host limit unless/until I select individual hosts for more detailed scans. That's good to know for future efforts, but for now I'm focused on just scanning my one vCenter server so I dismiss the prompt.

What I *am* interested in is scanning my vCenter for the Log4Shell vulnerability so I'll hit the friendly blue **New Scan** button at the top of the *Scans* page to create my scan. That shows me a list of *Scan Templates*:
![Scan templates](scan_templates.png)
@ -142,4 +143,4 @@ And I can drill down into the vulnerability details:
|
||||||
|
|
||||||
This reveals a handful of findings related to old 1.x versions of Log4j (which went EOL in 2015 - yikes!) as well as the [CVE-2021-44832](https://nvd.nist.gov/vuln/detail/CVE-2021-44832) remote code execution vulnerability (which is resolved in Log4j 2.17.1), but the inclusion of Log4j 2.17.0 in vCenter 7.0U3c *was* sufficient to close the highly-publicized [CVE-2021-44228](https://nvd.nist.gov/vuln/detail/CVE-2021-44228) Log4Shell vulnerability. Hopefully VMware can get these other Log4j vulnerabilities taken care of in an upcoming vCenter release.

So there's that curiosity satisfied, and now I've got a handy new tool to play with in my lab.

@@ -22,13 +22,14 @@ tags:
comment: true # Disable comment if false.
---

I recently needed to export a list of all the Linux VMs in a rather large vSphere environment spanning multiple vCenters (and the entire globe), and I wanted to include information about which virtual datacenter each VM lived in to make it easier to map VMs to their physical location.

I've got a [`Connect-vCenters` function](/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/) that I use to quickly log into multiple vCenters at once. That then enables me to run a single query across the entire landscape - but what query? There isn't really a direct way to get datacenter information out of the results generated by `Get-VM`; I could run an additional `Get-Datacenter` query against each returned VM object but that doesn't sound very efficient.

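PowerCLI makes the multi-connect part pretty painless since it can hold sessions to several servers at once. As a rough sketch of the idea (this is my own illustration, not the actual `Connect-vCenters` function, and the `$vCenterList` contents are hypothetical):

```powershell
# Hypothetical sketch - not the actual Connect-vCenters function.
$vCenterList = @('vcsa-east.example.com', 'vcsa-west.example.com')
$credential = Get-Credential  # prompt once, reuse for every server
foreach ($vcsa in $vCenterList) {
  # each session lands in $global:DefaultVIServers, so later Get-VM and
  # Get-Datacenter calls will query every connected vCenter at once
  Connect-VIServer -Server $vcsa -Credential $credential | Out-Null
}
```
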
What I came up with is using `Get-Datacenter` to enumerate each virtual datacenter, and then list the VMs matching my query within:

```powershell
# torchlight! {"lineNumbers": true}
$linuxVms = foreach( $datacenter in ( Get-Datacenter )) {
  Get-Datacenter $datacenter | Get-VM | Where { $_.ExtensionData.Config.GuestFullName -notmatch "win" -and $_.Name -notmatch "vcls" } | `
    Select @{ N="Datacenter";E={ $datacenter.Name }},
    [...]
```

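The diff cuts that pipeline off after the first `Select` property. As a hedged sketch of how the rest of the pattern might be completed (the extra properties and the `Export-Csv` step are my additions for illustration, not necessarily the original's):

```powershell
# Hypothetical completion of the truncated snippet above.
$linuxVms = foreach ($datacenter in (Get-Datacenter)) {
  Get-Datacenter $datacenter | Get-VM |
    Where-Object { $_.ExtensionData.Config.GuestFullName -notmatch 'win' -and $_.Name -notmatch 'vcls' } |
    Select-Object @{ N = 'Datacenter'; E = { $datacenter.Name } },
      Name,
      @{ N = 'GuestOS'; E = { $_.ExtensionData.Config.GuestFullName } }
}
$linuxVms | Export-Csv -Path .\linuxVms.csv -NoTypeInformation  # hand the list off as a CSV
```
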
@@ -24,6 +24,7 @@ We've been working lately to use [HashiCorp Packer](https://www.packer.io/) to s

A coworker and I cobbled together a quick PowerShell solution which will download the files within a specified web URL to a designated directory (without recreating the nested folder structure):

```powershell
# torchlight! {"lineNumbers": true}
$outputdir = 'C:\Scripts\Download\'
$url = 'https://win01.lab.bowdre.net/stuff/files/'
[...]
```

@@ -38,7 +39,7 @@ $WebResponse.Links | Select-Object -ExpandProperty href -Skip 1 | ForEach-Object

```powershell
  $baseUrl = $url.split('/')            # ['https', '', 'win01.lab.bowdre.net', 'stuff', 'files']
  $baseUrl = $baseUrl[0,2] -join '//'   # 'https://win01.lab.bowdre.net'
  $fileUrl = '{0}{1}' -f $baseUrl.TrimEnd('/'), $_  # 'https://win01.lab.bowdre.net/stuff/files/filename.ext'
  Invoke-WebRequest -Uri $fileUrl -OutFile $filePath
}
```

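One gap worth flagging: the fragment above references `$filePath`, but the line that builds it falls outside this hunk. A minimal sketch of one way it could be derived (the `$fileName` variable is my own naming, not necessarily what the original used):

```powershell
# Hypothetical reconstruction of the elided $filePath assignment.
$fileName = $_.ToString().Split('/')[-1]                     # last URL segment, e.g. 'filename.ext'
$filePath = Join-Path -Path $outputdir -ChildPath $fileName  # 'C:\Scripts\Download\filename.ext'
```
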
@@ -9,7 +9,7 @@ title: 'PSA: halt replication before snapshotting linked vCenters'
toc: false
---

It's a good idea to take a snapshot of your virtual appliances before applying any updates, just in case. When you have multiple vCenter appliances operating in Enhanced Link Mode, though, it's important to make sure that the snapshots are in a consistent state. The vCenter `vmdird` service is responsible for continuously syncing data between the vCenters within a vSphere Single Sign-On (SSO) domain. Reverting to a snapshot where `vmdird`'s knowledge of the environment dramatically differed from that of the other vCenters could cause significant problems down the road or even result in having to rebuild a vCenter from scratch.

*(Yes, that's a lesson I learned the hard way - and warnings about that are tragically hard to come by from what I've seen. So I'm sharing my notes so that you can avoid making the same mistake.)*

@@ -20,34 +20,28 @@ Take these steps when you need to snapshot linked vCenters to avoid breaking rep

1. Open an SSH session to *all* the vCenters within the SSO domain.
2. Log in and enter `shell` to access the shell on each vCenter.
3. Verify that replication is healthy by running `/usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w [SSO_ADMIN_PASSWORD]` on each vCenter. You want to ensure that each host shows as available to all other hosts, and that each partner reports `Partner is 0 changes behind.` (a scripted take on this check is sketched after these steps):

   ```shell
   /usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w $ssoPass # [tl! .cmd]
   Partner: vcsa2.lab.bowdre.net # [tl! .nocopy:6]
   Host available: Yes
   Status available: Yes
   My last change number: 9346
   Partner has seen my change number: 9346
   Partner is 0 changes behind. # [tl! highlight]

   /usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w $ssoPass # [tl! .cmd]
   Partner: vcsa.lab.bowdre.net # [tl! .nocopy:6]
   Host available: Yes
   Status available: Yes
   My last change number: 9518
   Partner has seen my change number: 9518
   Partner is 0 changes behind. # [tl! highlight]
   ```
4. Stop `vmdird` on each vCenter by running `/bin/service-control --stop vmdird`:

   ```shell
   /bin/service-control --stop vmdird # [tl! .cmd]
   Operation not cancellable. Please wait for it to finish... # [tl! .nocopy:2]
   Performing stop operation on service vmdird...
   Successfully stopped service vmdird
   ```

@@ -55,13 +49,8 @@ Take these steps when you need to snapshot linked vCenters to avoid breaking rep

6. Start replication on each server again with `/bin/service-control --start vmdird`:

   ```shell
   /bin/service-control --start vmdird # [tl! .cmd]
   Operation not cancellable. Please wait for it to finish... # [tl! .nocopy]
   Performing start operation on service vmdird...
   Successfully started service vmdird
   ```

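Running that `vdcrepadmin` check by hand on every node gets tedious in a larger SSO domain. Here's a hedged sketch of how the verification could be scripted over SSH (the `Posh-SSH` module, the host list, and the plain-text password prompt are my assumptions, not part of the original steps - and the appliance's default shell would need to be bash for the remote command to run as-is):

```powershell
# Hypothetical helper - assumes the Posh-SSH module (Install-Module Posh-SSH)
# and root SSH access to each appliance.
$vcsaHosts = @('vcsa.lab.bowdre.net', 'vcsa2.lab.bowdre.net')
$credential = Get-Credential -Message 'root credentials for the VCSAs'
$ssoPass = Read-Host 'SSO administrator password'
foreach ($vcsa in $vcsaHosts) {
  $session = New-SSHSession -ComputerName $vcsa -Credential $credential -AcceptKey
  $result = Invoke-SSHCommand -SSHSession $session -Command `
    "/usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w $ssoPass"
  # flag any appliance whose partner output doesn't show it fully caught up
  if ($result.Output -notcontains 'Partner is 0 changes behind.') {
    Write-Warning "$vcsa reports a replication partner that is behind!"
  }
  Remove-SSHSession -SSHSession $session | Out-Null
}
```
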
@@ -38,6 +38,7 @@ If you've got any **Windows Server 2022** VMs with **[Secure Boot](https://docs.

I put together a quick PowerCLI query to help identify impacted VMs in my environment:

```powershell
# torchlight! {"lineNumbers": true}
$secureBoot2022VMs = foreach($datacenter in (Get-Datacenter)) {
  $datacenter | Get-VM |
    Where-Object {$_.Guest.OsFullName -Match 'Microsoft Windows Server 2022' -And $_.ExtensionData.Config.BootOptions.EfiSecureBootEnabled} |
    [...]
```

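That hunk also stops mid-pipeline. As a hedged guess at how the query might be finished off (the selected properties and the count at the end are my additions for illustration):

```powershell
# Hypothetical completion of the truncated query above.
$secureBoot2022VMs = foreach ($datacenter in (Get-Datacenter)) {
  $datacenter | Get-VM |
    Where-Object { $_.Guest.OsFullName -Match 'Microsoft Windows Server 2022' -And $_.ExtensionData.Config.BootOptions.EfiSecureBootEnabled } |
    Select-Object Name, @{ N = 'Datacenter'; E = { $datacenter.Name } }
}
'{0} VMs may be impacted' -f ($secureBoot2022VMs | Measure-Object).Count
```
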
@@ -11,7 +11,7 @@ title: Recreating Hashnode Series (Categories) in Jekyll on GitHub Pages

I recently [migrated this site](/virtually-potato-migrated-to-github-pages) from Hashnode to GitHub Pages, and I'm really getting into the flexibility and control that managing the content through Jekyll provides. So, naturally, after finalizing the move I got to work recreating Hashnode's "Series" feature, which lets you group posts together and highlight them as a collection. One of the things I liked about the Series setup was that I could control the order of the collected posts: my posts about [building out the vRA environment in my homelab](/series/vra8) are probably best consumed in chronological order (oldest to newest) since the newer posts build upon the groundwork laid by the older ones, while posts about my [other one-off projects](/series/projects) could really be enjoyed in any order.

I quickly realized that if I were hosting this pretty much anywhere *other* than GitHub Pages I could simply leverage the [`jekyll-archives`](https://github.com/jekyll/jekyll-archives) plugin to manage this for me - but, alas, that's not one of the [plugins supported by the platform](https://pages.github.com/versions/). I needed to come up with my own solution, and being still quite new to Jekyll (and this whole website design thing in general) it took me a bit of fumbling to get it right.

### Reviewing the theme-provided option
The Jekyll theme I'm using ([Minimal Mistakes](https://github.com/mmistakes/minimal-mistakes)) comes with [built-in support](https://mmistakes.github.io/mm-github-pages-starter/categories/) for a [category archive page](/series), which (like the [tags page](/tags)) displays all the categorized posts on a single page. Links at the top will let you jump to an appropriate anchor to start viewing the selected category, but it's not really an elegant way to display a single category.

@@ -19,6 +19,7 @@ The Jekyll theme I'm using ([Minimal Mistakes](https://github.com/mmistakes/mini

It's a start, though, so I took a few minutes to check out how it's being generated. The category archive page lives at [`_pages/category-archive.md`](https://raw.githubusercontent.com/mmistakes/mm-github-pages-starter/master/_pages/category-archive.md):
```markdown
// torchlight! {"lineNumbers": true}
---
title: "Posts by Category"
layout: categories
[...]
```

@@ -30,8 +31,9 @@ author_profile: true

The `title` indicates what's going to be written in bold text at the top of the page, the `permalink` says that it will be accessible at `http://localhost/categories/`, and the nice little `author_profile` sidebar will appear on the left.

This page then calls the `categories` layout, which is defined in [`_layouts/categories.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_layouts/categories.html):
```jinja-html
# torchlight! {"lineNumbers": true}
---
layout: archive
---
[...]
```

@@ -81,39 +83,43 @@ I wanted my solution to preserve the formatting that's used by the theme elsewhe

### Defining a new layout
I create a new file called `_layouts/series.html` which will define how these new series pages get rendered. It starts out just like the default `categories.html` one:

```jinja-html
# torchlight! {"lineNumbers": true}
---
layout: archive
---

{{ content }}
```

That `{{ content }}` block will let me define text to appear above the list of articles - very handy. Much of the original `categories.html` code has to do with iterating through the list of categories. I won't need that, though, so I'll jump straight to setting what layout the entries on this page will use:
```jinja-html
{% assign entries_layout = page.entries_layout | default: 'list' %}
```

I'll be including two custom variables in the [Front Matter](https://jekyllrb.com/docs/front-matter/) for my category pages: `tag` to specify what category to filter on, and `sort_order` which will be set to `reverse` if I want the older posts up top. I'll be able to access these in the layout as `page.tag` and `page.sort_order`, respectively. So I'll go ahead and grab all the posts which are categorized with `page.tag`, and then decide whether the posts will get sorted normally or in reverse:
```jinja-html
# torchlight! {"lineNumbers": true}
{% assign posts = site.categories[page.tag] %}
{% if page.sort_order == 'reverse' %}
  {% assign posts = posts | reverse %}
{% endif %}
```

And then I'll loop through each post (in either normal or reverse order) and insert them into the rendered page:
```jinja-html
# torchlight! {"lineNumbers": true}
<div class="entries-{{ entries_layout }}">
  {% for post in posts %}
    {% include archive-single.html type=entries_layout %}
  {% endfor %}
</div>
```

Putting it all together now, here's my new `_layouts/series.html` file:
```jinja-html
# torchlight! {"lineNumbers": true}
---
layout: archive
---
[...]
```

@@ -134,7 +140,8 @@ layout: archive

### Series pages
Since I can't use a plugin to automatically generate pages for each series, I'll have to do it manually. Fortunately this is pretty easy, and I've got a limited number of categories/series to worry about. I started by making a new `_pages/series-vra8.md` and setting it up thusly:
```markdown
// torchlight! {"lineNumbers": true}
---
title: "Adventures in vRealize Automation 8"
layout: series
permalink: "/series/vra8"
[...]
```

@@ -145,7 +152,7 @@ header:

```markdown
  teaser: assets/images/posts-2020/RtMljqM9x.png
---

*Follow along as I create a flexible VMware vRealize Automation 8 environment for provisioning virtual machines - all from the comfort of my Intel NUC homelab.*
```

You can see that this page is referencing the series layout I just created, and it's going to live at `http://localhost/series/vra8` - precisely where this series was on Hashnode. I've tagged it with the category I want to feature on this page, and specified that the posts will be sorted in reverse order so that anyone reading through the series will start at the beginning (I hear it's a very good place to start). I also added a teaser image which will be displayed when I link to the series from elsewhere. And I included a quick little italicized blurb to tell readers what the series is about.

@@ -155,7 +162,8 @@ Check it out [here](/series/vra8):

The other series pages will be basically the same, just without the reverse sort directive. Here's `_pages/series-tips.md`:
```markdown
// torchlight! {"lineNumbers": true}
---
title: "Tips & Tricks"
layout: series
permalink: "/series/tips"
[...]
```

@@ -165,13 +173,14 @@ header:

```markdown
  teaser: assets/images/posts-2020/kJ_l7gPD2.png
---

*Useful tips and tricks I've stumbled upon.*
```

### Changing the category permalink
Just in case someone wants to look at all the post series in one place, I'll be keeping the existing category archive page around, but I'll want it to be found at `/series/` instead of `/categories/`. I'll start with going into the `_config.yml` file and changing the `category_archive` path:

```yaml
# torchlight! {"lineNumbers": true}
category_archive:
  type: liquid
  # path: /categories/
[...]
```

@@ -183,45 +192,49 @@ tag_archive:

I'll also rename `_pages/category-archive.md` to `_pages/series-archive.md` and update its title and permalink:
```markdown
// torchlight! {"lineNumbers": true}
---
title: "Posts by Series"
layout: categories
permalink: /series/
author_profile: true
---
```

### Fixing category links in posts
The bottom of each post has a section which lists the tags and categories to which it belongs. Right now, those are still pointing to the category archive page (`/series/#vra8`) instead of the series feature pages I created (`/series/vra8`).

![Old category link](20210724-old-category-link.png)

That *works* but I'd rather it reference the fancy new pages I created. Tracking down where to make that change was a bit of a journey.

I started with the [`_layouts/single.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_layouts/single.html) file which is the layout I'm using for individual posts. This bit near the end gave me the clue I needed:
```jinja-html
# torchlight! {"lineNumbers": true}
<footer class="page__meta">
  {% if site.data.ui-text[site.locale].meta_label %}
    <h4 class="page__meta-title">{{ site.data.ui-text[site.locale].meta_label }}</h4>
  {% endif %}
  {% include page__taxonomy.html %}
  {% include page__date.html %}
</footer>
```

It looks like [`page__taxonomy.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_includes/page__taxonomy.html) is being used to display the tags and categories, so I then went to that file in the `_include` directory:
```jinja-html
# torchlight! {"lineNumbers": true}
{% if site.tag_archive.type and page.tags[0] %}
  {% include tag-list.html %}
{% endif %}

{% if site.category_archive.type and page.categories[0] %}
  {% include category-list.html %}
{% endif %}
```

Okay, it looks like [`_include/category-list.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_includes/category-list.html) is what I actually want. Here's that file:
```jinja-html
# torchlight! {"lineNumbers": true}
{% case site.category_archive.type %}
  {% when "liquid" %}
    {% assign path_type = "#" %}
  {% when "jekyll-archives" %}
[...]
```

@@ -239,19 +252,21 @@ Okay, it looks like [`_include/category-list.html`](https://github.com/mmistakes

```jinja-html
    {% endfor %}
  </span>
</p>
{% endif %}
```

I'm using the `liquid` archive approach since I can't use the `jekyll-archives` plugin, so I can see that it's setting the `path_type` to `"#"`. And near the bottom of the file, I can see that it's assembling the category link by slugifying the `category_word`, sticking the `path_type` in front of it, and then putting the `site.category_archive.path` (which I edited earlier in `_config.yml`) in front of that. So that's why my category links look like `/series/#category`. I can just edit the top of this file to statically set `path_type = nil` and that should clear this up in a jiffy:
```jinja-html
# torchlight! {"lineNumbers": true}
{% assign path_type = nil %}
{% if site.category_archive.path %}
  {% assign categories_sorted = page.categories | sort_natural %}
[...]
```

To sell the series illusion even further, I can pop into [`_data/ui-text.yml`](https://github.com/mmistakes/minimal-mistakes/blob/master/_data/ui-text.yml) to update the string used for `categories_label`:
```yaml
# torchlight! {"lineNumbers": true}
meta_label :
tags_label : "Tags:"
categories_label : "Series:"
[...]
```

@@ -265,6 +280,7 @@ Much better!

### Updating the navigation header
And, finally, I'll want to update the navigation links at the top of each page to help visitors find my new featured series pages. For that, I can just edit `_data/navigation.yml` with links to my new pages:
```yaml
# torchlight! {"lineNumbers": true}
main:
  - title: "vRealize Automation 8"
    url: /series/vra8
[...]
```

@@ -30,12 +30,13 @@ I will also add some properties to tell PowerCLI (and the `Invoke-VmScript` cmdl

##### Inputs section
I'll kick this off by going into Cloud Assembly and editing the `WindowsDemo` template I've been working on for the past few eons. I'll add a `diskSize` input:
```yaml
# torchlight! {"lineNumbers": true}
formatVersion: 1
inputs:
  site: [...]
  image: [...]
  size: [...]
  diskSize: # [tl! focus:5]
    title: 'System drive size'
    default: 60
    type: integer
[...]
```

@@ -46,14 +47,15 @@ inputs:

The default value is set to 60GB to match the VMDK attached to the source template; that's also the minimum value since shrinking disks gets messy.

I'll also drop in an `adminsList` input at the bottom of the section:
```yaml
# torchlight! {"lineNumbers": true}
[...]
  poc_email: [...]
  ticket: [...]
  adminsList: # [tl! focus:4]
    type: string
    title: Administrators
    description: Comma-separated list of domain accounts/groups which need admin access to this server.
[...]
```

@@ -64,7 +66,7 @@ resources:

##### Resources section
In the Resources section of the cloud template, I'm going to add a few properties that will tell the ABX script how to connect to the appropriate vCenter and then the VM.
- `vCenter`: The vCenter server where the VM will be deployed, and thus the server which PowerCLI will authenticate against. In this case, I've only got one vCenter, but a larger environment might have multiples. Defining this in the cloud template makes it easy to select automagically if needed. (For instance, if I had a `bow-vcsa` and a `dre-vcsa` for my different sites, I could do something like `vCenter: '${input.site}-vcsa.lab.bowdre.net'` here.)
- `vCenterUser`: The username with rights to the VM in vCenter. Again, this doesn't have to be a static assignment.
- `templateUser`: This is the account that will be used by `Invoke-VmScript` to log in to the guest OS. My template will use the default `Administrator` account for non-domain systems, but the `lab\vra` service account on domain-joined systems (using the `adJoin` input I [set up earlier](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template)).

@@ -72,6 +74,7 @@ In the Resources section of the cloud template, I'm going to add a few propertie

I'll also include the `adminsList` input from earlier so that can get passed to ABX as well. And I'm going to add in an `adJoin` property (mapped to the [existing `input.adJoin`](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template)) so that I'll have that to work with later.

```yaml
# torchlight! {"lineNumbers": true}
[...]
resources:
  Cloud_vSphere_Machine_1:
[...]
```

@@ -80,7 +83,7 @@ resources:

```yaml
      image: '${input.image}'
      flavor: '${input.size}'
      site: '${input.site}'
      vCenter: vcsa.lab.bowdre.net # [tl! focus:3]
      vCenterUser: vra@lab.bowdre.net
      templateUser: '${input.adJoin ? "vra@lab" : "Administrator"}'
      adminsList: '${input.adminsList}'
[...]
```

@@ -89,16 +92,17 @@ resources:

```yaml
      app: '${input.app}'
      adJoin: '${input.adJoin}'
      ignoreActiveDirectory: '${!input.adJoin}'
[...]
```

And I will add in a `storage` property as well which will automatically adjust the deployed VMDK size to match the specified input:
```yaml
# torchlight! {"lineNumbers": true}
[...]
      description: '${input.description}'
      networks: [...]
      constraints: [...]
      storage: # [tl! focus:1]
        bootDiskCapacityInGB: '${input.diskSize}'
  Cloud_vSphere_Network_1:
    type: Cloud.vSphere.Network
[...]
```

@@ -109,6 +113,7 @@ And I will add in a `storage` property as well which will automatically adjust t

##### Complete template
Okay, all together now:
```yaml
# torchlight! {"lineNumbers": true}
formatVersion: 1
inputs:
  site:
[...]
```

@@ -196,13 +201,13 @@ inputs:

```yaml
  poc_email:
    type: string
    title: Point of Contact Email
    default: jack.shephard@example.com
    pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$'
  ticket:
    type: string
    title: Ticket/Request Number
    default: 4815162342
  adminsList:
    type: string
    title: Administrators
    description: Comma-separated list of domain accounts/groups which need admin access to this server.
[...]
```

@@ -297,6 +302,7 @@ And I'll pop over to the right side to map the Action Constants I created earlie

Now for The Script:
```powershell
# torchlight! {"lineNumbers": true}
<# vRA 8.x ABX action to perform certain in-guest actions post-deploy:
    Windows:
      - auto-update VM tools
[...]
```

@@ -304,12 +310,12 @@ Now for The Script:

```powershell
      - extend C: volume to fill disk
      - set up remote access
      - create a scheduled task to (attempt to) apply Windows updates

    ## Action Secrets:
      templatePassWinDomain      # password for domain account with admin rights to the template (domain-joined deployments)
      templatePassWinWorkgroup   # password for local account with admin rights to the template (standalone deployments)
      vCenterPassword            # password for vCenter account passed from the cloud template

    ## Action Inputs:
    ## Inputs from deployment:
      resourceNames[0]           # VM name [BOW-DVRT-XXX003]
[...]
```

@@ -326,8 +332,8 @@ function handler($context, $inputs) {

```powershell
  $vcUser = $inputs.customProperties.vCenterUser
  $vcPassword = $context.getSecret($inputs."vCenterPassword")
  $vCenter = $inputs.customProperties.vCenter

  # Create vmtools connection to the VM
  $vmName = $inputs.resourceNames[0]
  Connect-ViServer -Server $vCenter -User $vcUser -Password $vcPassword -Force
  $vm = Get-VM -Name $vmName
[...]
```

@@ -335,13 +341,13 @@ function handler($context, $inputs) {

```powershell
  if (-not (Wait-Tools -VM $vm -TimeoutSeconds 180)) {
    Write-Error "Unable to establish connection with VM tools" -ErrorAction Stop
  }

  # Detect OS type
  $count = 0
  While (!$osType) {
    Try {
      $osType = ($vm | Get-View).Guest.GuestFamily.ToString()
      $toolsStatus = ($vm | Get-View).Guest.ToolsStatus.ToString()
    } Catch {
      # 60s timeout
      if ($count -ge 12) {
[...]
```

@@ -354,7 +360,7 @@ function handler($context, $inputs) {

```powershell
    }
  }
  Write-Host "$vmName is a $osType and its tools status is $toolsStatus."

  # Update tools on Windows if out of date
  if ($osType.Equals("windowsGuest") -And $toolsStatus.Equals("toolsOld")) {
    Write-Host "Updating VM Tools..."
[...]
```

@@ -364,7 +370,7 @@ function handler($context, $inputs) {

```powershell
      Write-Error "Unable to establish connection with VM tools" -ErrorAction Stop
    }
  }

  # Run OS-specific tasks
  if ($osType.Equals("windowsGuest")) {
    # Initialize Windows variables
[...]
```

@@ -373,7 +379,7 @@ function handler($context, $inputs) {

```powershell
    $adJoin = $inputs.customProperties.adJoin
    $templateUser = $inputs.customProperties.templateUser
    $templatePassword = $adJoin.Equals("true") ? $context.getSecret($inputs."templatePassWinDomain") : $context.getSecret($inputs."templatePassWinWorkgroup")

    # Add domain accounts to local administrators group
    if ($adminsList.Length -gt 0 -And $adJoin.Equals("true")) {
      # Standardize users entered without domain as DOMAIN\username
[...]
```

@@ -440,7 +446,7 @@ function handler($context, $inputs) {

```powershell
    Start-Sleep -s 10
    Write-Host "Creating a scheduled task to apply updates..."
    $runUpdateScript = Invoke-VMScript -VM $vm -ScriptText $updateScript -GuestUser $templateUser -GuestPassword $templatePassword
    Write-Host "Created task:`n" $runUpdateScript.ScriptOutput "`n"
  } elseif ($osType.Equals("linuxGuest")) {
    #TODO
    Write-Host "Linux systems not supported by this action... yet"
[...]
```

@@ -479,7 +485,7 @@ I do have another subscription on that event already, [`VM Post-Provisioning`](/a

After hitting the **Save** button, I go back to that other `VM Post-Provisioning` subscription, set it to enable blocking, and give it a priority of `1`:

![Blocking VM Post-Provisioning](20210903_old_subscription_blocking.png)

This will ensure that the new subscription fires after the older one completes, and that should avoid any conflicts between the two.

### Testing
Alright, now let's see if it worked. I head into Service Broker to submit the deployment request:

@@ -499,50 +505,50 @@ Logging in to server.

```
logged in to server vcsa.lab.bowdre.net:443
Read-only file system
09/03/2021 19:08:27   Get-VM   Finished execution
09/03/2021 19:08:27   Get-VM
Waiting for VM Tools to start...
09/03/2021 19:08:29   Wait-Tools   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:08:29   Wait-Tools   Finished execution
09/03/2021 19:08:29   Wait-Tools
09/03/2021 19:08:29   Get-View   Finished execution
09/03/2021 19:08:29   Get-View
09/03/2021 19:08:29   Get-View   Finished execution
09/03/2021 19:08:29   Get-View
BOW-PSVS-XXX001 is a windowsGuest and its tools status is toolsOld.
Updating VM Tools...
09/03/2021 19:08:30   Update-Tools   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:08:30   Update-Tools   Finished execution
09/03/2021 19:08:30   Update-Tools
Waiting for VM Tools to start...
09/03/2021 19:09:00   Wait-Tools   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:09:00   Wait-Tools   Finished execution
09/03/2021 19:09:00   Wait-Tools
Administrators: "lab\testy"
Attempting to add administrator accounts...
09/03/2021 19:09:10   Invoke-VMScript   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:09:10   Invoke-VMScript   Finished execution
09/03/2021 19:09:10   Invoke-VMScript
Successfully added ["lab\testy"] to Administrators group.
Attempting to extend system volume...
09/03/2021 19:09:27   Invoke-VMScript   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:09:27   Invoke-VMScript   Finished execution
09/03/2021 19:09:27   Invoke-VMScript
Successfully extended system partition.
Attempting to enable remote access (RDP, WMI, File and Printer Sharing, PSRemoting)...
09/03/2021 19:09:49   Invoke-VMScript   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:09:49   Invoke-VMScript   Finished execution
09/03/2021 19:09:49   Invoke-VMScript
Successfully enabled remote access.
Creating a scheduled task to apply updates...
09/03/2021 19:10:12   Invoke-VMScript   5222b516-ae2c-5740-2926-77cd21441f27
09/03/2021 19:10:12   Invoke-VMScript   Finished execution
09/03/2021 19:10:12   Invoke-VMScript
Created task:

TaskPath   TaskName          State
--------   --------          -----
\          Initial_Updates   Ready
\          Initial_Updates   Ready
```

So it *claims* to have successfully updated the VM tools, added `lab\testy` to the local `Administrators` group, extended the `C:` volume to fill the 65GB virtual disk, added firewall rules to permit remote access, and created a scheduled task to apply updates. I can open a console session to the VM to spot-check the results.

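For the spot-check itself, a few quick commands run in that console session could confirm each claim (these particular checks are my suggestions, not part of the workflow above):

```powershell
# Hypothetical in-guest spot-checks for each of the script's claims.
Get-LocalGroupMember -Group 'Administrators'                   # was lab\testy added?
Get-Volume -DriveLetter C | Select-Object Size, SizeRemaining  # did C: grow to fill the disk?
Get-NetFirewallRule -DisplayGroup 'Remote Desktop' |
  Select-Object DisplayName, Enabled                           # are the remote access rules on?
Get-ScheduledTask -TaskName 'Initial_Updates'                  # does the update task exist?
```
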
@@ -48,9 +48,9 @@ site

```
  └── third-post-image-4.png
```

So the article contents go under `site/content/post/` in a file called `name-of-article.md`. Each article may embed image (or other file types), and those get stored in `site/static/images/post/` and referenced like `![Image for first post](/images/post/first-post-image-1.png)`. When Hugo builds a site, it processes the stuff under the `site/content/` folder to render the Markdown files into browser-friendly HTML pages but it _doesn't_ process anything in the `site/static/` folder; that's treated as static content and just gets dropped as-is into the resulting site.
|
So the article contents go under `site/content/post/` in a file called `name-of-article.md`. Each article may embed image (or other file types), and those get stored in `site/static/images/post/` and referenced like `![Image for first post](/images/post/first-post-image-1.png)`. When Hugo builds a site, it processes the stuff under the `site/content/` folder to render the Markdown files into browser-friendly HTML pages but it _doesn't_ process anything in the `site/static/` folder; that's treated as static content and just gets dropped as-is into the resulting site.
|
||||||
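
For reference, that build step is just the stock Hugo command; nothing in this post changes how the site gets built:

```shell
# render site/content/ into HTML and copy site/static/ into public/ untouched
hugo
```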

It's functional, but things can get pretty messy when you've got a bunch of image files and are struggling to keep track of which images go with which post.

Like I mentioned earlier, Hugo's Page Bundles group a page's resources together in one place. Each post gets its own folder under `site/content/` and then all of the other files it needs to reference can get dropped in there too. With Page Bundles, the folder tree looks like this:

@ -78,25 +78,26 @@ site
└── logo.png
```

Images and other files are now referenced in the post directly like `![Image for post 1](/first-post-image-1.png)`, and this makes it a lot easier to keep track of which images go with which post. And since the files aren't considered to be static anymore, Page Bundles enables Hugo to perform certain [Image Processing tasks](https://gohugo.io/content-management/image-processing/) when the site gets built.

Anyway, I wanted to start using Page Bundles but didn't want to have to manually go through all my posts to move the images and update the paths, so I spent a few minutes cobbling together a quick script to help me out. It's pretty similar to the one I created to help [migrate images from Hashnode to my Jekyll site](/script-to-update-image-embed-links-in-markdown-files/) last time around - and, like that script, it's not pretty, polished, or flexible in the least, but it did the trick for me.

This one needs to be run from one step above the site root (`../site/` in the example above), and it gets passed the relative path to a post (`site/content/posts/first-post.md`). From there, it will create a new folder with the same name (`site/content/posts/first-post/`) and move the post into it while renaming it to `index.md` (`site/content/posts/first-post/index.md`).

It then looks through the newly-relocated post to find all the image embeds. It moves the image files into the post directory, and then updates the post to point to the new image locations.

Next it updates the links for any thumbnail images mentioned in the front matter post metadata. In most of my past posts, I reused an image already embedded in the post as the thumbnail, so those files will have already been moved by the time the script gets to that point. For the few exceptions, it moves those image files over as well.

Lastly, it changes the `usePageBundles` flag from `false` to `true` so that Hugo knows what we've done. The relocation step is sketched below, and the full script follows.
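
A minimal sketch of just that relocation step, under the same assumptions (the `$1` argument is the relative path to the post, exactly as the full script expects):

```shell
post="$1"             # e.g. site/content/posts/first-post.md
bundle="${post%.md}"  # e.g. site/content/posts/first-post
mkdir -p "$bundle"
mv "$post" "$bundle/index.md"
```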

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash
# Hasty script to convert a given standard Hugo post (where the post content and
# images are stored separately) to a Page Bundle (where the content and images are
# stored together in the same directory).
#
# Run this from the directory directly above the site root, and provide the relative
# path to the existing post that needs to be converted.
#
# Usage: ./convert-to-pagebundle.sh vpotato/content/posts/hello-hugo.md

@ -12,24 +12,23 @@ title: Script to update image embed links in Markdown files
toc: false
---

I'm preparing to migrate this blog thingy from Hashnode (which has been great!) to a [GitHub Pages site with Jekyll](https://docs.github.com/en/pages/setting-up-a-github-pages-site-with-jekyll/creating-a-github-pages-site-with-jekyll) so that I can write posts locally and then just do a `git push` to publish them - and get some more practice using `git` in the process. Of course, I've written some admittedly-great content here and I don't want to abandon that.

Hashnode helpfully backs up my posts in Markdown format to a private GitHub repo automatically, so it was easy to clone those into a local working directory, but all the embedded images were still hosted on Hashnode:

```markdown

![Clever image title](https://cdn.hashnode.com/res/hashnode/image/upload/v1600098180227/lhTnVwCO3.png)

```

I wanted to download those images to `./assets/images/posts-2020/` within my local Jekyll working directory, and then update the `*.md` files to reflect the correct local path... without doing it all manually. It took a bit of trial and error to get the regex working just right (and the result is neither pretty nor elegant), but here's what I came up with:

```shell
# torchlight! {"lineNumbers": true}
#!/bin/bash
# Hasty script to process a blog post markdown file, capture the URL for embedded images,
# download the image locally, and modify the markdown file with the relative image path.
#
# Run it from the top level of a Jekyll blog directory for best results, and pass the
# filename of the blog post you'd like to process.
#
# Ex: ./imageMigration.sh 2021-07-19-Bulk-migrating-images-in-a-blog-post.md
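# (The body of the script is elided by this diff; the gist, as described above,
#  is a grep-for-image-URLs / download / sed-replace loop, roughly like:
#    for url in $(grep -o 'https://cdn\.hashnode\.com[^)]*' "$1"); do
#      file="$(basename "$url")"
#      curl -sL "$url" -o "assets/images/posts-2020/$file"
#      sed -i "s|$url|/assets/images/posts-2020/$file|g" "$1"
#    done
#  - a sketch only, not the exact regex used here.)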
@ -49,16 +48,14 @@ done

I could then run that against all of the Markdown posts under `./_posts/` with:

```shell
for post in $(ls _posts/); do ~/scripts/imageMigration.sh $post; done # [tl! .cmd]
```
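
One caveat worth noting: looping over `ls` output chokes on any filename with spaces in it. A glob-based version of the same one-liner avoids that (a sketch, same hypothetical script location):

```shell
for post in _posts/*.md; do ~/scripts/imageMigration.sh "$(basename "$post")"; done
```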

And the image embeds in the local copy of my posts now all look like this:

```markdown

![Clever image title](lhTnVwCO3.png)

```

Brilliant!

@ -54,8 +54,8 @@ The first step in getting up and running with Tailscale is to sign up at [https:

Once you have a Tailscale account, you're ready to install the Tailscale client. The [download page](https://tailscale.com/download) outlines how to install it on various platforms, and also provides a handy-dandy one-liner to install it on Linux:

```shell
curl -fsSL https://tailscale.com/install.sh | sh # [tl! .cmd]
```

After the install completes, it will tell you exactly what you need to do next:

@ -71,9 +71,9 @@ There are also Tailscale apps available for [iOS](https://tailscale.com/download
#### Basic `tailscale up`
Running `sudo tailscale up` then reveals the next step:

```shell
sudo tailscale up # [tl! .cmd]
# [tl! .nocopy:3]
To authenticate, visit:

  https://login.tailscale.com/a/1872939939df

@ -83,8 +83,8 @@ I can copy that address into a browser and I'll get prompted to log in to my Tai

That was pretty easy, right? But what if I can't easily get to a web browser from the terminal session on a certain device? No worries, `tailscale up` has a flag for that:

```shell
sudo tailscale up --qr # [tl! .cmd]
```

That will convert the URL to a QR code that I can scan from my phone.

@ -93,44 +93,44 @@ That will convert the URL to a QR code that I can scan from my phone.
There are a few additional flags that can be useful under certain situations:

- `--advertise-exit-node` to tell the tailnet that this could be used as an exit node for internet traffic
  ```shell
  sudo tailscale up --advertise-exit-node # [tl! .cmd]
  ```
- `--advertise-routes` to let the node perform subnet routing functions to provide connectivity to specified local subnets
  ```shell
  sudo tailscale up --advertise-routes "192.168.1.0/24,172.16.0.0/16" # [tl! .cmd]
  ```
- `--advertise-tags`[^tags] to associate the node with certain tags for ACL purposes (like `tag:home` to identify stuff in my home network and `tag:cloud` to label external cloud-hosted resources)
  ```shell
  sudo tailscale up --advertise-tags "tag:cloud" # [tl! .cmd]
  ```
- `--hostname` to manually specify a hostname to use within the tailnet
  ```shell
  sudo tailscale up --hostname "tailnode" # [tl! .cmd]
  ```
- `--shields-up` to block incoming traffic
  ```shell
  sudo tailscale up --shields-up # [tl! .cmd]
  ```

These flags can also be combined with each other:
```shell
sudo tailscale up --hostname "tailnode" --advertise-exit-node --qr # [tl! .cmd]
```
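
For what it's worth, actually *using* one of those advertised exit nodes from another machine looks something like this (a sketch; the exit node generally has to be approved in the admin console first):

```shell
sudo tailscale up --exit-node "[EXIT_NODE_IP]"
```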

[^tags]: Before being able to assign tags at the command line, you must first define tag owners who can manage the tag. On a personal account, you've only got one user to worry about, but you still have to set this up first. I'll go over this in a bit but here's [the documentation](https://tailscale.com/kb/1068/acl-tags/#defining-a-tag) if you want to skip ahead.

#### Sidebar: Tailscale on VyOS
Getting Tailscale on [my VyOS virtual router](/vmware-home-lab-on-intel-nuc-9/#vyos) was unfortunately a little more involved than [leveraging the built-in WireGuard capability](/cloud-based-wireguard-vpn-remote-homelab-access/#configure-vyos-router-as-wireguard-peer). I found the [vyos-tailscale](https://github.com/DMarby/vyos-tailscale) project to help with building a customized VyOS installation ISO with the `tailscaled` daemon added in. I was then able to copy the ISO over to my VyOS instance and install it as if it were a [standard upgrade](https://docs.vyos.io/en/latest/installation/update.html). I could then bring up the interface, advertise my home networks, and make it available as an exit node with:
```shell
sudo tailscale up --advertise-exit-node --advertise-routes "192.168.1.0/24,172.16.0.0/16" # [tl! .cmd]
```

#### Other `tailscale` commands
Once there are a few members, I can use the `tailscale status` command to see a quick overview of the tailnet:
```shell
tailscale status # [tl! .cmd]
100.115.115.39  deb01        john@ linux   - # [tl! .nocopy:start]
100.118.115.69  ipam         john@ linux   -
100.116.90.109  johns-iphone john@ iOS     -
100.116.31.85   matrix       john@ linux   -
@ -138,16 +138,16 @@ Once there are a few members, I can use the `tailscale status` command to see a
100.94.127.1    pixelbook    john@ android -
100.75.110.50   snikket      john@ linux   -
100.96.24.81    vyos         john@ linux   -
100.124.116.125 win01        john@ windows - # [tl! .nocopy:end]
```

Without doing any other configuration beyond just installing Tailscale and connecting it to my account, I can now easily connect from any of these devices to any of the other devices using the listed Tailscale IP[^magicdns]. Entering `ssh 100.116.31.85` will connect me to my Matrix server.

`tailscale ping` lets me check the latency between two Tailscale nodes at the Tailscale layer; the first couple of pings will likely be delivered through a nearby DERP server until the NAT traversal magic is able to kick in:

```shell
tailscale ping snikket # [tl! .cmd]
pong from snikket (100.75.110.50) via DERP(nyc) in 34ms # [tl! .nocopy:3]
pong from snikket (100.75.110.50) via DERP(nyc) in 35ms
pong from snikket (100.75.110.50) via DERP(nyc) in 35ms
pong from snikket (100.75.110.50) via [PUBLIC_IP]:41641 in 23ms

@ -155,9 +155,9 @@ pong from snikket (100.75.110.50) via [PUBLIC_IP]:41641 in 23ms

The `tailscale netcheck` command will give me some details about my local Tailscale node, like whether it's able to pass UDP traffic, which DERP server is the closest, and the latency to all Tailscale DERP servers:

```shell
tailscale netcheck # [tl! .cmd]
# [tl! .nocopy:start]
Report:
  * UDP: true
  * IPv4: yes, [LOCAL_PUBLIC_IP]:52661

@ -178,7 +178,7 @@ Report:
  - tok: 154.9ms (Tokyo)
  - syd: 215.3ms (Sydney)
  - sin: 243.7ms (Singapore)
  - blr: 244.6ms (Bangalore) # [tl! .nocopy:end]
```

[^magicdns]: I could also connect using the Tailscale hostname, if [MagicDNS](https://tailscale.com/kb/1081/magicdns/) is enabled - but I'm getting ahead of myself.

@ -245,6 +245,7 @@ This ACL file uses a format called [HuJSON](https://github.com/tailscale/hujson)

I'm going to start by creating a group called `admins` and add myself to that group. This isn't strictly necessary since I am the only user in the organization, but I feel like it's a nice practice anyway. Then I'll add the `tagOwners` section to map each tag to its owner, the new group I just created:

```json
// torchlight! {"lineNumbers": true}
{
  "groups": {
    "group:admins": ["john@example.com"],

@ -277,6 +278,7 @@ Each ACL rule consists of four named parts:

So I'll add this to the top of my policy file:
```json
// torchlight! {"lineNumbers": true}
{
  "acls": [
    {

@ -306,6 +308,7 @@ Earlier I configured Tailscale to force all nodes to use my home DNS server for

Option 2 sounds better to me so that's what I'm going to do. Instead of putting an IP address directly into the ACL rule I'd rather use a hostname, and unfortunately the Tailscale host names aren't available within ACL rule declarations. But I can define a host alias in the policy to map a friendly name to the IP:
```json
// torchlight! {"lineNumbers": true}
{
  "hosts": {
    "win01": "100.124.116.125"

@ -315,6 +318,7 @@ Option 2 sounds better to me so that's what I'm going to do. Instead of putting

And I can then create a new rule for `"users": ["tag:cloud"]` to add an exception for `win01:53`:
```json
// torchlight! {"lineNumbers": true}
{
  "acls": [
    {

@ -332,6 +336,7 @@ And I can then create a new rule for `"users": ["tag:cloud"]` to add an exceptio
And that gets DNS working again for my cloud servers while still serving the results from my NextDNS configuration. Here's the complete policy configuration:

```json
// torchlight! {"lineNumbers": true}
{
  "acls": [
    {

@ -38,42 +38,43 @@ You're ready to roll once the Terminal opens and gives you a prompt:

Your first action should be to go ahead and install any patches:
```shell
sudo apt update # [tl! .cmd:1]
sudo apt upgrade
```

### Zsh, Oh My Zsh, and powerlevel10k theme
I've been really getting into this shell setup recently so let's go on and make things comfortable before we move on too much further. Getting `zsh` is straightforward:
```shell
sudo apt install zsh # [tl! .cmd]
```
Go ahead and launch `zsh` (by typing '`zsh`') and go through the initial setup wizard to configure preferences for things like history, completion, and other settings. I leave history on the defaults, enable the default completion options, switch the command-line editor to `vi`-style, and enable both `autocd` and `appendhistory`. Once you're back at the (new) `penguin%` prompt we can move on to installing the [Oh My Zsh plugin framework](https://github.com/ohmyzsh/ohmyzsh).

Just grab the installer script like so:
```shell
wget https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh # [tl! .cmd]
```
Review it if you'd like (and you should! *Always* review code before running it!!), and then execute it:
```shell
sh install.sh # [tl! .cmd]
```
When asked if you'd like to change your default shell to `zsh` now, **say no**. This is because it will prompt for your password, but you probably don't have a password set on your brand-new Linux (Beta) account and that just makes things complicated. We'll clear this up later, but for now just check out that slick new prompt:
![Oh my!](8q-WT0AyC.png)

Oh My Zsh is pretty handy because you can easily enable [additional plugins](https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins) to make your prompt behave exactly the way you want it to. Let's spruce it up even more with the [powerlevel10k theme](https://github.com/romkatv/powerlevel10k)!
```shell
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git \ # [tl! .cmd]
  ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k
```
Now we just need to edit `~/.zshrc` to point to the new theme:
```shell
sed -i s/^ZSH_THEME=.\*$/ZSH_THEME='"powerlevel10k\/powerlevel10k"'/ ~/.zshrc # [tl! .cmd]
```
We'll need to launch another instance of `zsh` for the theme change to take effect, so first let's go ahead and manually set `zsh` as our default shell. We can use `sudo` to get around the whole "don't have a password set" inconvenience:
```shell
sudo chsh -s /bin/zsh [username] # [tl! .cmd]
```
Now close out the terminal and open it again, and you should be met by the powerlevel10k configurator which will walk you through getting things set up:
![powerlevel10k configurator](K1ScSuWcg.png)

This theme is crazy-configurable, but fortunately the configurator wizard does a great job of helping you choose the options that work best for you.
I pick the Classic prompt style, Unicode character set, Dark prompt color, 24-hour time, Angled separators, Sharp prompt heads, Flat prompt tails, 2-line prompt height, Dotted prompt connection, Right prompt frame, Sparse prompt spacing, Fluent prompt flow, Enabled transient prompt, Verbose instant prompt, and (finally) Yes to apply the changes.

@ -83,7 +84,7 @@ Looking good!
### Visual Studio Code
I'll need to do some light development work so VS Code is next on the hit list. You can grab the installer [here](https://code.visualstudio.com/Download#) or just copy/paste the following to stay in the Terminal. Definitely be sure to get the arm64 version!
```shell
curl -L https://aka.ms/linux-arm64-deb > code_arm64.deb # [tl! .cmd:1]
sudo apt install ./code_arm64.deb
```
VS Code should automatically appear in the Chromebook's Launcher, or you can use it to open a file directly with `code [filename]`:

@ -105,7 +106,7 @@ I'm working on setting up a [VMware homelab on an Intel NUC 9](https://twitter.c

PowerShell for ARM is still in an early stage so while [it is supported](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7.2#support-for-arm-processors) it must be installed manually. Microsoft has instructions for installing PowerShell from binary archives [here](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7.2#linux), and I grabbed the latest `-linux-arm64.tar.gz` release I could find [here](https://github.com/PowerShell/PowerShell/releases).
```shell
curl -L -o /tmp/powershell.tar.gz https://github.com/PowerShell/PowerShell/releases/download/v7.2.0-preview.5/powershell-7.2.0-preview.5-linux-arm64.tar.gz # [tl! .cmd:4]
sudo mkdir -p /opt/microsoft/powershell/7
sudo tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7
sudo chmod +x /opt/microsoft/powershell/7/pwsh
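# (the block is truncated here by the diff; the final step, per Microsoft's
#  binary-archive install instructions, is presumably to link pwsh onto the PATH:)
# sudo ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh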

@ -125,7 +126,7 @@ The Linux (Beta) environment consists of a hardened virtual machine (named `term

The docker installation has a few prerequisites:
```shell
sudo apt install \ # [tl! .cmd]
  apt-transport-https \
  ca-certificates \
  curl \

@ -134,18 +135,18 @@ sudo apt install \
```
Then we need to grab the Docker repo key:
```shell
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - # [tl! .cmd]
```
And then we can add the repo:
```shell
sudo add-apt-repository \ # [tl! .cmd]
  "deb [arch=arm64] https://download.docker.com/linux/debian \
  $(lsb_release -cs) \
  stable"
```
And finally update the package cache and install `docker` and its friends:
```shell
sudo apt update # [tl! .cmd:1]
sudo apt install docker-ce docker-ce-cli containerd.io
```
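
A quick way to make sure the daemon is actually working is the standard hello-world container; adding your user to the `docker` group is an optional nicety (both are stock Docker steps, nothing Crostini-specific):

```shell
sudo docker run --rm hello-world   # prints a hello message if the install is healthy
sudo usermod -aG docker $USER      # optional: drop the need for sudo (takes effect next login)
```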
![I put a container in your container](k2uiYi5e8.png)

@ -164,13 +165,13 @@ I came across [a Reddit post](https://www.reddit.com/r/Crostini/comments/jnbqv3/

The key is to grab the appropriate version of [conda Miniforge](https://github.com/conda-forge/miniforge), make it executable, and run the installer:
```shell
wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-aarch64.sh # [tl! .cmd:2]
chmod +x Miniforge3-Linux-aarch64.sh
./Miniforge3-Linux-aarch64.sh
```
Exit the terminal and relaunch it, and then install Jupyter:
```shell
conda install -c conda-forge notebook # [tl! .cmd]
```

You can then launch the notebook with `jupyter notebook` and it will automatically open up in a Chrome OS browser tab:
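
That is, simply (it serves on port 8888 by default, unless you've configured otherwise):

```shell
jupyter notebook   # starts the server and opens a browser tab pointed at it
```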

@ -32,7 +32,10 @@ I shared a [few months back](/federated-matrix-server-synapse-on-oracle-clouds-f

I recently came across the [Snikket project](https://snikket.org/), which [aims](https://snikket.org/about/goals/) to make decentralized end-to-end encrypted personal messaging simple and accessible for *everyone*, with an emphasis on providing a consistent experience across the network. Snikket does this by maintaining a matched set of server and client[^2] software with feature and design parity, making it incredibly easy to deploy and manage the server, and simplifying user registration with invite links. In contrast to Matrix, Snikket does not operate an open server on which users can self-register but instead requires users to be invited to a hosted instance. The idea is that a server would be used by small groups of family and friends where every user knows (and trusts!) the server operator while also ensuring the complete decentralization of the network[^3].

How simple is the server install?
> I spun up a quick @snikket_im XMPP server last night to check out the project - and I do mean QUICK. It took me longer to register a new domain than to deploy the server on GCP and create my first account through the client.
>
> — John (@johndotbowdre) November 18, 2021

Seriously, their [4-step quick-start guide](https://snikket.org/service/quickstart/) is so good that I didn't feel the need to do a blog post about my experience. I've now been casually using Snikket for a bit over a month and remain very impressed both by the software and the project itself, and have even deployed a new Snikket instance for my family to use. My parents were actually able to join the chat without any issues, which is a testament to how easy it is from a user perspective too.

A few days ago I migrated my original Snikket instance from Google Cloud (GCP) to the same Oracle Cloud Infrastructure (OCI) virtual server that's hosting my Matrix homeserver so I thought I might share some notes first on the installation process. At the end, I'll share the tweaks which were needed to get Snikket to run happily alongside Matrix.

@ -55,8 +58,8 @@ You can refer to my notes from last time for details on how I [created the Ubunt
| `60000-60100`[^4] | UDP | Audio/Video data proxy (TURN data) |

As a gentle reminder, Oracle's `iptables` configuration inserts a `REJECT all` rule at the bottom of each chain. I needed to make sure that each of my `ALLOW` rules gets inserted above that point. So I used `iptables -L INPUT --line-numbers` to identify which line held the `REJECT` rule, and then used `iptables -I INPUT [LINE_NUMBER] -m state --state NEW -p [PROTOCOL] --dport [PORT] -j ACCEPT` to insert the new rules above that point.
```shell
sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 80 -j ACCEPT # [tl! .cmd:start]
sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 443 -j ACCEPT
sudo iptables -I INPUT 9 -m state --state NEW -p tcp -m multiport --dports 3478,3479 -j ACCEPT
@ -66,13 +69,13 @@ sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 5222 -j ACCEPT
sudo iptables -I INPUT 9 -m state --state NEW -p tcp --dport 5269 -j ACCEPT
sudo iptables -I INPUT 9 -m state --state NEW -p udp -m multiport --dports 3478,3479 -j ACCEPT
sudo iptables -I INPUT 9 -m state --state NEW -p udp -m multiport --dports 5349,5350 -j ACCEPT
sudo iptables -I INPUT 9 -m state --state NEW -p udp -m multiport --dports 60000:60100 -j ACCEPT # [tl! .cmd:end]
```

Then to verify the rules are in the right order:
```shell
sudo iptables -L INPUT --line-numbers -n # [tl! .cmd]
Chain INPUT (policy ACCEPT) # [tl! .nocopy:start]
num   target     prot opt source      destination
1     ts-input   all  --  0.0.0.0/0   0.0.0.0/0
2     ACCEPT     all  --  0.0.0.0/0   0.0.0.0/0   state RELATED,ESTABLISHED

@ -89,13 +92,13 @@ num target prot opt source destination
13    ACCEPT     tcp  --  0.0.0.0/0   0.0.0.0/0   state NEW tcp dpt:5222
14    ACCEPT     tcp  --  0.0.0.0/0   0.0.0.0/0   state NEW tcp dpt:5000
15    ACCEPT     tcp  --  0.0.0.0/0   0.0.0.0/0   state NEW multiport dports 3478,3479
16    REJECT     all  --  0.0.0.0/0   0.0.0.0/0   reject-with icmp-host-prohibited # [tl! .nocopy:end]
```

Before moving on, it's important to save them so the rules will persist across reboots!
```shell
sudo netfilter-persistent save # [tl! .cmd]
run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save # [tl! .nocopy:1]
run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save
```

@ -112,30 +115,30 @@ share.vpota.to 300 IN CNAME chat.vpota.to
### Install `docker` and `docker-compose`
Snikket is distributed as a set of docker containers which makes it super easy to get up and running on basically any Linux system. But, of course, you'll first need to [install `docker`](https://docs.docker.com/engine/install/ubuntu/).

```shell
# Update package index
sudo apt update # [tl! .cmd]
# Install prereqs
sudo apt install ca-certificates curl gnupg lsb-release # [tl! .cmd]
# Add docker's GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg # [tl! .cmd]
# Add the docker repo
echo \ # [tl! .cmd]
  "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# Refresh the package index with the new repo added
sudo apt update # [tl! .cmd]
# Install docker
sudo apt install docker-ce docker-ce-cli containerd.io # [tl! .cmd]
```

Then install `docker-compose` as well to simplify the container management:

```shell
# Download the docker-compose binary
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose # [tl! .cmd]
# Make it executable
sudo chmod +x /usr/local/bin/docker-compose # [tl! .cmd]
```

Now we're ready to...

@ -143,21 +146,21 @@ Now we're ready to...

### Install Snikket
This starts with just making a place for Snikket to live:

```shell
sudo mkdir /etc/snikket # [tl! .cmd:1]
cd /etc/snikket
```

And then grabbing the Snikket `docker-compose` file:

```shell
sudo curl -o docker-compose.yml https://snikket.org/service/resources/docker-compose.beta.yml # [tl! .cmd]
```

And then creating a very minimal configuration file:

```shell
sudo vi snikket.conf # [tl! .cmd]
```

A basic config only needs two parameters:

@ -173,7 +176,8 @@ In my case, I'm going to add two additional parameters to restrict the UDP TURN

So here's my config:

```ini
# torchlight! {"lineNumbers": true}
SNIKKET_DOMAIN=chat.vpota.to
SNIKKET_ADMIN_EMAIL=ops@example.com

@ -185,8 +189,8 @@ SNIKKET_TWEAK_TURNSERVER_MAX_PORT=60100
### Start it up!
With everything in place, I can start up the Snikket server:

```shell
sudo docker-compose up -d # [tl! .cmd]
```

This will take a moment or two to pull down all the required container images, start them, and automatically generate the SSL certificates. Very soon, though, I can point my browser to `https://chat.vpota.to` and see a lovely login page - complete with an automagically-valid-and-trusted certificate:
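
To keep an eye on that startup while it happens, standard `docker-compose` tooling works here (nothing Snikket-specific):

```shell
sudo docker-compose logs -f   # tail all the container logs; Ctrl-C to stop watching
```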

@ -194,8 +198,8 @@ This will take a moment or two to pull down all the required container images, s

Of course, I don't yet have a way to log in, and like I mentioned earlier Snikket doesn't offer open user registration. Every user (even me, the admin!) has to be invited. Fortunately I can generate my first invite directly from the command line:

```shell
sudo docker exec snikket create-invite --admin --group default # [tl! .cmd]
```

That command will return a customized invite link which I can copy and paste into my browser.

@ -248,33 +252,34 @@ One of the really cool things about Caddy is that it automatically generates SSL
|
||||||
|
|
||||||
Fortunately, the [Snikket reverse proxy documentation](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/reverse_proxy.md#basic) was recently updated with a sample config for making this happen. Matrix and Snikket really only overlap on ports `80` and `443` so those are the only ports I'll need to handle, which lets me go for the "Basic" configuration instead of the "Advanced" one. I can just adapt the sample config from the documentation and add that to my existing `/etc/caddy/Caddyfile` alongside the config for Matrix:
|
Fortunately, the [Snikket reverse proxy documentation](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/reverse_proxy.md#basic) was recently updated with a sample config for making this happen. Matrix and Snikket really only overlap on ports `80` and `443` so those are the only ports I'll need to handle, which lets me go for the "Basic" configuration instead of the "Advanced" one. I can just adapt the sample config from the documentation and add that to my existing `/etc/caddy/Caddyfile` alongside the config for Matrix:
|
||||||
|
|
||||||
```
|
```text
|
||||||
http://chat.vpota.to,
|
# torchlight! {"lineNumbers": true}
|
||||||
|
http://chat.vpota.to, # [tl! focus:start]
|
||||||
http://groups.chat.vpota.to,
|
http://groups.chat.vpota.to,
|
||||||
http://share.chat.vpota.to {
|
http://share.chat.vpota.to {
|
||||||
reverse_proxy localhost:5080
|
reverse_proxy localhost:5080
|
||||||
}
|
}
|
||||||
|
|
||||||
chat.vpota.to,
|
chat.vpota.to,
|
||||||
groups.chat.vpota.to,
|
groups.chat.vpota.to,
|
||||||
share.chat.vpota.to {
|
share.chat.vpota.to {
|
||||||
reverse_proxy https://localhost:5443 {
|
reverse_proxy https://localhost:5443 {
|
||||||
transport http {
|
transport http {
|
||||||
tls_insecure_skip_verify
|
tls_insecure_skip_verify
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
} # [tl! focus:end]
|
||||||
|
|
||||||
matrix.bowdre.net {
|
matrix.bowdre.net {
|
||||||
reverse_proxy /_matrix/* http://localhost:8008
|
reverse_proxy /_matrix/* http://localhost:8008
|
||||||
reverse_proxy /_synapse/client/* http://localhost:8008
|
reverse_proxy /_synapse/client/* http://localhost:8008
|
||||||
}
|
}
|
||||||
|
|
||||||
bowdre.net {
|
bowdre.net {
|
||||||
route {
|
route {
|
||||||
respond /.well-known/matrix/server `{"m.server": "matrix.bowdre.net:443"}`
|
respond /.well-known/matrix/server `{"m.server": "matrix.bowdre.net:443"}`
|
||||||
redir https://virtuallypotato.com
|
redir https://virtuallypotato.com
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
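
After saving the Caddyfile, reloading Caddy applies the new config (assuming the standard systemd service that the Caddy package installs):

```shell
sudo systemctl reload caddy # [tl! .cmd]
```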
|
|
||||||
|
@ -291,32 +296,32 @@ Since Snikket is completely containerized, moving between hosts is a simple matt
|
||||||
|
|
||||||
The Snikket team has actually put together a couple of scripts to assist with [backing up](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/backup.sh) and [restoring](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/restore.sh) an instance. I just adapted the last line of each to do what I needed:
|
The Snikket team has actually put together a couple of scripts to assist with [backing up](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/backup.sh) and [restoring](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/restore.sh) an instance. I just adapted the last line of each to do what I needed:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
sudo docker run --rm --volumes-from=snikket \
|
sudo docker run --rm --volumes-from=snikket \ # [tl! .cmd]
|
||||||
-v "/home/john/snikket-backup/":/backup debian:buster-slim \
|
-v "/home/john/snikket-backup/":/backup debian:buster-slim \
|
||||||
tar czf /backup/snikket-"$(date +%F-%H%m)".tar.gz /snikket
|
tar czf /backup/snikket-"$(date +%F-%H%m)".tar.gz /snikket
|
||||||
```
|
```
|
||||||
|
|
||||||
That will drop a compressed backup of the `snikket_data` volume into the specified directory, `/home/john/snikket-backup/`. While I'm at it, I'll also go ahead and copy the `docker-compose.yml` and `snikket.conf` files from `/etc/snikket/`:
|
That will drop a compressed backup of the `snikket_data` volume into the specified directory, `/home/john/snikket-backup/`. While I'm at it, I'll also go ahead and copy the `docker-compose.yml` and `snikket.conf` files from `/etc/snikket/`:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
$ sudo cp -a /etc/snikket/* /home/john/snikket-backup/
|
sudo cp -a /etc/snikket/* /home/john/snikket-backup/ # [tl! .cmd]
|
||||||
$ ls -l /home/john/snikket-backup/
|
ls -l /home/john/snikket-backup/ # [tl! .cmd]
|
||||||
total 1728
|
total 1728 # [tl! .nocopy:3]
|
||||||
-rw-r--r-- 1 root root 993 Dec 19 17:47 docker-compose.yml
|
-rw-r--r-- 1 root root 993 Dec 19 17:47 docker-compose.yml
|
||||||
-rw-r--r-- 1 root root 1761046 Dec 19 17:46 snikket-2021-12-19-1745.tar.gz
|
-rw-r--r-- 1 root root 1761046 Dec 19 17:46 snikket-2021-12-19-1745.tar.gz
|
||||||
-rw-r--r-- 1 root root 299 Dec 19 17:47 snikket.conf
|
-rw-r--r-- 1 root root 299 Dec 19 17:47 snikket.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
And I can then zip that up for easy transfer:
|
And I can then zip that up for easy transfer:
|
||||||
```bash
|
```shell
|
||||||
tar czvf /home/john/snikket-backup.tar.gz /home/john/snikket-backup/
|
tar czvf /home/john/snikket-backup.tar.gz /home/john/snikket-backup/ # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
This would be a great time to go ahead and stop this original Snikket instance. After all, nothing that happens after the backup is exported is going to carry over anyway.
|
This would be a great time to go ahead and stop this original Snikket instance. After all, nothing that happens after the backup is exported is going to carry over anyway.
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
sudo docker-compose down
|
sudo docker-compose down # [tl! .cmd]
|
||||||
```
|
```
|
||||||
{{% notice tip "Update DNS" %}}
|
{{% notice tip "Update DNS" %}}
|
||||||
This is also a great time to update the `A` record for `chat.vpota.to` so that it points to the new server. It will need a little bit of time for the change to trickle out, and the updated record really needs to be in place before starting Snikket on the new server so that there aren't any certificate problems.
|
This is also a great time to update the `A` record for `chat.vpota.to` so that it points to the new server. It will need a little bit of time for the change to trickle out, and the updated record really needs to be in place before starting Snikket on the new server so that there aren't any certificate problems.
|
||||||
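
A quick way to spot-check that the updated record has propagated (the resolver choice here is arbitrary):

```shell
dig +short A chat.vpota.to @1.1.1.1 # [tl! .cmd]
```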
|
@ -325,18 +330,18 @@ This is also a great time to update the `A` record for `chat.vpota.to` so that i
|
||||||
|
|
||||||
Now I just need to transfer the archive from one server to the other. I've got [Tailscale](https://tailscale.com/)[^11] running on my various cloud servers so that they can talk to each other through a secure WireGuard tunnel (remember [WireGuard](/cloud-based-wireguard-vpn-remote-homelab-access/)?) without having to open any firewall ports between them, and that means I can just use `scp` to transfer the file without any fuss. I can even leverage Tailscale's [Magic DNS](https://tailscale.com/kb/1081/magicdns/) feature to avoid worrying about IPs at all and just use the hostname registered in Tailscale (`chat-oci`):
|
Now I just need to transfer the archive from one server to the other. I've got [Tailscale](https://tailscale.com/)[^11] running on my various cloud servers so that they can talk to each other through a secure WireGuard tunnel (remember [WireGuard](/cloud-based-wireguard-vpn-remote-homelab-access/)?) without having to open any firewall ports between them, and that means I can just use `scp` to transfer the file without any fuss. I can even leverage Tailscale's [Magic DNS](https://tailscale.com/kb/1081/magicdns/) feature to avoid worrying about IPs at all and just use the hostname registered in Tailscale (`chat-oci`):
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
scp /home/john/snikket-backup.tar.gz chat-oci:/home/john/
|
scp /home/john/snikket-backup.tar.gz chat-oci:/home/john/ # [tl! .cmd]
|
||||||
```
|
```
|
||||||
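
(If the MagicDNS name doesn't resolve right away, listing the tailnet peers is a quick sanity check:)

```shell
tailscale status | grep chat-oci # [tl! .cmd]
```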
|
|
||||||
Next, I SSH in to the new server and unzip the archive:
|
Next, I SSH in to the new server and unzip the archive:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
$ ssh snikket-oci-server
|
ssh snikket-oci-server # [tl! .cmd:3]
|
||||||
$ tar xf snikket-backup.tar.gz
|
tar xf snikket-backup.tar.gz
|
||||||
$ cd snikket-backup
|
cd snikket-backup
|
||||||
$ ls -l
|
ls -l
|
||||||
total 1728
|
total 1728 # [tl! .nocopy:3]
|
||||||
-rw-r--r-- 1 root root 993 Dec 19 17:47 docker-compose.yml
|
-rw-r--r-- 1 root root 993 Dec 19 17:47 docker-compose.yml
|
||||||
-rw-r--r-- 1 root root 1761046 Dec 19 17:46 snikket-2021-12-19-1745.tar.gz
|
-rw-r--r-- 1 root root 1761046 Dec 19 17:46 snikket-2021-12-19-1745.tar.gz
|
||||||
-rw-r--r-- 1 root root 299 Dec 19 17:47 snikket.conf
|
-rw-r--r-- 1 root root 299 Dec 19 17:47 snikket.conf
|
||||||
|
@ -344,8 +349,8 @@ total 1728
|
||||||
|
|
||||||
Before I can restore the content of the `snikket-data` volume on the new server, I'll need to first go ahead and set up Snikket again. I've already got `docker` and `docker-compose` installed from when I installed Matrix so I'll skip to creating the Snikket directory and copying in the `docker-compose.yml` and `snikket.conf` files.
|
Before I can restore the content of the `snikket-data` volume on the new server, I'll need to first go ahead and set up Snikket again. I've already got `docker` and `docker-compose` installed from when I installed Matrix so I'll skip to creating the Snikket directory and copying in the `docker-compose.yml` and `snikket.conf` files.
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
sudo mkdir /etc/snikket
|
sudo mkdir /etc/snikket # [tl! .cmd:3]
|
||||||
sudo cp docker-compose.yml /etc/snikket/
|
sudo cp docker-compose.yml /etc/snikket/
|
||||||
sudo cp snikket.conf /etc/snikket/
|
sudo cp snikket.conf /etc/snikket/
|
||||||
cd /etc/snikket
|
cd /etc/snikket
|
||||||
|
@ -353,7 +358,8 @@ cd /etc/snikket
|
||||||
|
|
||||||
Before I fire this up on the new host, I need to edit the `snikket.conf` to tell Snikket to use those different ports defined in the reverse proxy configuration using [a couple of `SNIKKET_TWEAK_*` lines](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/reverse_proxy.md#snikket):
|
Before I fire this up on the new host, I need to edit the `snikket.conf` to tell Snikket to use those different ports defined in the reverse proxy configuration using [a couple of `SNIKKET_TWEAK_*` lines](https://github.com/snikket-im/snikket-server/blob/master/docs/advanced/reverse_proxy.md#snikket):
|
||||||
|
|
||||||
```
|
```ini
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
SNIKKET_DOMAIN=chat.vpota.to
|
SNIKKET_DOMAIN=chat.vpota.to
|
||||||
SNIKKET_ADMIN_EMAIL=ops@example.com
|
SNIKKET_ADMIN_EMAIL=ops@example.com
|
||||||
|
|
||||||
|
@ -364,16 +370,16 @@ SNIKKET_TWEAK_TURNSERVER_MAX_PORT=60100
|
||||||
```
|
```
|
||||||
|
|
||||||
Alright, let's start up the Snikket server:
|
Alright, let's start up the Snikket server:
|
||||||
```bash
|
```shell
|
||||||
sudo docker-compose up -d
|
sudo docker-compose up -d # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
After a moment or two, I can point a browser to `https://chat.vpota.to` and see the login screen (with a valid SSL certificate!) but I won't actually be able to log in. As far as Snikket is concerned, this is a brand new setup.
|
After a moment or two, I can point a browser to `https://chat.vpota.to` and see the login screen (with a valid SSL certificate!) but I won't actually be able to log in. As far as Snikket is concerned, this is a brand new setup.
|
||||||
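
If I want to double-check that certificate from the terminal before logging in (purely optional), something like this does the trick:

```shell
echo | openssl s_client -connect chat.vpota.to:443 2>/dev/null | openssl x509 -noout -issuer -dates # [tl! .cmd]
```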
|
|
||||||
Now I can borrow the last line from the [`restore.sh` script](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/restore.sh) to bring in my data:
|
Now I can borrow the last line from the [`restore.sh` script](https://github.com/snikket-im/snikket-selfhosted/blob/main/scripts/restore.sh) to bring in my data:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
sudo docker run --rm --volumes-from=snikket \
|
sudo docker run --rm --volumes-from=snikket \ # [tl! .cmd]
|
||||||
--mount type=bind,source="/home/john/snikket-backup/snikket-2021-12-19-1745.tar.gz",destination=/backup.tar.gz \
|
--mount type=bind,source="/home/john/snikket-backup/snikket-2021-12-19-1745.tar.gz",destination=/backup.tar.gz \
|
||||||
debian:buster-slim \
|
debian:buster-slim \
|
||||||
bash -c "rm -rf /snikket/*; tar xvf /backup.tar.gz -C /"
|
bash -c "rm -rf /snikket/*; tar xvf /backup.tar.gz -C /"
|
||||||
|
|
949
content/posts/spotlight-on-torchlight/index.md
Normal file
|
@ -0,0 +1,949 @@
|
||||||
|
---
|
||||||
|
title: "Spotlight on Torchlight"
|
||||||
|
date: 2023-11-09
|
||||||
|
lastmod: 2023-11-13
|
||||||
|
description: "Syntax highlighting powered by the Torchlight.dev API makes it easier to dress up code blocks. Here's an overview of what I did to replace this blog's built-in Hugo highlighter (Chroma) with Torchlight."
|
||||||
|
featured: false
|
||||||
|
toc: true
|
||||||
|
comment: true
|
||||||
|
series: Projects # Projects, Scripts
|
||||||
|
tags:
|
||||||
|
- javascript
|
||||||
|
- hugo
|
||||||
|
- meta
|
||||||
|
---
|
||||||
|
|
||||||
|
I've been futzing around a bit with how code blocks render on this blog. Hugo has a built-in, _really fast_, [syntax highlighter](https://gohugo.io/content-management/syntax-highlighting/) courtesy of [Chroma](https://github.com/alecthomas/chroma). Chroma is basically automatic and it renders very quickly[^fast] during the `hugo` build process, and it's a pretty solid "works everywhere out of the box" option.
|
||||||
|
|
||||||
|
That said, the one-size-fits-all approach may not actually fit everyone *well*, and Chroma does leave me wanting a bit more. Chroma sometimes struggles with tokenizing and highlighting certain languages, leaving me with boring monochromatic text blocks. Hugo's implementation supports highlighting individual lines by inserting directives next to the code fence backticks (like `{hl_lines="11-13"}` to highlight lines 11-13), but that can be clumsy if you're not sure which lines need to be highlighted[^eleven], need to highlight multiple disjointed lines, or later insert additional lines that throw off the count. And sometimes I'd like to share a full file for context while also collapsing it down to just the bits I'm going to write about. That's not something that can be done with the built-in highlighter (at least not without tacking on a bunch of extra JavaScript and CSS nonsense[^nonsense]).
|
||||||
|
|
||||||
|
[^fast]: Did I mention that it's fast?
|
||||||
|
[^eleven]: (or how to count to eleven)
|
||||||
|
[^nonsense]: Spoiler: I'm going to tack on some JS and CSS nonsense later - we'll get to that.
|
||||||
|
|
||||||
|
But then I found a post from Sebastian de Deyne about [Better code highlighting in Hugo with Torchlight](https://sebastiandedeyne.com/better-code-highlighting-in-hugo-with-torchlight), and I thought that [Torchlight](https://torchlight.dev) sounded pretty promising.
|
||||||
|
|
||||||
|
From Torchlight's [docs](https://torchlight.dev/docs),
|
||||||
|
|
||||||
|
> *Torchlight is a VS Code-compatible syntax highlighter that requires no JavaScript, supports every language, every VS Code theme, line highlighting, git diffing, and more.*
|
||||||
|
>
|
||||||
|
> *Unlike traditional syntax highlighting tools, Torchlight is an HTTP API that tokenizes and highlights your code on our backend server instead of in the visitor's browser.*
|
||||||
|
>
|
||||||
|
> *We find this to be the easiest and most powerful way to achieve accurate and feature rich syntax highlighting.*
|
||||||
|
>
|
||||||
|
> *Client-side language parsers are limited in their complexity since they have to run in the browser environment. There are a lot of edge cases that those libraries can't catch.*
|
||||||
|
>
|
||||||
|
> *Torchlight relies on the VS Code parsing engine and TextMate language grammars to achieve the most accurate results possible. We bring the power of the entire VS Code ecosystem to your docs or blog.*
|
||||||
|
|
||||||
|
In short: Code blocks in, formatted HTML out, and no JavaScript or extra code to render this slick display in the browser:
|
||||||
|
```toml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# netlify.toml
|
||||||
|
[build]
|
||||||
|
publish = "public"
|
||||||
|
|
||||||
|
[build.environment]
|
||||||
|
HUGO_VERSION = "0.111.3" # [tl! --]
|
||||||
|
HUGO_VERSION = "0.116.1" # [tl! ++ reindex(-1)]
|
||||||
|
|
||||||
|
[context.production] # [tl! focus:5 highlight:3,1]
|
||||||
|
command = """
|
||||||
|
hugo --minify
|
||||||
|
npm i @torchlight-api/torchlight-cli
|
||||||
|
npx torchlight
|
||||||
|
"""
|
||||||
|
|
||||||
|
[context.preview] # [tl! collapse:start]
|
||||||
|
command = """
|
||||||
|
hugo --minify --environment preview
|
||||||
|
npm i @torchlight-api/torchlight-cli
|
||||||
|
npx torchlight
|
||||||
|
"""
|
||||||
|
[[headers]]
|
||||||
|
for = "/*"
|
||||||
|
[headers.values]
|
||||||
|
X-Robots-Tag = "noindex"
|
||||||
|
|
||||||
|
[[redirects]]
|
||||||
|
from = "/*"
|
||||||
|
to = "/404/"
|
||||||
|
status = 404 # [tl! collapse:end]
|
||||||
|
```
|
||||||
|
|
||||||
|
Pretty nice, right? That block's got:
|
||||||
|
- Colorful, accurate syntax highlighting
|
||||||
|
- Traditional line highlighting
|
||||||
|
- A shnazzy blur/focus to really make the important lines pop
|
||||||
|
- In-line diffs to show what's changed
|
||||||
|
- An expandable section to reveal additional context on-demand
|
||||||
|
|
||||||
|
And marking-up that code block was pretty easy and intuitive. Torchlight is controlled by [annotations](https://torchlight.dev/docs/annotations) inserted as comments appropriate for whatever language you're using (like `# [tl! highlight]` to highlight a single line). In most cases you can just put the annotation right at the end of the line you're trying to flag. You can also [specify ranges](https://torchlight.dev/docs/annotations/ranges) relative to the current line (`[tl! focus:5]` to apply the focus effect to the current line and the next five) or use `:start` and `:end` so you don't have to count at all.
|
||||||
|
```toml
|
||||||
|
# torchlight! {"torchlightAnnotations": false}
|
||||||
|
# netlify.toml
|
||||||
|
[build]
|
||||||
|
publish = "public"
|
||||||
|
|
||||||
|
[build.environment]
|
||||||
|
# diff: remove this line
|
||||||
|
HUGO_VERSION = "0.111.3" # [tl! --]
|
||||||
|
# diff: add this line, adjust line numbering to compensate
|
||||||
|
HUGO_VERSION = "0.116.1" # [tl! ++ reindex(-1)]
|
||||||
|
|
||||||
|
# focus this line and the following 5, highlight the third line down
|
||||||
|
[context.production] # [tl! focus:5 highlight:3,1]
|
||||||
|
command = """
|
||||||
|
hugo --minify
|
||||||
|
npm i @torchlight-api/torchlight-cli
|
||||||
|
npx torchlight
|
||||||
|
"""
|
||||||
|
|
||||||
|
# collapse everything from `:start` to `:end`
|
||||||
|
[context.preview] # [tl! collapse:start]
|
||||||
|
command = """
|
||||||
|
hugo --minify --environment preview
|
||||||
|
npm i @torchlight-api/torchlight-cli
|
||||||
|
npx torchlight
|
||||||
|
"""
|
||||||
|
[[headers]]
|
||||||
|
for = "/*"
|
||||||
|
[headers.values]
|
||||||
|
X-Robots-Tag = "noindex"
|
||||||
|
|
||||||
|
[[redirects]]
|
||||||
|
from = "/*"
|
||||||
|
to = "/404/"
|
||||||
|
status = 404 # [tl! collapse:end]
|
||||||
|
```
|
||||||
|
|
||||||
|
See what I mean? Being able to put the annotations directly on the line(s) they modify is a lot easier to manage than trying to keep track of multiple line numbers in the header. And I think the effect is pretty cool.
|
||||||
|
|
||||||
|
### Basic setup
|
||||||
|
So what did it take to get this working on my blog?
|
||||||
|
|
||||||
|
I started with registering for a free[^free] account at [torchlight.dev](https://app.torchlight.dev/register?plan=free_month) and generating an API token. I'll need to include that later with calls to the Torchlight API. The token will be stashed as an environment variable in my Netlify configuration, but I'll also stick it in a local `.env` file for use with local builds:
|
||||||
|
```shell
|
||||||
|
echo "TORCHLIGHT_TOKEN=torch_[...]" > ./.env # [tl! .cmd]
|
||||||
|
```
|
||||||
|
|
||||||
|
[^free]: Torchlight is free for sites which don't generate revenue, though it does require a link back to `torchlight.dev`. I stuck the attribution link in the footer. More pricing info [here](https://torchlight.dev/#pricing).
|
||||||
|
|
||||||
|
#### Installation
|
||||||
|
I then used `npm` to install Torchlight in the root of my Hugo repo:
|
||||||
|
```shell
|
||||||
|
npm i @torchlight-api/torchlight-cli # [tl! .cmd]
|
||||||
|
# [tl! .nocopy:1]
|
||||||
|
added 94 packages in 5s
|
||||||
|
```
|
||||||
|
|
||||||
|
That created a few new files and directories that I don't want to sync with the repo, so I added those to my `.gitignore` configuration. I'll also be sure to add that `.env` file so that I don't commit any secrets!
|
||||||
|
```
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# .gitignore
|
||||||
|
.hugo_build.lock
|
||||||
|
/node_modules/ [tl! ++:2]
|
||||||
|
/package-lock.json
|
||||||
|
/package.json
|
||||||
|
/public/
|
||||||
|
/resources/
|
||||||
|
/.env [tl! ++]
|
||||||
|
```
|
||||||
|
|
||||||
|
The [installation instructions](https://torchlight.dev/docs/clients/cli#init-command) say to then initialize Torchlight like so:
|
||||||
|
```shell
|
||||||
|
npx torchlight init # [tl! .cmd focus]
|
||||||
|
# [tl! .nocopy:start]
|
||||||
|
node:internal/fs/utils:350
|
||||||
|
throw err;
|
||||||
|
^
|
||||||
|
|
||||||
|
Error: ENOENT: no such file or directory, open '/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/stubs/config.js' # [tl! focus]
|
||||||
|
at Object.openSync (node:fs:603:3)
|
||||||
|
at Object.readFileSync (node:fs:471:35)
|
||||||
|
at write (/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/bin/torchlight.cjs.js:524:39)
|
||||||
|
at init (/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/bin/torchlight.cjs.js:538:12)
|
||||||
|
at Command.<anonymous> (/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/bin/torchlight.cjs.js:722:12)
|
||||||
|
at Command.listener [as _actionHandler] (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:488:17)
|
||||||
|
at /home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1227:65
|
||||||
|
at Command._chainOrCall (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1144:12)
|
||||||
|
at Command._parseCommand (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1227:27)
|
||||||
|
at Command._dispatchSubcommand (/home/john/projects/runtimeterror/node_modules/commander/lib/command.js:1050:25) {
|
||||||
|
errno: -2,
|
||||||
|
syscall: 'open',
|
||||||
|
code: 'ENOENT',
|
||||||
|
path: '/home/john/projects/runtimeterror/node_modules/@torchlight-api/torchlight-cli/dist/stubs/config.js'
|
||||||
|
}
|
||||||
|
|
||||||
|
Node.js v18.17.1
|
||||||
|
# [tl! .nocopy:end]
|
||||||
|
```
|
||||||
|
|
||||||
|
Oh. Hmm.
|
||||||
|
|
||||||
|
There's an [open issue](https://github.com/torchlight-api/torchlight-cli/issues/4) which reveals that the stub config file is actually located under the `src/` directory instead of `dist/`. And it turns out the `init` step isn't strictly necessary; it's just a helper to get you a working config to start from.
|
||||||
|
|
||||||
|
#### Configuration
|
||||||
|
Now that I know where the stub config lives, I can simply copy it to my repo root. I'll then get to work modifying it to suit my needs:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cp node_modules/@torchlight-api/torchlight-cli/src/stubs/config.js ./torchlight.config.js # [tl! .cmd]
|
||||||
|
```
|
||||||
|
|
||||||
|
```js
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
|
// torchlight.config.js
|
||||||
|
module.exports = {
|
||||||
|
// Your token from https://torchlight.dev
|
||||||
|
token: process.env.TORCHLIGHT_TOKEN, // this will come from a netlify build var [tl! highlight focus]
|
||||||
|
|
||||||
|
// The Torchlight client caches highlighted code blocks. Here you
|
||||||
|
// can define which directory you'd like to use. You'll likely
|
||||||
|
// want to add this directory to your .gitignore. Set to
|
||||||
|
// `false` to use an in-memory cache. You may also
|
||||||
|
// provide a full cache implementation.
|
||||||
|
cache: 'cache', // [tl! -- focus:1]
|
||||||
|
cache: false, // disable cache for netlify builds [tl! ++ reindex(-1)]
|
||||||
|
|
||||||
|
// Which theme you want to use. You can find all of the themes at
|
||||||
|
// https://torchlight.dev/docs/themes.
|
||||||
|
theme: 'material-theme-palenight', // [tl! -- focus:1]
|
||||||
|
theme: 'one-dark-pro', // switch up the theme [tl! ++ reindex(-1)]
|
||||||
|
|
||||||
|
// The Host of the API.
|
||||||
|
host: 'https://api.torchlight.dev',
|
||||||
|
|
||||||
|
// Global options to control block-level settings.
|
||||||
|
// https://torchlight.dev/docs/options
|
||||||
|
options: {
|
||||||
|
// Turn line numbers on or off globally.
|
||||||
|
lineNumbers: false,
|
||||||
|
|
||||||
|
// Control the `style` attribute applied to line numbers.
|
||||||
|
// lineNumbersStyle: '',
|
||||||
|
|
||||||
|
// Turn on +/- diff indicators.
|
||||||
|
diffIndicators: true,
|
||||||
|
|
||||||
|
// If there are any diff indicators for a line, put them
|
||||||
|
// in place of the line number to save horizontal space.
|
||||||
|
diffIndicatorsInPlaceOfLineNumbers: true // [tl! --]
|
||||||
|
diffIndicatorsInPlaceOfLineNumbers: true, // [tl! ++ reindex(-1)]
|
||||||
|
|
||||||
|
// When lines are collapsed, this is the text that will
|
||||||
|
// be shown to indicate that they can be expanded.
|
||||||
|
// summaryCollapsedIndicator: '...', [tl! --]
|
||||||
|
summaryCollapsedIndicator: 'Click to expand...', // make the collapse a little more explicit [tl! ++ reindex(-1)]
|
||||||
|
},
|
||||||
|
|
||||||
|
// Options for the highlight command.
|
||||||
|
highlight: {
|
||||||
|
// Directory where your un-highlighted source files live. If
|
||||||
|
// left blank, Torchlight will use the current directory.
|
||||||
|
input: '', // [tl! -- focus:1]
|
||||||
|
input: 'public', // tells Torchlight where to find Hugo's processed HTML output [tl! ++ reindex(-1)]
|
||||||
|
|
||||||
|
// Directory where your highlighted files should be placed. If
|
||||||
|
// left blank, files will be modified in place.
|
||||||
|
output: '',
|
||||||
|
|
||||||
|
// Globs to include when looking for files to highlight.
|
||||||
|
includeGlobs: [
|
||||||
|
'**/*.htm',
|
||||||
|
'**/*.html'
|
||||||
|
],
|
||||||
|
|
||||||
|
// String patterns to ignore (not globs). The entire file
|
||||||
|
// path will be searched and if any of these strings
|
||||||
|
// appear, the file will be ignored.
|
||||||
|
excludePatterns: [
|
||||||
|
'/node_modules/',
|
||||||
|
'/vendor/'
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You can find more details about the configuration options [here](https://torchlight.dev/docs/clients/cli#configuration-file).
|
||||||
|
|
||||||
|
#### Stylization
|
||||||
|
It's not strictly necessary for the basic functionality, but applying a little bit of extra CSS to match up with the classes leveraged by Torchlight can help to make things look a bit more polished. Fortunately for this _fake-it-til-you-make-it_ dev, Torchlight provides sample CSS that works great for this:
|
||||||
|
|
||||||
|
- [Basic CSS](https://torchlight.dev/docs/css) for generally making things look tidy
|
||||||
|
- [Focus CSS](https://torchlight.dev/docs/annotations/focusing#css) for that slick blur/focus effect
|
||||||
|
- [Collapse CSS](https://torchlight.dev/docs/annotations/collapsing#required-css) for some accordion action
|
||||||
|
|
||||||
|
Put those blocks together (along with a few minor tweaks), and here's what I started with in `assets/css/torchlight.css`:
|
||||||
|
```css
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Basic styling for Torchlight code blocks. *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Margin and rounding are personal preferences,
|
||||||
|
overflow-x-auto is recommended.
|
||||||
|
*/
|
||||||
|
pre {
|
||||||
|
border-radius: 0.25rem;
|
||||||
|
margin-top: 1rem;
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
overflow-x: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Add some vertical padding and expand the width
|
||||||
|
to fill its container. The horizontal padding
|
||||||
|
comes at the line level so that background
|
||||||
|
colors extend edge to edge.
|
||||||
|
*/
|
||||||
|
pre.torchlight {
|
||||||
|
display: block;
|
||||||
|
min-width: -webkit-max-content;
|
||||||
|
min-width: -moz-max-content;
|
||||||
|
min-width: max-content;
|
||||||
|
padding-top: 1rem;
|
||||||
|
padding-bottom: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Horizontal line padding to match the vertical
|
||||||
|
padding from the code block above.
|
||||||
|
*/
|
||||||
|
pre.torchlight .line {
|
||||||
|
padding-left: 1rem;
|
||||||
|
padding-right: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Push the code away from the line numbers and
|
||||||
|
summary caret indicators.
|
||||||
|
*/
|
||||||
|
pre.torchlight .line-number,
|
||||||
|
pre.torchlight .summary-caret {
|
||||||
|
margin-right: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Focus styling *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Blur and dim the lines that don't have the `.line-focus` class,
|
||||||
|
but are within a code block that contains any focus lines.
|
||||||
|
*/
|
||||||
|
.torchlight.has-focus-lines .line:not(.line-focus) {
|
||||||
|
transition: filter 0.35s, opacity 0.35s;
|
||||||
|
filter: blur(.095rem);
|
||||||
|
opacity: .65;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
When the code block is hovered, bring all the lines into focus.
|
||||||
|
*/
|
||||||
|
.torchlight.has-focus-lines:hover .line:not(.line-focus) {
|
||||||
|
filter: blur(0px);
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Collapse styling *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
.torchlight summary:focus {
|
||||||
|
outline: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Hide the default markers, as we provide our own */
|
||||||
|
.torchlight details > summary::marker,
|
||||||
|
.torchlight details > summary::-webkit-details-marker {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.torchlight details .summary-caret::after {
|
||||||
|
pointer-events: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Add spaces to keep everything aligned */
|
||||||
|
.torchlight .summary-caret-empty::after,
|
||||||
|
.torchlight details .summary-caret-middle::after,
|
||||||
|
.torchlight details .summary-caret-end::after {
|
||||||
|
content: " ";
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Show a minus sign when the block is open. */
|
||||||
|
.torchlight details[open] .summary-caret-start::after {
|
||||||
|
content: "-";
|
||||||
|
}
|
||||||
|
|
||||||
|
/* And a plus sign when the block is closed. */
|
||||||
|
.torchlight details:not([open]) .summary-caret-start::after {
|
||||||
|
content: "+";
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Hide the [...] indicator when open. */
|
||||||
|
.torchlight details[open] .summary-hide-when-open {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Show the [...] indicator when closed. */
|
||||||
|
.torchlight details:not([open]) .summary-hide-when-open {
|
||||||
|
display: initial;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Additional styling *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
/* Fix for disjointed horizontal scrollbars */
|
||||||
|
.highlight div {
|
||||||
|
overflow-x: visible;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
I'll make sure that this CSS gets dynamically attached to any pages with a code block by adding this to the bottom of my `layouts/partials/head.html`:
|
||||||
|
```html
|
||||||
|
<!-- syntax highlighting -->
|
||||||
|
{{ if (findRE "<pre" .Content 1) }}
|
||||||
|
{{ $syntax := resources.Get "css/torchlight.css" | minify }}
|
||||||
|
<link href="{{ $syntax.RelPermalink }}" rel="stylesheet">
|
||||||
|
{{ end }}
|
||||||
|
```
|
||||||
|
|
||||||
|
As a bit of housekeeping, I'm also going to remove the built-in highlighter configuration from my `config/_default/markup.toml` file to make sure it doesn't conflict with Torchlight:
|
||||||
|
```toml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# config/_default/markup.toml
|
||||||
|
[goldmark]
|
||||||
|
[goldmark.renderer]
|
||||||
|
hardWraps = false
|
||||||
|
unsafe = true
|
||||||
|
xhtml = false
|
||||||
|
[goldmark.extensions]
|
||||||
|
typographer = false
|
||||||
|
|
||||||
|
[highlight] # [tl! --:start]
|
||||||
|
anchorLineNos = true
|
||||||
|
codeFences = true
|
||||||
|
guessSyntax = true
|
||||||
|
hl_Lines = ''
|
||||||
|
lineNos = false
|
||||||
|
lineNoStart = 1
|
||||||
|
lineNumbersInTable = false
|
||||||
|
noClasses = false
|
||||||
|
tabwidth = 2
|
||||||
|
style = 'monokai'
|
||||||
|
# [tl! --:end]
|
||||||
|
# Table of contents # [tl! reindex(10)]
|
||||||
|
# Add toc = true to content front matter to enable
|
||||||
|
[tableOfContents]
|
||||||
|
endLevel = 5
|
||||||
|
ordered = false
|
||||||
|
startLevel = 3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Building
|
||||||
|
Now that the pieces are in place, it's time to start building!
|
||||||
|
|
||||||
|
#### Local
|
||||||
|
I like to preview my blog as I work on it so that I know what it will look like before I hit `git push` and let Netlify do its magic. And Hugo has been fantastic for that! But since I'm offloading the syntax highlighting to the Torchlight API, I'll need to manually build the site instead of relying on Hugo's instant preview builds.
|
||||||
|
|
||||||
|
There are a couple of steps I'll use for this:
|
||||||
|
1. First, I'll `source .env` to load the `TORCHLIGHT_TOKEN` for the API.
|
||||||
|
2. Then, I'll use `hugo --minify --environment local -D` to render my site into the `public/` directory.
|
||||||
|
3. Next, I'll call `npx torchlight` to parse the HTML files in `public/`, extract the content of any `<pre>`/`<code>` blocks, send it to the Torchlight API to work the magic, and write the formatted code blocks back to the existing HTML files.
|
||||||
|
4. Finally, I use `python3 -m http.server --directory public 1313` to serve the `public/` directory so I can view the content at `http://localhost:1313`.
|
||||||
|
|
||||||
|
I'm lazy, though, so I'll even put that into a quick `build.sh` script to help me run local builds:
|
||||||
|
```shell
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Quick script to run local builds
|
||||||
|
source .env
|
||||||
|
hugo --minify --environment local -D
|
||||||
|
npx torchlight
|
||||||
|
python3 -m http.server --directory public 1313
|
||||||
|
```
|
||||||
|
|
||||||
|
Now I can just make the script executable and fire it off:
|
||||||
|
```shell
|
||||||
|
chmod +x build.sh # [tl! focus:3 .cmd:1]
|
||||||
|
./build.sh
|
||||||
|
Start building sites … # [tl! .nocopy:start]
|
||||||
|
hugo v0.111.3+extended linux/amd64 BuildDate=unknown VendorInfo=nixpkgs
|
||||||
|
|
||||||
|
| EN
|
||||||
|
-------------------+------
|
||||||
|
Pages | 202
|
||||||
|
Paginator pages | 0
|
||||||
|
Non-page files | 553
|
||||||
|
Static files | 49
|
||||||
|
Processed images | 0
|
||||||
|
Aliases | 5
|
||||||
|
Sitemaps | 1
|
||||||
|
Cleaned | 0
|
||||||
|
|
||||||
|
Total in 248 ms
|
||||||
|
Highlighting index.html
|
||||||
|
Highlighting 3d-modeling-and-printing-on-chrome-os/index.html
|
||||||
|
Highlighting 404/index.html
|
||||||
|
Highlighting about/index.html # [tl! collapse:start]
|
||||||
|
|
||||||
|
+ + + O
|
||||||
|
o '
|
||||||
|
________________ _
|
||||||
|
\__(=======/_=_/____.--'-`--.___
|
||||||
|
\ \ `,--,-.___.----'
|
||||||
|
.--`\\--'../ |
|
||||||
|
'---._____.|] -0- |o
|
||||||
|
* | -0- -O-
|
||||||
|
' o 0 | '
|
||||||
|
. -0- . '
|
||||||
|
|
||||||
|
Did you really want to see the full file list?
|
||||||
|
|
||||||
|
Highlighting tags/vsphere/index.html # [tl! collapse:end]
|
||||||
|
Highlighting tags/windows/index.html
|
||||||
|
Highlighting tags/wireguard/index.html
|
||||||
|
Highlighting tags/wsl/index.html # [tl! focus:1]
|
||||||
|
Writing to /home/john/projects/runtimeterror/public/abusing-chromes-custom-search-engines-for-fun-and-profit/index.html
|
||||||
|
Writing to /home/john/projects/runtimeterror/public/auto-connect-to-protonvpn-on-untrusted-wifi-with-tasker/index.html
|
||||||
|
Writing to /home/john/projects/runtimeterror/public/cat-file-without-comments/index.html # [tl! collapse:start]
|
||||||
|
|
||||||
|
' * + -O- |
|
||||||
|
o o .
|
||||||
|
___________ 0 o .
|
||||||
|
+/-/_"/-/_/-/| -0- o -O- * *
|
||||||
|
/"-/-_"/-_//|| . -O-
|
||||||
|
/__________/|/| + | *
|
||||||
|
|"|_'='-]:+|/|| . o -0- . *
|
||||||
|
|-+-|.|_'-"||// + | | ' ' 0
|
||||||
|
|[".[:!+-'=|// | -0- 0 -O-
|
||||||
|
|='!+|-:]|-|/ -0- o |-0- 0 -O-
|
||||||
|
---------- * | -O| + o
|
||||||
|
o -O- -0- -0- -O-
|
||||||
|
| + | -O- |
|
||||||
|
-0- -0- . O
|
||||||
|
-O- | -O- *
|
||||||
|
your code will be assimilated
|
||||||
|
|
||||||
|
Writing to /home/john/projects/runtimeterror/public/k8s-on-vsphere-node-template-with-packer/index.html # [tl! collapse:end]
|
||||||
|
Writing to /home/john/projects/runtimeterror/public/tanzu-community-edition-k8s-homelab/index.html
|
||||||
|
Serving HTTP on 0.0.0.0 port 1313 (http://0.0.0.0:1313/) ... # [tl! focus:1]
|
||||||
|
127.0.0.1 - - [07/Nov/2023 20:34:29] "GET /spotlight-on-torchlight/ HTTP/1.1" 200 -
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Netlify
|
||||||
|
Setting up Netlify to leverage the Torchlight API is kind of similar. I'll start with logging in to the [Netlify dashboard](https://app.netlify.com) and navigating to **Site Configuration > Environment Variables**. There, I'll click on **Add a variable > Add a single variable**. I'll give the new variable a key of `TORCHLIGHT_TOKEN` and set its value to the token I obtained earlier.
|
||||||
|
|
||||||
|
![](netlify-env-var.png)
|
||||||
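
The same variable could instead be set from the terminal, assuming the Netlify CLI is installed and linked to the site (the token value below is just a placeholder):

```shell
netlify env:set TORCHLIGHT_TOKEN torch_xxxxxxxxxxxxxxxx # [tl! .cmd]
```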
|
|
||||||
|
Once that's done, I edit the `netlify.toml` file at the root of my site repo to alter the build commands:
|
||||||
|
```toml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
|
[build]
|
||||||
|
publish = "public"
|
||||||
|
|
||||||
|
[build.environment]
|
||||||
|
HUGO_VERSION = "0.111.3"
|
||||||
|
|
||||||
|
[context.production] # [tl! focus:6]
|
||||||
|
command = "hugo" # [tl! -- ++:1,5 reindex(-1):1,1]
|
||||||
|
command = """
|
||||||
|
hugo --minify
|
||||||
|
npm i @torchlight-api/torchlight-cli
|
||||||
|
npx torchlight
|
||||||
|
"""
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Now when I `git push` new content, Netlify will use Hugo to build the site, then install and call Torchlight to `++fancy;` the code blocks before the site gets served. Very nice!
|
||||||
|
|
||||||
|
### #Goals
|
||||||
|
Of course, I. Just. Can't. leave well enough alone, so my work here isn't finished - not by a long shot.
|
||||||
|
|
||||||
|
You see, I'm a sucker for handy "copy" buttons attached to code blocks, and that's not something that Torchlight does (it just returns rendered HTML, remember? No fancy JavaScript here). I also wanted to add informative prompt indicators (like `$` and `#`) to code blocks representing command-line inputs (rather than script files). And I'd like to flag text returned by a command so that *only* the commands get copied, effectively ignoring the returned text, diff-removed lines, diff markers, line numbers, and prompt indicators.
|
||||||
|
|
||||||
|
I had previously implemented a solution based *heavily* on Justin James' blog post, [Hugo - Dynamically Add Copy Code Snippet Button](https://digitaldrummerj.me/hugo-add-copy-code-snippet-button/). Getting that Chroma-focused solution to work well with Torchlight-formatted code blocks took some work, particularly since I'm inept at web development and can barely spell "CSS" and "JavaScrapped".
|
||||||
|
|
||||||
|
But I[^copilot] eventually fumbled through the changes required to meet my #goals, and I'm pretty happy with how it all works.
|
||||||
|
|
||||||
|
[^copilot]: With a little help from my Copilot buddy...
|
||||||
|
|
||||||
|
#### Custom classes
|
||||||
|
Remember Torchlight's in-line annotations that I mentioned earlier? They're pretty capable out of the box, but can also be expanded through the use of [custom classes](https://torchlight.dev/docs/annotations/classes). This makes it easy to selectively apply special handling to selected lines of code, something that's otherwise pretty dang tricky to do with Chroma.
|
||||||
|
|
||||||
|
So, for instance, I could add a class `.cmd` for standard user-level command-line inputs:
|
||||||
|
```shell
|
||||||
|
# torchlight! {"torchlightAnnotations":false}
|
||||||
|
sudo make me a sandwich # [tl! .cmd]
|
||||||
|
```
|
||||||
|
```shell
|
||||||
|
sudo make me a sandwich # [tl! .cmd]
|
||||||
|
```
|
||||||
|
|
||||||
|
Or `.cmd_root` for a root prompt:
|
||||||
|
```shell
|
||||||
|
# torchlight! {"torchlightAnnotations": false}
|
||||||
|
wall "Make your own damn sandwich." # [tl! .cmd_root]
|
||||||
|
```
|
||||||
|
```shell
|
||||||
|
wall "Make your own damn sandwich." # [tl! .cmd_root]
|
||||||
|
```
|
||||||
|
|
||||||
|
And for deviants:
|
||||||
|
```powershell
|
||||||
|
# torchlight! {"torchlightAnnotations": false}
|
||||||
|
Write-Host -ForegroundColor Green "A taco is a sandwich" # [tl! .cmd_pwsh]
|
||||||
|
```
|
||||||
|
```powershell
|
||||||
|
Write-Host -ForegroundColor Green "A taco is a sandwich" # [tl! .cmd_pwsh]
|
||||||
|
```
|
||||||
|
|
||||||
|
I also came up with a cleverly-named `.nocopy` class for the returned lines that shouldn't be copyable:
|
||||||
|
```shell
|
||||||
|
# torchlight! {"torchlightAnnotations": false}
|
||||||
|
copy this # [tl! .cmd]
|
||||||
|
but not this # [tl! .nocopy]
|
||||||
|
```
|
||||||
|
```shell
|
||||||
|
copy this # [tl! .cmd]
|
||||||
|
but not this # [tl! .nocopy]
|
||||||
|
```
|
||||||
|
|
||||||
|
So that's how I'll tie my custom classes to individual lines of code[^ranges], but I still need to actually define those classes.
|
||||||
|
|
||||||
|
I'll drop those at the bottom of the `assets/css/torchlight.css` file I created earlier:
|
||||||
|
|
||||||
|
```css
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
|
/* [tl! collapse:start]
|
||||||
|
/*********************************************
|
||||||
|
* Basic styling for Torchlight code blocks. *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Margin and rounding are personal preferences,
|
||||||
|
overflow-x-auto is recommended.
|
||||||
|
*/
|
||||||
|
pre {
|
||||||
|
border-radius: 0.25rem;
|
||||||
|
margin-top: 1rem;
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
overflow-x: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Add some vertical padding and expand the width
|
||||||
|
to fill its container. The horizontal padding
|
||||||
|
comes at the line level so that background
|
||||||
|
colors extend edge to edge.
|
||||||
|
*/
|
||||||
|
pre.torchlight {
|
||||||
|
display: block;
|
||||||
|
min-width: -webkit-max-content;
|
||||||
|
min-width: -moz-max-content;
|
||||||
|
min-width: max-content;
|
||||||
|
padding-top: 1rem;
|
||||||
|
padding-bottom: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Horizontal line padding to match the vertical
|
||||||
|
padding from the code block above.
|
||||||
|
*/
|
||||||
|
pre.torchlight .line {
|
||||||
|
padding-left: 1rem;
|
||||||
|
padding-right: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Push the code away from the line numbers and
|
||||||
|
summary caret indicators.
|
||||||
|
*/
|
||||||
|
pre.torchlight .line-number,
|
||||||
|
pre.torchlight .summary-caret {
|
||||||
|
margin-right: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Focus styling *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Blur and dim the lines that don't have the `.line-focus` class,
|
||||||
|
but are within a code block that contains any focus lines.
|
||||||
|
*/
|
||||||
|
.torchlight.has-focus-lines .line:not(.line-focus) {
|
||||||
|
transition: filter 0.35s, opacity 0.35s;
|
||||||
|
filter: blur(.095rem);
|
||||||
|
opacity: .65;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
When the code block is hovered, bring all the lines into focus.
|
||||||
|
*/
|
||||||
|
.torchlight.has-focus-lines:hover .line:not(.line-focus) {
|
||||||
|
filter: blur(0px);
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Collapse styling *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
.torchlight summary:focus {
|
||||||
|
outline: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Hide the default markers, as we provide our own */
|
||||||
|
.torchlight details > summary::marker,
|
||||||
|
.torchlight details > summary::-webkit-details-marker {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.torchlight details .summary-caret::after {
|
||||||
|
pointer-events: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Add spaces to keep everything aligned */
|
||||||
|
.torchlight .summary-caret-empty::after,
|
||||||
|
.torchlight details .summary-caret-middle::after,
|
||||||
|
.torchlight details .summary-caret-end::after {
|
||||||
|
content: " ";
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Show a minus sign when the block is open. */
|
||||||
|
.torchlight details[open] .summary-caret-start::after {
|
||||||
|
content: "-";
|
||||||
|
}
|
||||||
|
|
||||||
|
/* And a plus sign when the block is closed. */
|
||||||
|
.torchlight details:not([open]) .summary-caret-start::after {
|
||||||
|
content: "+";
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Hide the [...] indicator when open. */
|
||||||
|
.torchlight details[open] .summary-hide-when-open {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Show the [...] indicator when closed. */
|
||||||
|
.torchlight details:not([open]) .summary-hide-when-open {
|
||||||
|
display: initial;
|
||||||
|
} /* [tl! collapse:end]
|
||||||
|
|
||||||
|
/*********************************************
|
||||||
|
* Additional styling *
|
||||||
|
**********************************************/
|
||||||
|
|
||||||
|
/* Fix for disjointed horizontal scrollbars */
|
||||||
|
.highlight div {
|
||||||
|
overflow-x: visible;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* [tl! focus:start]
|
||||||
|
Insert prompt indicators on interactive shells.
|
||||||
|
*/
|
||||||
|
.cmd::before {
|
||||||
|
color: var(--base07);
|
||||||
|
content: "$ ";
|
||||||
|
}
|
||||||
|
|
||||||
|
.cmd_root::before {
|
||||||
|
color: var(--base08);
|
||||||
|
content: "# ";
|
||||||
|
}
|
||||||
|
|
||||||
|
.cmd_pwsh::before {
|
||||||
|
color: var(--base07);
|
||||||
|
content: "PS> ";
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Don't copy shell outputs
|
||||||
|
*/
|
||||||
|
.nocopy {
|
||||||
|
-webkit-user-select: none;
|
||||||
|
user-select: none;
|
||||||
|
} /* [tl! focus:end]
|
||||||
|
```
|
||||||
|
|
||||||
|
[^ranges]: Or ranges of lines, using the same syntax as before: `[tl! .nocopy:5]` will make this line and the following five uncopyable.
|
||||||
|
|
||||||
|
The `.cmd` classes will simply insert the respective prompt _before_ each flagged line, and the `.nocopy` class will prevent those lines from being selected (and copied). Now for the tricky part...
|
||||||
|
|
||||||
|
#### Copy that blocky
|
||||||
|
There are two major pieces for the code-copy wizardry: the CSS to style/arrange the copy button and language label, and the JavaScript to make it work.
|
||||||
|
|
||||||
|
I put the CSS in `assets/css/code-copy-button.css`:
|
||||||
|
|
||||||
|
```css
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
|
/* adapted from https://digitaldrummerj.me/hugo-add-copy-code-snippet-button/ */
|
||||||
|
|
||||||
|
.highlight {
|
||||||
|
position: relative;
|
||||||
|
z-index: 0;
|
||||||
|
padding: 0;
|
||||||
|
margin:40px 0 10px 0;
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.copy-code-button {
|
||||||
|
position: absolute;
|
||||||
|
z-index: -1;
|
||||||
|
right: 0px;
|
||||||
|
top: -26px;
|
||||||
|
font-size: 13px;
|
||||||
|
font-weight: 700;
|
||||||
|
line-height: 14px;
|
||||||
|
letter-spacing: 0.5px;
|
||||||
|
width: 65px;
|
||||||
|
color: var(--fg);
|
||||||
|
background-color: var(--bg);
|
||||||
|
border: 1.25px solid var(--off-bg);
|
||||||
|
border-top-left-radius: 4px;
|
||||||
|
border-top-right-radius: 4px;
|
||||||
|
border-bottom-right-radius: 0px;
|
||||||
|
border-bottom-left-radius: 0px;
|
||||||
|
white-space: nowrap;
|
||||||
|
padding: 6px 6px 7px 6px;
|
||||||
|
margin: 0 0 0 1px;
|
||||||
|
cursor: pointer;
|
||||||
|
opacity: 0.6;
|
||||||
|
}
|
||||||
|
|
||||||
|
.copy-code-button:hover,
|
||||||
|
.copy-code-button:focus,
|
||||||
|
.copy-code-button:active,
|
||||||
|
.copy-code-button:active:hover {
|
||||||
|
color: var(--off-bg);
|
||||||
|
background-color: var(--off-fg);
|
||||||
|
opacity: 0.8;
|
||||||
|
}
|
||||||
|
|
||||||
|
.copyable-text-area {
|
||||||
|
position: absolute;
|
||||||
|
height: 0;
|
||||||
|
z-index: -1;
|
||||||
|
opacity: .01;
|
||||||
|
}
|
||||||
|
|
||||||
|
.torchlight [data-lang]:before {
|
||||||
|
position: absolute;
|
||||||
|
z-index: -1;
|
||||||
|
top: -26px;
|
||||||
|
left: 0px;
|
||||||
|
content: attr(data-lang);
|
||||||
|
font-size: 13px;
|
||||||
|
font-weight: 700;
|
||||||
|
color: var(--fg);
|
||||||
|
background-color: var(--bg);
|
||||||
|
border-top-left-radius: 4px;
|
||||||
|
border-top-right-radius: 4px;
|
||||||
|
border-bottom-left-radius: 0;
|
||||||
|
border-bottom-right-radius: 0;
|
||||||
|
padding: 6px 6px 7px 6px;
|
||||||
|
line-height: 14px;
|
||||||
|
opacity: 0.6;
|
||||||
|
position: absolute;
|
||||||
|
letter-spacing: 0.5px;
|
||||||
|
border: 1.25px solid var(--off-bg);
|
||||||
|
margin: 0 0 0 1px;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
And, as before, I'll link this from the bottom of my `layouts/partials/head.html` so it will get loaded on the appropriate pages:
|
||||||
|
|
||||||
|
```html
|
||||||
|
<!-- syntax highlighting -->
|
||||||
|
{{ if (findRE "<pre" .Content 1) }}
|
||||||
|
{{ $syntax := resources.Get "css/torchlight.css" | minify }}
|
||||||
|
<link href="{{ $syntax.RelPermalink }}" rel="stylesheet">
|
||||||
|
{{ $copyCss := resources.Get "css/code-copy-button.css" | minify }} <!-- [tl! ++:1 ] -->
|
||||||
|
<link href="{{ $copyCss.RelPermalink }}" rel="stylesheet">
|
||||||
|
{{ end }}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Code behind the copy
|
||||||
|
That sure makes the code blocks and accompanying button and labels look pretty great, but I still need to actually make the button work. For that, I'll need some JavaScript that (again) largely comes from Justin's post.
|
||||||
|
|
||||||
|
With all the different classes and things used with Torchlight, it took a lot of (generally misguided) tinkering for me to get the script to copy just the text I wanted (and nothing else). I learned a ton in the process, so I've highlighted the major deviations from Justin's script.
|
||||||
|
|
||||||
|
Anyway, here's my `assets/js/code-copy-button.js`:
|
||||||
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
|
// adapted from https://digitaldrummerj.me/hugo-add-copy-code-snippet-button/
|
||||||
|
|
||||||
|
function createCopyButton(highlightDiv) {
|
||||||
|
const button = document.createElement("button");
|
||||||
|
button.className = "copy-code-button";
|
||||||
|
button.type = "button";
|
||||||
|
button.innerText = "Copy";
|
||||||
|
button.addEventListener("click", () => copyCodeToClipboard(button, highlightDiv));
|
||||||
|
highlightDiv.insertBefore(button, highlightDiv.firstChild);
|
||||||
|
const wrapper = document.createElement("div");
|
||||||
|
wrapper.className = "highlight-wrapper";
|
||||||
|
highlightDiv.parentNode.insertBefore(wrapper, highlightDiv);
|
||||||
|
wrapper.appendChild(highlightDiv);
|
||||||
|
}
|
||||||
|
|
||||||
|
document.querySelectorAll(".highlight").forEach((highlightDiv) => createCopyButton(highlightDiv)); // [tl! focus:start]
|
||||||
|
|
||||||
|
async function copyCodeToClipboard(button, highlightDiv) {
|
||||||
|
// capture all code lines in the selected block which aren't classed `nocopy` or `line-remove`
|
||||||
|
let codeToCopy = highlightDiv.querySelectorAll(":last-child > .torchlight > code > .line:not(.nocopy, .line-remove)");
|
||||||
|
// now remove the first-child of each line if it is of class `line-number`
|
||||||
|
codeToCopy = Array.from(codeToCopy).reduce((accumulator, line) => {
|
||||||
|
if (line.firstChild.className != "line-number") {
|
||||||
|
return accumulator + line.innerText + "\n"; }
|
||||||
|
else {
|
||||||
|
return accumulator + Array.from(line.children).filter(
|
||||||
|
(child) => child.className != "line-number").reduce(
|
||||||
|
(accumulator, child) => accumulator + child.innerText, "") + "\n";
|
||||||
|
}
|
||||||
|
}, ""); // [tl! focus:end]
|
||||||
|
try {
|
||||||
|
var result = await navigator.permissions.query({ name: "clipboard-write" });
|
||||||
|
if (result.state == "granted" || result.state == "prompt") {
|
||||||
|
await navigator.clipboard.writeText(codeToCopy);
|
||||||
|
} else {
|
||||||
|
button.blur();
|
||||||
|
button.innerText = "Error!";
|
||||||
|
setTimeout(function () {
|
||||||
|
button.innerText = "Copy";
|
||||||
|
}, 2000);
|
||||||
|
}
|
||||||
|
} catch (_) {
|
||||||
|
button.blur();
|
||||||
|
button.innerText = "Error!";
|
||||||
|
setTimeout(function () {
|
||||||
|
button.innerText = "Copy";
|
||||||
|
}, 2000);
|
||||||
|
} finally {
|
||||||
|
button.blur();
|
||||||
|
button.innerText = "Copied!";
|
||||||
|
setTimeout(function () {
|
||||||
|
button.innerText = "Copy";
|
||||||
|
}, 2000);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
And this script gets called from the bottom of my `layouts/partials/footer.html`:
|
||||||
|
```html
|
||||||
|
{{ if (findRE "<pre" .Content 1) }}
|
||||||
|
{{ $jsCopy := resources.Get "js/code-copy-button.js" | minify }}
|
||||||
|
<script src="{{ $jsCopy.RelPermalink }}"></script>
|
||||||
|
{{ end }}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Going live!
|
||||||
|
|
||||||
|
And at this point, I can just run my `build.sh` script again to rebuild the site locally and verify that it works as well as I think it does.
|
||||||
|
|
||||||
|
It looks pretty good to me, so I'll go ahead and push this up to Netlify. If all goes well, this post and the new code block styling will go live at the same time.
|
||||||
|
|
||||||
|
See you on the other side!
|
BIN
content/posts/spotlight-on-torchlight/netlify-env-var.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 127 KiB |
|
@ -2,7 +2,6 @@
|
||||||
title: "Using `systemctl edit` to Delay Service Startup"
|
title: "Using `systemctl edit` to Delay Service Startup"
|
||||||
date: 2023-10-15
|
date: 2023-10-15
|
||||||
# lastmod: 2023-10-15
|
# lastmod: 2023-10-15
|
||||||
draft: true
|
|
||||||
description: "Quick notes on using `systemctl edit` to override a systemd service to delay its startup."
|
description: "Quick notes on using `systemctl edit` to override a systemd service to delay its startup."
|
||||||
featured: false
|
featured: false
|
||||||
toc: false
|
toc: false
|
||||||
|
@ -17,7 +16,7 @@ Following a recent update, I found that the [Linux development environment](http
|
||||||
|
|
||||||
Fortunately, it turns out that overriding the service to insert a short startup delay is really easy. I'll just use the `systemctl edit` command to create a quick override configuration:
|
Fortunately, it turns out that overriding the service to insert a short startup delay is really easy. I'll just use the `systemctl edit` command to create a quick override configuration:
|
||||||
```shell
|
```shell
|
||||||
sudo systemctl edit tailscaled
|
sudo systemctl edit tailscaled # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
This shows me the existing contents of the `tailscaled.service` definition so I can easily insert some overrides above. In this case, I just want to use `sleep 5` as the `ExecStartPre` command so that the service start will be delayed by 5 seconds:
|
This shows me the existing contents of the `tailscaled.service` definition so I can easily insert some overrides above. In this case, I just want to use `sleep 5` as the `ExecStartPre` command so that the service start will be delayed by 5 seconds:
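
A minimal sketch of what that override boils down to, in case I ever want to script it instead of using the interactive editor (this is the standard drop-in path that `systemctl edit` writes to):

```shell
sudo mkdir -p /etc/systemd/system/tailscaled.service.d # [tl! .cmd:2]
printf '[Service]\nExecStartPre=/bin/sleep 5\n' | sudo tee /etc/systemd/system/tailscaled.service.d/override.conf
sudo systemctl daemon-reload
```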
|
||||||
|
|
|
@ -36,36 +36,36 @@ Sounds great - but how do you actually make golink available on your tailnet? We
|
||||||
There are three things I'll need to do in the Tailscale admin portal before moving on:
|
There are three things I'll need to do in the Tailscale admin portal before moving on:
|
||||||
#### Create an ACL tag
|
#### Create an ACL tag
|
||||||
I assign ACL tags to devices in my tailnet based on their location and/or purpose, and I'm then able to use those in a policy to restrict access between certain devices. To that end, I'm going to create a new `tag:golink` tag for this purpose. Creating a new tag in Tailscale is really just going to the [Access Controls page of the admin console](https://login.tailscale.com/admin/acls) and editing the policy to specify a `tagOwner` who is permitted to assign the tag:
|
I assign ACL tags to devices in my tailnet based on their location and/or purpose, and I'm then able to use those in a policy to restrict access between certain devices. To that end, I'm going to create a new `tag:golink` tag for this purpose. Creating a new tag in Tailscale is really just going to the [Access Controls page of the admin console](https://login.tailscale.com/admin/acls) and editing the policy to specify a `tagOwner` who is permitted to assign the tag:
|
||||||
```text {hl_lines=[11]}
|
```json
|
||||||
"groups":
|
"groups":
|
||||||
"group:admins": ["john@example.com"],
|
"group:admins": ["john@example.com"],
|
||||||
},
|
},
|
||||||
"tagOwners": {
|
"tagOwners": {
|
||||||
"tag:home": ["group:admins"],
|
"tag:home": ["group:admins"],
|
||||||
"tag:cloud": ["group:admins"],
|
"tag:cloud": ["group:admins"],
|
||||||
"tag:client": ["group:admins"],
|
"tag:client": ["group:admins"],
|
||||||
"tag:dns": ["group:admins"],
|
"tag:dns": ["group:admins"],
|
||||||
"tag:rsync": ["group:admins"],
|
"tag:rsync": ["group:admins"],
|
||||||
"tag:funnel": ["group:admins"],
|
"tag:funnel": ["group:admins"],
|
||||||
"tag:golink": ["group:admins"],
|
"tag:golink": ["group:admins"], // [tl! highlight]
|
||||||
},
|
},
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Configure ACL access
|
#### Configure ACL access
|
||||||
This step is really only necessary since I've altered the default Tailscale ACL to prevent my nodes from communicating with each other unless specifically permitted. I want to make sure that everything on my tailnet can access golink:
|
This step is really only necessary since I've altered the default Tailscale ACL to prevent my nodes from communicating with each other unless specifically permitted. I want to make sure that everything on my tailnet can access golink:
|
||||||
|
|
||||||
```text
|
```json
|
||||||
"acls": [
|
"acls": [
|
||||||
{
|
{
|
||||||
// make golink accessible to everything
|
// make golink accessible to everything
|
||||||
"action": "accept",
|
"action": "accept",
|
||||||
"users": ["*"],
|
"users": ["*"],
|
||||||
"ports": [
|
"ports": [
|
||||||
"tag:golink:80",
|
"tag:golink:80",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
...
|
...
|
||||||
],
|
],
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Create an auth key
|
#### Create an auth key
|
||||||
|
@ -81,19 +81,20 @@ After clicking the **Generate key** button, the key will be displayed. This is t
|
||||||
### Docker setup
|
### Docker setup
|
||||||
The [golink repo](https://github.com/tailscale/golink) offers this command for running the container:
|
The [golink repo](https://github.com/tailscale/golink) offers this command for running the container:
|
||||||
```shell
|
```shell
|
||||||
docker run -it --rm ghcr.io/tailscale/golink:main
|
docker run -it --rm ghcr.io/tailscale/golink:main # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
The doc also indicates that I can pass the auth key to the golink service via the `TS_AUTHKEY` environment variable, and that all the configuration will be stored in `/home/nonroot` (which will be owned by uid/gid `65532`). I'll take this knowledge and use it to craft a `docker-compose.yaml` to simplify container management.
|
The doc also indicates that I can pass the auth key to the golink service via the `TS_AUTHKEY` environment variable, and that all the configuration will be stored in `/home/nonroot` (which will be owned by uid/gid `65532`). I'll take this knowledge and use it to craft a `docker-compose.yaml` to simplify container management.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
mkdir -p golink/data
|
mkdir -p golink/data # [tl! .cmd:3]
|
||||||
cd golink
|
cd golink
|
||||||
chown 65532:65532 data
|
chown 65532:65532 data
|
||||||
vi docker-compose.yaml
|
vi docker-compose.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# golink docker-compose.yaml
|
# golink docker-compose.yaml
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
|
@ -138,9 +139,7 @@ Some of my other golinks:
|
||||||
| `ipam` | `https://ipam.lab.bowdre.net/{{with .Path}}tools/search/{{.}}{{end}}` | searches my lab phpIPAM instance |
|
| `ipam` | `https://ipam.lab.bowdre.net/{{with .Path}}tools/search/{{.}}{{end}}` | searches my lab phpIPAM instance |
|
||||||
| `pdb` | `https://www.protondb.com/{{with .Path}}search?q={{.}}{{end}}` | searches [protondb](https://www.protondb.com/), super-handy for checking game compatibility when [Tailscale is installed on a Steam Deck](https://tailscale.com/blog/steam-deck/) |
|
| `pdb` | `https://www.protondb.com/{{with .Path}}search?q={{.}}{{end}}` | searches [protondb](https://www.protondb.com/), super-handy for checking game compatibility when [Tailscale is installed on a Steam Deck](https://tailscale.com/blog/steam-deck/) |
|
||||||
| `tailnet` | `https://login.tailscale.com/admin/machines?q={{.Path}}` | searches my Tailscale admin panel for a machine name |
|
| `tailnet` | `https://login.tailscale.com/admin/machines?q={{.Path}}` | searches my Tailscale admin panel for a machine name |
|
||||||
| `vpot8` | `https://www.virtuallypotato.com/{{with .Path}}search?query={{.}}{{end}}` | searches this here site |
|
|
||||||
| `sho` | `https://www.shodan.io/{{with .Path}}search?query={{.}}{{end}}` | searches Shodan for interesting internet-connected systems |
|
| `sho` | `https://www.shodan.io/{{with .Path}}search?query={{.}}{{end}}` | searches Shodan for interesting internet-connected systems |
|
||||||
| `tools` | `https://neeva.com/spaces/m_Bhx8tPfYQbOmaW1UHz-3a_xg3h2amlogo2GzgD` | shortcut to my [Tech Toolkit space](https://neeva.com/spaces/m_Bhx8tPfYQbOmaW1UHz-3a_xg3h2amlogo2GzgD) on Neeva |
|
|
||||||
| `randpass` | `https://www.random.org/passwords/?num=1\u0026len=24\u0026format=plain\u0026rnd=new` | generates a random 24-character string suitable for use as a password (`curl`-friendly) |
|
| `randpass` | `https://www.random.org/passwords/?num=1\u0026len=24\u0026format=plain\u0026rnd=new` | generates a random 24-character string suitable for use as a password (`curl`-friendly) |
|
||||||
| `wx` | `https://wttr.in/{{ .Path }}` | local weather report based on geolocation or weather for a designated city (`curl`-friendly) |
|
| `wx` | `https://wttr.in/{{ .Path }}` | local weather report based on geolocation or weather for a designated city (`curl`-friendly) |
|
||||||
|
|
||||||
|
@ -149,7 +148,7 @@ You can browse to `go/.export` to see a JSON-formatted listing of all configured
|
||||||
|
|
||||||
To restore, just pass `--snapshot /path/to/links.json` when starting golink. What I usually do is copy the file into the `data` folder that I'm mounting as a Docker volume, and then just run:
|
To restore, just pass `--snapshot /path/to/links.json` when starting golink. What I usually do is copy the file into the `data` folder that I'm mounting as a Docker volume, and then just run:
|
||||||
```shell
|
```shell
|
||||||
sudo docker exec golink /golink --sqlitedb /home/nonroot/golink.db --snapshot /home/nonroot/links.json
|
sudo docker exec golink /golink --sqlitedb /home/nonroot/golink.db --snapshot /home/nonroot/links.json # [tl! .cmd]
|
||||||
```
|
```
|
||||||
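Going the other way, capturing that `go/.export` output as a backup file can be scripted too. A quick sketch, assuming the golink service answers at `http://go` from inside the tailnet:

```shell
curl -s http://go/.export -o links.json # [tl! .cmd]
```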
|
|
||||||
### Conclusion
|
### Conclusion
|
||||||
|
|
|
@ -31,20 +31,20 @@ Here's a condensed list of the [steps that I took to manually install Tailscale]
|
||||||
1. Visit [https://pkgs.tailscale.com/stable/#static](https://pkgs.tailscale.com/stable/#static) to see the latest stable version for your system architecture, and copy the URL. For instance, I'll be using `https://pkgs.tailscale.com/stable/tailscale_1.34.1_arm64.tgz`.
|
1. Visit [https://pkgs.tailscale.com/stable/#static](https://pkgs.tailscale.com/stable/#static) to see the latest stable version for your system architecture, and copy the URL. For instance, I'll be using `https://pkgs.tailscale.com/stable/tailscale_1.34.1_arm64.tgz`.
|
||||||
2. Download and extract it to the system:
|
2. Download and extract it to the system:
|
||||||
```shell
|
```shell
|
||||||
wget https://pkgs.tailscale.com/stable/tailscale_1.34.1_arm64.tgz
|
wget https://pkgs.tailscale.com/stable/tailscale_1.34.1_arm64.tgz # [tl! .cmd:2]
|
||||||
tar xvf tailscale_1.34.1_arm64.tgz
|
tar xvf tailscale_1.34.1_arm64.tgz
|
||||||
cd tailscale_1.34.1_arm64/
|
cd tailscale_1.34.1_arm64/
|
||||||
```
|
```
|
||||||
3. Install the binaries and service files:
|
3. Install the binaries and service files:
|
||||||
```shell
|
```shell
|
||||||
sudo install -m 755 tailscale /usr/bin/
|
sudo install -m 755 tailscale /usr/bin/ # [tl! .cmd:4]
|
||||||
sudo install -m 755 tailscaled /usr/sbin/
|
sudo install -m 755 tailscaled /usr/sbin/
|
||||||
sudo install -m 644 systemd/tailscaled.defaults /etc/default/tailscaled
|
sudo install -m 644 systemd/tailscaled.defaults /etc/default/tailscaled
|
||||||
sudo install -m 644 systemd/tailscaled.service /usr/lib/systemd/system/
|
sudo install -m 644 systemd/tailscaled.service /usr/lib/systemd/system/
|
||||||
```
|
```
|
||||||
4. Start the service:
|
4. Start the service:
|
||||||
```shell
|
```shell
|
||||||
sudo systemctl enable tailscaled
|
sudo systemctl enable tailscaled # [tl! .cmd:1]
|
||||||
sudo systemctl start tailscaled
|
sudo systemctl start tailscaled
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -68,9 +68,9 @@ I've already got Docker installed on this machine, but if I didn't I would follo
|
||||||
|
|
||||||
I also verify that my install is using `cgroup` version 1, as version 2 is not currently supported:
|
I also verify that my install is using `cgroup` version 1, as version 2 is not currently supported:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ docker info | grep -i cgroup
|
docker info | grep -i cgroup # [tl! .cmd]
|
||||||
Cgroup Driver: cgroupfs
|
Cgroup Driver: cgroupfs # [tl! .nocopy:1]
|
||||||
Cgroup Version: 1
|
Cgroup Version: 1
|
||||||
```
|
```
|
||||||
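(If that check had come back with `Cgroup Version: 2`, one common workaround - a sketch assuming a GRUB-based, Debian-style distro - is to force the legacy hierarchy with a kernel parameter and reboot:)

```shell
# append systemd.unified_cgroup_hierarchy=0 to GRUB_CMDLINE_LINUX in /etc/default/grub, then:
sudo update-grub # [tl! .cmd:1]
sudo reboot
```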
|
|
||||||
|
@ -79,60 +79,49 @@ Next up, I'll install `kubectl` [as described here](https://kubernetes.io/docs/t
|
||||||
|
|
||||||
I can look at the [releases page on GitHub](https://github.com/kubernetes/kubernetes/releases) to see that the latest release for me is `1.22.5`. With this newfound knowledge I can follow the [Install kubectl binary with curl on Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-kubectl-binary-with-curl-on-linux) instructions to grab that specific version:
|
I can look at the [releases page on GitHub](https://github.com/kubernetes/kubernetes/releases) to see that the latest release for me is `1.22.5`. With this newfound knowledge I can follow the [Install kubectl binary with curl on Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-kubectl-binary-with-curl-on-linux) instructions to grab that specific version:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ curl -LO https://dl.k8s.io/release/v1.22.5/bin/linux/amd64/kubectl
|
curl -sLO https://dl.k8s.io/release/v1.22.5/bin/linux/amd64/kubectl # [tl! .cmd:1]
|
||||||
|
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
||||||
% Total % Received % Xferd Average Speed Time Time Time Current
|
# [tl! .nocopy:2]
|
||||||
Dload Upload Total Spent Left Speed
|
|
||||||
100 154 100 154 0 0 2298 0 --:--:-- --:--:-- --:--:-- 2298
|
|
||||||
100 44.7M 100 44.7M 0 0 56.9M 0 --:--:-- --:--:-- --:--:-- 56.9M
|
|
||||||
|
|
||||||
❯ sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
|
||||||
|
|
||||||
[sudo] password for john:
|
[sudo] password for john:
|
||||||
|
|
||||||
❯ kubectl version --client
|
kubectl version --client # [tl! .cmd]
|
||||||
Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.5", GitCommit:"5c99e2ac2ff9a3c549d9ca665e7bc05a3e18f07e", GitTreeState:"clean", BuildDate:"2021-12-16T08:38:33Z", GoVersion:"go1.16.12", Compiler:"gc", Platform:"linux/amd64"}
|
Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.5", # [tl! .nocopy:3]
|
||||||
|
GitCommit:"5c99e2ac2ff9a3c549d9ca665e7bc05a3e18f07e", GitTreeState:"clean",
|
||||||
|
BuildDate:"2021-12-16T08:38:33Z", GoVersion:"go1.16.12", Compiler:"gc",
|
||||||
|
Platform:"linux/amd64"}
|
||||||
```
|
```
|
||||||
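(As an aside, the same download site publishes the latest stable version string, which saves the trip to the releases page; at the time, this would have returned the same version:)

```shell
curl -sL https://dl.k8s.io/release/stable.txt # [tl! .cmd]
v1.22.5 # [tl! .nocopy]
```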
|
|
||||||
#### `kind` binary
|
#### `kind` binary
|
||||||
It's not strictly a requirement, but having the `kind` executable available will be handy for troubleshooting during the bootstrap process in case anything goes sideways. It can be installed in basically the same way as `kubectl`:
|
It's not strictly a requirement, but having the `kind` executable available will be handy for troubleshooting during the bootstrap process in case anything goes sideways. It can be installed in basically the same way as `kubectl`:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64
|
curl -sLo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 # [tl! .cmd:2]
|
||||||
|
sudo install -o root -g root -m 0755 kind /usr/local/bin/kind
|
||||||
% Total % Received % Xferd Average Speed Time Time Time Current
|
kind version
|
||||||
Dload Upload Total Spent Left Speed
|
kind v0.11.1 go1.16.5 linux/amd64 # [tl! .nocopy]
|
||||||
100 98 100 98 0 0 513 0 --:--:-- --:--:-- --:--:-- 513
|
|
||||||
100 655 100 655 0 0 2212 0 --:--:-- --:--:-- --:--:-- 10076
|
|
||||||
100 6660k 100 6660k 0 0 11.8M 0 --:--:-- --:--:-- --:--:-- 11.8M
|
|
||||||
|
|
||||||
❯ sudo install -o root -g root -m 0755 kind /usr/local/bin/kind
|
|
||||||
|
|
||||||
❯ kind version
|
|
||||||
kind v0.11.1 go1.16.5 linux/amd64
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Tanzu CLI
|
#### Tanzu CLI
|
||||||
The final bit of required software is the Tanzu CLI, which can be downloaded from the [project on GitHub](https://github.com/vmware-tanzu/community-edition/releases).
|
The final bit of required software is the Tanzu CLI, which can be downloaded from the [project on GitHub](https://github.com/vmware-tanzu/community-edition/releases).
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
curl -H "Accept: application/vnd.github.v3.raw" \
|
curl -H "Accept: application/vnd.github.v3.raw" \ # [tl! .cmd]
|
||||||
-L https://api.github.com/repos/vmware-tanzu/community-edition/contents/hack/get-tce-release.sh | \
|
-L https://api.github.com/repos/vmware-tanzu/community-edition/contents/hack/get-tce-release.sh | \
|
||||||
bash -s v0.9.1 linux
|
bash -s v0.9.1 linux
|
||||||
```
|
```
|
||||||
|
|
||||||
And then unpack it and run the installer:
|
And then unpack it and run the installer:
|
||||||
```bash
|
```shell
|
||||||
tar xf tce-linux-amd64-v0.9.1.tar.gz
|
tar xf tce-linux-amd64-v0.9.1.tar.gz # [tl! .cmd:2]
|
||||||
cd tce-linux-amd64-v0.9.1
|
cd tce-linux-amd64-v0.9.1
|
||||||
./install.sh
|
./install.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
I can then verify the installation is working correctly:
|
I can then verify the installation is working correctly:
|
||||||
```bash
|
```shell
|
||||||
❯ tanzu version
|
tanzu version # [tl! .cmd]
|
||||||
version: v0.2.1
|
version: v0.2.1 # [tl! .nocopy:2]
|
||||||
buildDate: 2021-09-29
|
buildDate: 2021-09-29
|
||||||
sha: ceaa474
|
sha: ceaa474
|
||||||
```
|
```
|
||||||
|
@ -142,15 +131,15 @@ Okay, now it's time for the good stuff - creating some shiny new Tanzu clusters!
|
||||||
|
|
||||||
#### Management cluster
|
#### Management cluster
|
||||||
I need to create a Management cluster first and I'd like to do that with the UI, so that's as simple as:
|
I need to create a Management cluster first and I'd like to do that with the UI, so that's as simple as:
|
||||||
```bash
|
```shell
|
||||||
tanzu management-cluster create --ui
|
tanzu management-cluster create --ui # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
I should then be able to access the UI by pointing a web browser at `http://127.0.0.1:8080`... but I'm running this on a VM without a GUI, so I'll need to back up and tell it to bind on `0.0.0.0:8080` so the web installer will be accessible across the network. I can also include `--browser none` so that the installer doesn't bother with trying to launch a browser locally.
|
I should then be able to access the UI by pointing a web browser at `http://127.0.0.1:8080`... but I'm running this on a VM without a GUI, so I'll need to back up and tell it to bind on `0.0.0.0:8080` so the web installer will be accessible across the network. I can also include `--browser none` so that the installer doesn't bother with trying to launch a browser locally.
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ tanzu management-cluster create --ui --bind 0.0.0.0:8080 --browser none
|
tanzu management-cluster create --ui --bind 0.0.0.0:8080 --browser none # [tl! .cmd]
|
||||||
|
# [tl! .nocopy:2]
|
||||||
Validating the pre-requisites...
|
Validating the pre-requisites...
|
||||||
Serving kickstart UI at http://[::]:8080
|
Serving kickstart UI at http://[::]:8080
|
||||||
```
|
```
|
||||||
|
@ -186,20 +175,22 @@ I skip the Tanzu Mission Control piece (since I'm still waiting on access to [TM
|
||||||
|
|
||||||
See the option at the bottom to copy the CLI command? I'll need to use that since clicking the friendly **Deploy** button doesn't seem to work while connected to the web server remotely.
|
See the option at the bottom to copy the CLI command? I'll need to use that since clicking the friendly **Deploy** button doesn't seem to work while connected to the web server remotely.
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
tanzu management-cluster create --file /home/john/.config/tanzu/tkg/clusterconfigs/dr94t3m2on.yaml -v 6
|
tanzu management-cluster create \ # [tl! .cmd]
|
||||||
|
--file /home/john/.config/tanzu/tkg/clusterconfigs/dr94t3m2on.yaml -v 6
|
||||||
```
|
```
|
||||||
|
|
||||||
In fact, I'm going to copy that file into my working directory and give it a more descriptive name so that I can re-use it in the future.
|
In fact, I'm going to copy that file into my working directory and give it a more descriptive name so that I can re-use it in the future.
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
cp ~/.config/tanzu/tkg/clusterconfigs/dr94t3m2on.yaml ~/projects/tanzu-homelab/tce-mgmt.yaml
|
cp ~/.config/tanzu/tkg/clusterconfigs/dr94t3m2on.yaml \ # [tl! .cmd]
|
||||||
|
~/projects/tanzu-homelab/tce-mgmt.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Now I can run the install command:
|
Now I can run the install command:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
tanzu management-cluster create --file ./tce-mgmt.yaml -v 6
|
tanzu management-cluster create --file ./tce-mgmt.yaml -v 6 # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
After a moment or two of verifying prerequisites, I'm met with a polite offer to enable Tanzu Kubernetes Grid Service in vSphere:
|
After a moment or two of verifying prerequisites, I'm met with a polite offer to enable Tanzu Kubernetes Grid Service in vSphere:
|
||||||
|
@ -246,9 +237,9 @@ Some addons might be getting installed! Check their status by running the follow
|
||||||
|
|
||||||
I can run that last command to go ahead and verify that the addon installation has completed:
|
I can run that last command to go ahead and verify that the addon installation has completed:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl get apps -A
|
kubectl get apps -A # [tl! .cmd]
|
||||||
NAMESPACE NAME DESCRIPTION SINCE-DEPLOY AGE
|
NAMESPACE NAME DESCRIPTION SINCE-DEPLOY AGE # [tl! .nocopy:5]
|
||||||
tkg-system antrea Reconcile succeeded 26s 6m49s
|
tkg-system antrea Reconcile succeeded 26s 6m49s
|
||||||
tkg-system metrics-server Reconcile succeeded 36s 6m49s
|
tkg-system metrics-server Reconcile succeeded 36s 6m49s
|
||||||
tkg-system tanzu-addons-manager Reconcile succeeded 22s 8m54s
|
tkg-system tanzu-addons-manager Reconcile succeeded 22s 8m54s
|
||||||
|
@ -257,9 +248,9 @@ tkg-system vsphere-csi Reconcile succeeded 36s 6m50s
|
||||||
```
|
```
|
||||||
|
|
||||||
And I can use the Tanzu CLI to get some other details about the new management cluster:
|
And I can use the Tanzu CLI to get some other details about the new management cluster:
|
||||||
```bash
|
```shell
|
||||||
❯ tanzu management-cluster get tce-mgmt
|
tanzu management-cluster get tce-mgmt # [tl! .cmd]
|
||||||
NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES
|
NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES # [tl! .nocopy:start]
|
||||||
tce-mgmt tkg-system running 1/1 1/1 v1.21.2+vmware.1 management
|
tce-mgmt tkg-system running 1/1 1/1 v1.21.2+vmware.1 management
|
||||||
|
|
||||||
|
|
||||||
|
@ -281,7 +272,7 @@ Providers:
|
||||||
capi-kubeadm-bootstrap-system bootstrap-kubeadm BootstrapProvider kubeadm v0.3.23
|
capi-kubeadm-bootstrap-system bootstrap-kubeadm BootstrapProvider kubeadm v0.3.23
|
||||||
capi-kubeadm-control-plane-system control-plane-kubeadm ControlPlaneProvider kubeadm v0.3.23
|
capi-kubeadm-control-plane-system control-plane-kubeadm ControlPlaneProvider kubeadm v0.3.23
|
||||||
capi-system cluster-api CoreProvider cluster-api v0.3.23
|
capi-system cluster-api CoreProvider cluster-api v0.3.23
|
||||||
capv-system infrastructure-vsphere InfrastructureProvider vsphere v0.7.10
|
capv-system infrastructure-vsphere InfrastructureProvider vsphere v0.7.10 # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
@ -292,8 +283,8 @@ Excellent! Things are looking good so I can move on to create the cluster which
|
||||||
#### Workload cluster
|
#### Workload cluster
|
||||||
I won't use the UI for this but will instead take a copy of my `tce-mgmt.yaml` file and adapt it to suit the workload needs (as described [here](https://tanzucommunityedition.io/docs/latest/workload-clusters/)).
|
I won't use the UI for this but will instead take a copy of my `tce-mgmt.yaml` file and adapt it to suit the workload needs (as described [here](https://tanzucommunityedition.io/docs/latest/workload-clusters/)).
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
cp tce-mgmt.yaml tce-work.yaml
|
cp tce-mgmt.yaml tce-work.yaml # [tl! .cmd:1]
|
||||||
vi tce-work.yaml
|
vi tce-work.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -310,9 +301,9 @@ I *could* change a few others if I wanted to[^i_wont]:
|
||||||
|
|
||||||
After saving my changes to the `tce-work.yaml` file, I'm ready to deploy the cluster:
|
After saving my changes to the `tce-work.yaml` file, I'm ready to deploy the cluster:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ tanzu cluster create --file tce-work.yaml
|
tanzu cluster create --file tce-work.yaml # [tl! .cmd]
|
||||||
Validating configuration...
|
Validating configuration... # [tl! .nocopy:start]
|
||||||
Warning: Pinniped configuration not found. Skipping pinniped configuration in workload cluster. Please refer to the documentation to check if you can configure pinniped on workload cluster manually
|
Warning: Pinniped configuration not found. Skipping pinniped configuration in workload cluster. Please refer to the documentation to check if you can configure pinniped on workload cluster manually
|
||||||
Creating workload cluster 'tce-work'...
|
Creating workload cluster 'tce-work'...
|
||||||
Waiting for cluster to be initialized...
|
Waiting for cluster to be initialized...
|
||||||
|
@ -320,13 +311,13 @@ Waiting for cluster nodes to be available...
|
||||||
Waiting for addons installation...
|
Waiting for addons installation...
|
||||||
Waiting for packages to be up and running...
|
Waiting for packages to be up and running...
|
||||||
|
|
||||||
Workload cluster 'tce-work' created
|
Workload cluster 'tce-work' created # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Right on! I'll use `tanzu cluster get` to check out the workload cluster:
|
Right on! I'll use `tanzu cluster get` to check out the workload cluster:
|
||||||
```bash
|
```shell
|
||||||
❯ tanzu cluster get tce-work
|
tanzu cluster get tce-work # [tl! .cmd]
|
||||||
NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES
|
NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES # [tl! .nocopy:start]
|
||||||
tce-work default running 1/1 1/1 v1.21.2+vmware.1 <none>
|
tce-work default running 1/1 1/1 v1.21.2+vmware.1 <none>
|
||||||
ℹ
|
ℹ
|
||||||
|
|
||||||
|
@ -339,7 +330,7 @@ NAME READY SEVERITY RE
|
||||||
│ └─Machine/tce-work-control-plane-8km9m True 9m31s
|
│ └─Machine/tce-work-control-plane-8km9m True 9m31s
|
||||||
└─Workers
|
└─Workers
|
||||||
└─MachineDeployment/tce-work-md-0
|
└─MachineDeployment/tce-work-md-0
|
||||||
└─Machine/tce-work-md-0-687444b744-cck4x True 8m31s
|
└─Machine/tce-work-md-0-687444b744-cck4x True 8m31s # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
I can also go into vCenter and take a look at the VMs which constitute the two clusters:
|
I can also go into vCenter and take a look at the VMs which constitute the two clusters:
|
||||||
|
@ -356,9 +347,9 @@ Excellent, I've got a Tanzu management cluster and a Tanzu workload cluster. Wha
|
||||||
|
|
||||||
If I run `kubectl get nodes` right now, I'll only get information about the management cluster:
|
If I run `kubectl get nodes` right now, I'll only get information about the management cluster:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl get nodes
|
kubectl get nodes # [tl! .cmd]
|
||||||
NAME STATUS ROLES AGE VERSION
|
NAME STATUS ROLES AGE VERSION # [tl! .nocopy:2]
|
||||||
tce-mgmt-control-plane-xtdnx Ready control-plane,master 18h v1.21.2+vmware.1
|
tce-mgmt-control-plane-xtdnx Ready control-plane,master 18h v1.21.2+vmware.1
|
||||||
tce-mgmt-md-0-745b858d44-4c9vv Ready <none> 17h v1.21.2+vmware.1
|
tce-mgmt-md-0-745b858d44-4c9vv Ready <none> 17h v1.21.2+vmware.1
|
||||||
```
|
```
|
||||||
|
@ -366,28 +357,29 @@ tce-mgmt-md-0-745b858d44-4c9vv Ready <none> 17h v1.21.2+v
|
||||||
#### Setting the right context
|
#### Setting the right context
|
||||||
To be able to deploy stuff to the workload cluster, I need to tell `kubectl` how to talk to it. And to do that, I'll first need to use `tanzu` to capture the cluster's kubeconfig:
|
To be able to deploy stuff to the workload cluster, I need to tell `kubectl` how to talk to it. And to do that, I'll first need to use `tanzu` to capture the cluster's kubeconfig:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ tanzu cluster kubeconfig get tce-work --admin
|
tanzu cluster kubeconfig get tce-work --admin # [tl! .cmd]
|
||||||
Credentials of cluster 'tce-work' have been saved
|
Credentials of cluster 'tce-work' have been saved # [tl! .nocopy:1]
|
||||||
You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work'
|
You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work'
|
||||||
```
|
```
|
||||||
|
|
||||||
I can now run `kubectl config get-contexts` and see that I have access to contexts on both management and workload clusters:
|
I can now run `kubectl config get-contexts` and see that I have access to contexts on both management and workload clusters:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl config get-contexts
|
kubectl config get-contexts # [tl! .cmd]
|
||||||
CURRENT NAME CLUSTER AUTHINFO NAMESPACE
|
CURRENT NAME CLUSTER AUTHINFO NAMESPACE # [tl! .nocopy:2]
|
||||||
* tce-mgmt-admin@tce-mgmt tce-mgmt tce-mgmt-admin
|
* tce-mgmt-admin@tce-mgmt tce-mgmt tce-mgmt-admin
|
||||||
tce-work-admin@tce-work tce-work tce-work-admin
|
tce-work-admin@tce-work tce-work tce-work-admin
|
||||||
```
|
```
|
||||||
|
|
||||||
And I can switch to the `tce-work` cluster like so:
|
And I can switch to the `tce-work` cluster like so:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl config use-context tce-work-admin@tce-work
|
kubectl config use-context tce-work-admin@tce-work # [tl! .cmd]
|
||||||
Switched to context "tce-work-admin@tce-work".
|
Switched to context "tce-work-admin@tce-work". # [tl! .nocopy]
|
||||||
❯ kubectl get nodes
|
|
||||||
NAME STATUS ROLES AGE VERSION
|
kubectl get nodes # [tl! .cmd]
|
||||||
|
NAME STATUS ROLES AGE VERSION # [tl! .nocopy:2]
|
||||||
tce-work-control-plane-8km9m Ready control-plane,master 17h v1.21.2+vmware.1
|
tce-work-control-plane-8km9m Ready control-plane,master 17h v1.21.2+vmware.1
|
||||||
tce-work-md-0-687444b744-cck4x Ready <none> 17h v1.21.2+vmware.1
|
tce-work-md-0-687444b744-cck4x Ready <none> 17h v1.21.2+vmware.1
|
||||||
```
|
```
|
||||||
|
@ -399,12 +391,12 @@ Before I move on to deploying actually *useful* workloads, I'll start with deplo
|
||||||
|
|
||||||
I can check out the sample deployment that William put together [here](https://github.com/lamw/vmware-k8s-app-demo/blob/master/yelb.yaml), and then deploy it with:
|
I can check out the sample deployment that William put together [here](https://github.com/lamw/vmware-k8s-app-demo/blob/master/yelb.yaml), and then deploy it with:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl create ns yelb
|
kubectl create ns yelb # [tl! .cmd]
|
||||||
namespace/yelb created
|
namespace/yelb created # [tl! .nocopy:1]
|
||||||
|
|
||||||
❯ kubectl apply -f https://raw.githubusercontent.com/lamw/vmware-k8s-app-demo/master/yelb.yaml
|
kubectl apply -f https://raw.githubusercontent.com/lamw/vmware-k8s-app-demo/master/yelb.yaml # [tl! .cmd]
|
||||||
service/redis-server created
|
service/redis-server created # [tl! .nocopy:start]
|
||||||
service/yelb-db created
|
service/yelb-db created
|
||||||
service/yelb-appserver created
|
service/yelb-appserver created
|
||||||
service/yelb-ui created
|
service/yelb-ui created
|
||||||
|
@ -412,9 +404,9 @@ deployment.apps/yelb-ui created
|
||||||
deployment.apps/redis-server created
|
deployment.apps/redis-server created
|
||||||
deployment.apps/yelb-db created
|
deployment.apps/yelb-db created
|
||||||
deployment.apps/yelb-appserver created
|
deployment.apps/yelb-appserver created
|
||||||
|
# [tl! .nocopy:end]
|
||||||
❯ kubectl -n yelb get pods
|
kubectl -n yelb get pods # [tl! .cmd]
|
||||||
NAME READY STATUS RESTARTS AGE
|
NAME READY STATUS RESTARTS AGE # [tl! .nocopy:4]
|
||||||
redis-server-74556bbcb7-r9jqc 1/1 Running 0 10s
|
redis-server-74556bbcb7-r9jqc 1/1 Running 0 10s
|
||||||
yelb-appserver-d584bb889-2jspg 1/1 Running 0 10s
|
yelb-appserver-d584bb889-2jspg 1/1 Running 0 10s
|
||||||
yelb-db-694586cd78-wb8tt 1/1 Running 0 10s
|
yelb-db-694586cd78-wb8tt 1/1 Running 0 10s
|
||||||
|
@ -423,35 +415,35 @@ yelb-ui-8f54fd88c-k2dw9 1/1 Running 0 10s
|
||||||
|
|
||||||
Once the app is running, I can point my web browser at it to see it in action. But what IP do I use?
|
Once the app is running, I can point my web browser at it to see it in action. But what IP do I use?
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl -n yelb get svc/yelb-ui
|
kubectl -n yelb get svc/yelb-ui # [tl! .cmd]
|
||||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE # [tl! .nocopy:1]
|
||||||
yelb-ui NodePort 100.71.228.116 <none> 80:30001/TCP 84s
|
yelb-ui NodePort 100.71.228.116 <none> 80:30001/TCP 84s
|
||||||
```
|
```
|
||||||
|
|
||||||
This demo is using a `NodePort` type service to expose the front end, which means it will be accessible on port `30001` on the node it's running on. I can find that IP by:
|
This demo is using a `NodePort` type service to expose the front end, which means it will be accessible on port `30001` on the node it's running on. I can find that IP by:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl -n yelb describe pod $(kubectl -n yelb get pods | grep yelb-ui | awk '{print $1}') | grep "Node:"
|
kubectl -n yelb describe pod $(kubectl -n yelb get pods | grep yelb-ui | awk '{print $1}') | grep "Node:" # [tl! .cmd]
|
||||||
Node: tce-work-md-0-687444b744-cck4x/192.168.1.145
|
Node: tce-work-md-0-687444b744-cck4x/192.168.1.145 # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
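(A shorter route to the same answer: wide output on the pods shows which node each one landed on, and wide output on the nodes lists their addresses:)

```shell
kubectl -n yelb get pods -o wide # [tl! .cmd:1]
kubectl get nodes -o wide
```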
|
|
||||||
So I can point my browser at `http://192.168.1.145:30001` and see the demo:
|
So I can point my browser at `http://192.168.1.145:30001` and see the demo:
|
||||||
![yelb demo page](yelb_nodeport_demo.png)
|
![yelb demo page](yelb_nodeport_demo.png)
|
||||||
|
|
||||||
After marveling at my own magnificence[^magnificence] for a few minutes, I'm ready to move on to something more interesting - but first, I'll just delete the `yelb` namespace to clean up the work I just did:
|
After marveling at my own magnificence[^magnificence] for a few minutes, I'm ready to move on to something more interesting - but first, I'll just delete the `yelb` namespace to clean up the work I just did:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl delete ns yelb
|
kubectl delete ns yelb # [tl! .cmd]
|
||||||
namespace "yelb" deleted
|
namespace "yelb" deleted # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
Now let's move on and try to deploy `yelb` behind a `LoadBalancer` service so it will get its own IP. William has a [deployment spec](https://github.com/lamw/vmware-k8s-app-demo/blob/master/yelb-lb.yaml) for that too.
|
Now let's move on and try to deploy `yelb` behind a `LoadBalancer` service so it will get its own IP. William has a [deployment spec](https://github.com/lamw/vmware-k8s-app-demo/blob/master/yelb-lb.yaml) for that too.
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl create ns yelb
|
kubectl create ns yelb # [tl! .cmd]
|
||||||
namespace/yelb created
|
namespace/yelb created # [tl! .nocopy:1]
|
||||||
|
|
||||||
❯ kubectl apply -f https://raw.githubusercontent.com/lamw/vmware-k8s-app-demo/master/yelb-lb.yaml
|
kubectl apply -f https://raw.githubusercontent.com/lamw/vmware-k8s-app-demo/master/yelb-lb.yaml # [tl! .cmd]
|
||||||
service/redis-server created
|
service/redis-server created # [tl! .nocopy:8]
|
||||||
service/yelb-db created
|
service/yelb-db created
|
||||||
service/yelb-appserver created
|
service/yelb-appserver created
|
||||||
service/yelb-ui created
|
service/yelb-ui created
|
||||||
|
@ -460,8 +452,8 @@ deployment.apps/redis-server created
|
||||||
deployment.apps/yelb-db created
|
deployment.apps/yelb-db created
|
||||||
deployment.apps/yelb-appserver created
|
deployment.apps/yelb-appserver created
|
||||||
|
|
||||||
❯ kubectl -n yelb get pods
|
kubectl -n yelb get pods # [tl! .cmd]
|
||||||
NAME READY STATUS RESTARTS AGE
|
NAME READY STATUS RESTARTS AGE # [tl! .nocopy:4]
|
||||||
redis-server-74556bbcb7-q6l62 1/1 Running 0 7s
|
redis-server-74556bbcb7-q6l62 1/1 Running 0 7s
|
||||||
yelb-appserver-d584bb889-p5qgd 1/1 Running 0 7s
|
yelb-appserver-d584bb889-p5qgd 1/1 Running 0 7s
|
||||||
yelb-db-694586cd78-hjtn4 1/1 Running 0 7s
|
yelb-db-694586cd78-hjtn4 1/1 Running 0 7s
|
||||||
|
@ -469,9 +461,9 @@ yelb-ui-8f54fd88c-pm9qw 1/1 Running 0 7s
|
||||||
```
|
```
|
||||||
|
|
||||||
And I can take a look at that service...
|
And I can take a look at that service...
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl -n yelb get svc/yelb-ui
|
kubectl -n yelb get svc/yelb-ui # [tl! .cmd]
|
||||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE # [tl! .nocopy:1]
|
||||||
yelb-ui LoadBalancer 100.67.177.185 <pending> 80:32339/TCP 15s
|
yelb-ui LoadBalancer 100.67.177.185 <pending> 80:32339/TCP 15s
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -482,21 +474,23 @@ Wait a minute. That external IP is *still* `<pending>`. What gives? Oh yeah I ne
|
||||||
#### Deploying `kube-vip` as a load balancer
|
#### Deploying `kube-vip` as a load balancer
|
||||||
Fortunately, William Lam [wrote up some tips](https://williamlam.com/2021/10/quick-tip-install-kube-vip-as-service-load-balancer-with-tanzu-community-edition-tce.html) for handling that too. It's [based on work by Scott Rosenberg](https://github.com/vrabbi/tkgm-customizations). The quick-and-dirty steps needed to make this work are:
|
Fortunately, William Lam [wrote up some tips](https://williamlam.com/2021/10/quick-tip-install-kube-vip-as-service-load-balancer-with-tanzu-community-edition-tce.html) for handling that too. It's [based on work by Scott Rosenberg](https://github.com/vrabbi/tkgm-customizations). The quick-and-dirty steps needed to make this work are:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
git clone https://github.com/vrabbi/tkgm-customizations.git
|
git clone https://github.com/vrabbi/tkgm-customizations.git # [tl! .cmd:3]
|
||||||
cd tkgm-customizations/carvel-packages/kube-vip-package
|
cd tkgm-customizations/carvel-packages/kube-vip-package
|
||||||
kubectl apply -n tanzu-package-repo-global -f metadata.yml
|
kubectl apply -n tanzu-package-repo-global -f metadata.yml
|
||||||
kubectl apply -n tanzu-package-repo-global -f package.yaml
|
kubectl apply -n tanzu-package-repo-global -f package.yaml
|
||||||
cat << EOF > values.yaml
|
|
||||||
|
cat << EOF > values.yaml # [tl! .cmd]
|
||||||
vip_range: 192.168.1.64-192.168.1.80
|
vip_range: 192.168.1.64-192.168.1.80
|
||||||
EOF
|
EOF
|
||||||
tanzu package install kubevip -p kubevip.terasky.com -v 0.3.9 -f values.yaml
|
|
||||||
|
tanzu package install kubevip -p kubevip.terasky.com -v 0.3.9 -f values.yaml # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
Now I can check out the `yelb-ui` service again:
|
Now I can check out the `yelb-ui` service again:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl -n yelb get svc/yelb-ui
|
kubectl -n yelb get svc/yelb-ui # [tl! .cmd]
|
||||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE # [tl! .nocopy:1]
|
||||||
yelb-ui LoadBalancer 100.67.177.185 192.168.1.65 80:32339/TCP 4h35m
|
yelb-ui LoadBalancer 100.67.177.185 192.168.1.65 80:32339/TCP 4h35m
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -504,9 +498,9 @@ And it's got an IP! I can point my browser to `http://192.168.1.65` now and see:
|
||||||
![Successful LoadBalancer test!](yelb_loadbalancer_demo.png)
|
![Successful LoadBalancer test!](yelb_loadbalancer_demo.png)
|
||||||
|
|
||||||
I'll keep the `kube-vip` load balancer since it'll come in handy, but I have no further use for `yelb`:
|
I'll keep the `kube-vip` load balancer since it'll come in handy, but I have no further use for `yelb`:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl delete ns yelb
|
kubectl delete ns yelb # [tl! .cmd]
|
||||||
namespace "yelb" deleted
|
namespace "yelb" deleted # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Persistent Volume Claims, Storage Classes, and Storage Policies
|
#### Persistent Volume Claims, Storage Classes, and Storage Policies
|
||||||
|
@ -520,6 +514,7 @@ Then I create a new vSphere Storage Policy called `tkg-storage-policy` which sta
|
||||||
|
|
||||||
So that's the vSphere side of things sorted; now to map that back to the Kubernetes side. For that, I'll need to define a Storage Class tied to the vSphere Storage profile, so I drop these details into a new file called `vsphere-sc.yaml`:
|
So that's the vSphere side of things sorted; now to map that back to the Kubernetes side. For that, I'll need to define a Storage Class tied to the vSphere Storage profile, so I drop these details into a new file called `vsphere-sc.yaml`:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
kind: StorageClass
|
kind: StorageClass
|
||||||
apiVersion: storage.k8s.io/v1
|
apiVersion: storage.k8s.io/v1
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -530,13 +525,14 @@ parameters:
|
||||||
```
|
```
|
||||||
|
|
||||||
And then apply it with:
|
And then apply it with:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl apply -f vsphere-sc.yaml
|
kubectl apply -f vsphere-sc.yaml # [tl! .cmd]
|
||||||
storageclass.storage.k8s.io/vsphere created
|
storageclass.storage.k8s.io/vsphere created # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
I can test creating a Persistent Volume Claim against the new `vsphere` Storage Class by putting this in a new file called `vsphere-pvc.yaml`:
|
I can test creating a Persistent Volume Claim against the new `vsphere` Storage Class by putting this in a new file called `vsphere-pvc.yaml`:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: PersistentVolumeClaim
|
kind: PersistentVolumeClaim
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -553,15 +549,15 @@ spec:
|
||||||
```
|
```
|
||||||
|
|
||||||
And applying it:
|
And applying it:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl apply -f vsphere-pvc.yaml
|
kubectl apply -f vsphere-pvc.yaml # [tl! .cmd]
|
||||||
persistentvolumeclaim/vsphere-demo-1 created
|
persistentvolumeclaim/vsphere-demo-1 created # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
I can see the new claim, and confirm that its status is `Bound`:
|
I can see the new claim, and confirm that its status is `Bound`:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl get pvc
|
kubectl get pvc # [tl! .cmd]
|
||||||
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
|
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE # [tl! .nocopy:1]
|
||||||
vsphere-demo-1 Bound pvc-36cc7c01-a1b3-4c1c-ba0d-dff3fd47f93b 5Gi RWO vsphere 4m25s
|
vsphere-demo-1 Bound pvc-36cc7c01-a1b3-4c1c-ba0d-dff3fd47f93b 5Gi RWO vsphere 4m25s
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -569,9 +565,9 @@ And for bonus points, I can see that the container volume was created on the vSp
|
||||||
![Container Volume in vSphere](container_volume_in_vsphere.png)
|
![Container Volume in vSphere](container_volume_in_vsphere.png)
|
||||||
|
|
||||||
So that's storage sorted. I'll clean up my test volume before moving on:
|
So that's storage sorted. I'll clean up my test volume before moving on:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl delete -f vsphere-pvc.yaml
|
kubectl delete -f vsphere-pvc.yaml # [tl! .cmd]
|
||||||
persistentvolumeclaim "vsphere-demo-1" deleted
|
persistentvolumeclaim "vsphere-demo-1" deleted # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
### A real workload - phpIPAM
|
### A real workload - phpIPAM
|
||||||
|
@ -583,9 +579,9 @@ So I set to work exploring some containerization options, and I found [phpipam-d
|
||||||
|
|
||||||
To start, I'll create a new namespace to keep things tidy:
|
To start, I'll create a new namespace to keep things tidy:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl create ns ipam
|
kubectl create ns ipam # [tl! .cmd]
|
||||||
namespace/ipam created
|
namespace/ipam created # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
I'm going to wind up with four pods:
|
I'm going to wind up with four pods:
|
||||||
|
@ -601,6 +597,7 @@ I'll use each container's original `docker-compose` configuration and adapt that
|
||||||
#### phpipam-db
|
#### phpipam-db
|
||||||
The phpIPAM database will live inside a MariaDB container. Here's the relevant bit from `docker-compose`:
|
The phpIPAM database will live inside a MariaDB container. Here's the relevant bit from `docker-compose`:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
services:
|
services:
|
||||||
phpipam-db:
|
phpipam-db:
|
||||||
image: mariadb:latest
|
image: mariadb:latest
|
||||||
|
@ -616,6 +613,7 @@ So it will need a `Service` exposing the container's port `3306` so that other p
|
||||||
|
|
||||||
It might look like this on the Kubernetes side:
|
It might look like this on the Kubernetes side:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# phpipam-db.yaml
|
# phpipam-db.yaml
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
|
@ -687,6 +685,7 @@ Moving on:
|
||||||
#### phpipam-www
|
#### phpipam-www
|
||||||
This is the `docker-compose` excerpt for the web component:
|
This is the `docker-compose` excerpt for the web component:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
services:
|
services:
|
||||||
phpipam-web:
|
phpipam-web:
|
||||||
image: phpipam/phpipam-www:1.5x
|
image: phpipam/phpipam-www:1.5x
|
||||||
|
@ -705,6 +704,7 @@ Based on that, I can see that my `phpipam-www` pod will need a container running
|
||||||
|
|
||||||
Here's how I'd adapt that into a structure that Kubernetes will understand:
|
Here's how I'd adapt that into a structure that Kubernetes will understand:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# phpipam-www.yaml
|
# phpipam-www.yaml
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
|
@ -753,7 +753,7 @@ spec:
|
||||||
labels:
|
labels:
|
||||||
app: phpipam-www
|
app: phpipam-www
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers: # [tl! focus:2]
|
||||||
- name: phpipam-www
|
- name: phpipam-www
|
||||||
image: phpipam/phpipam-www:1.5x
|
image: phpipam/phpipam-www:1.5x
|
||||||
env:
|
env:
|
||||||
|
@ -779,6 +779,7 @@ spec:
|
||||||
#### phpipam-cron
|
#### phpipam-cron
|
||||||
This container has a pretty simple configuration in `docker-compose`:
|
This container has a pretty simple configuration in `docker-compose`:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
services:
|
services:
|
||||||
phpipam-cron:
|
phpipam-cron:
|
||||||
image: phpipam/phpipam-cron:1.5x
|
image: phpipam/phpipam-cron:1.5x
|
||||||
|
@ -792,6 +793,7 @@ services:
|
||||||
No exposed ports, no need for persistence - just a base image and a few variables to tell it how to connect to the database and how often to run the scans:
|
No exposed ports, no need for persistence - just a base image and a few variables to tell it how to connect to the database and how often to run the scans:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# phpipam-cron.yaml
|
# phpipam-cron.yaml
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
|
@ -825,6 +827,7 @@ spec:
|
||||||
#### phpipam-agent
|
#### phpipam-agent
|
||||||
And finally, my remote scan agent. Here's the `docker-compose`:
|
And finally, my remote scan agent. Here's the `docker-compose`:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
services:
|
services:
|
||||||
phpipam-agent:
|
phpipam-agent:
|
||||||
container_name: phpipam-agent
|
container_name: phpipam-agent
|
||||||
|
@ -847,6 +850,7 @@ It's got a few additional variables to make it extra-configurable, but still no
|
||||||
|
|
||||||
For now, here's how I'd tell Kubernetes about it:
|
For now, here's how I'd tell Kubernetes about it:
|
||||||
```yaml
|
```yaml
|
||||||
|
# torchlight! {"lineNumbers": true}
|
||||||
# phpipam-agent.yaml
|
# phpipam-agent.yaml
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
|
@ -891,32 +895,32 @@ spec:
|
||||||
|
|
||||||
#### Deployment and configuration of phpIPAM
|
#### Deployment and configuration of phpIPAM
|
||||||
I can now go ahead and start deploying these containers, starting with the database one (upon which all the others rely):
|
I can now go ahead and start deploying these containers, starting with the database one (upon which all the others rely):
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl apply -f phpipam-db.yaml
|
kubectl apply -f phpipam-db.yaml # [tl! .cmd]
|
||||||
service/phpipam-db created
|
service/phpipam-db created # [tl! .nocopy:2]
|
||||||
persistentvolumeclaim/phpipam-db-pvc created
|
persistentvolumeclaim/phpipam-db-pvc created
|
||||||
deployment.apps/phpipam-db created
|
deployment.apps/phpipam-db created
|
||||||
```
|
```
|
||||||
|
|
||||||
And the web server:
|
And the web server:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl apply -f phpipam-www.yaml
|
kubectl apply -f phpipam-www.yaml # [tl! .cmd]
|
||||||
service/phpipam-www created
|
service/phpipam-www created # [tl! .nocopy:2]
|
||||||
persistentvolumeclaim/phpipam-www-pvc created
|
persistentvolumeclaim/phpipam-www-pvc created
|
||||||
deployment.apps/phpipam-www created
|
deployment.apps/phpipam-www created
|
||||||
```
|
```
|
||||||
|
|
||||||
And the cron runner:
|
And the cron runner:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl apply -f phpipam-cron.yaml
|
kubectl apply -f phpipam-cron.yaml # [tl! .cmd]
|
||||||
deployment.apps/phpipam-cron created
|
deployment.apps/phpipam-cron created # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
I'll hold off on the agent container for now since I'll need to adjust the configuration slightly after getting phpIPAM set up, but I will go ahead and check out my work so far:
|
I'll hold off on the agent container for now since I'll need to adjust the configuration slightly after getting phpIPAM set up, but I will go ahead and check out my work so far:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl -n ipam get all
|
kubectl -n ipam get all # [tl! .cmd]
|
||||||
NAME READY STATUS RESTARTS AGE
|
NAME READY STATUS RESTARTS AGE # [tl! .nocopy:start]
|
||||||
pod/phpipam-cron-6c994897c4-6rsnp 1/1 Running 0 4m30s
|
pod/phpipam-cron-6c994897c4-6rsnp 1/1 Running 0 4m30s
|
||||||
pod/phpipam-db-5f4c47d4b9-sb5bd 1/1 Running 0 16m
|
pod/phpipam-db-5f4c47d4b9-sb5bd 1/1 Running 0 16m
|
||||||
pod/phpipam-www-769c95c68d-94klg 1/1 Running 0 5m59s
|
pod/phpipam-www-769c95c68d-94klg 1/1 Running 0 5m59s
|
||||||
|
@ -933,7 +937,7 @@ deployment.apps/phpipam-www 1/1 1 1 5m59s
|
||||||
NAME DESIRED CURRENT READY AGE
|
NAME DESIRED CURRENT READY AGE
|
||||||
replicaset.apps/phpipam-cron-6c994897c4 1 1 1 4m30s
|
replicaset.apps/phpipam-cron-6c994897c4 1 1 1 4m30s
|
||||||
replicaset.apps/phpipam-db-5f4c47d4b9 1 1 1 16m
|
replicaset.apps/phpipam-db-5f4c47d4b9 1 1 1 16m
|
||||||
replicaset.apps/phpipam-www-769c95c68d 1 1 1 5m59s
|
replicaset.apps/phpipam-www-769c95c68d 1 1 1 5m59s # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
And I can point my browser to the `EXTERNAL-IP` associated with the `phpipam-www` service to see the initial setup page:
|
And I can point my browser to the `EXTERNAL-IP` associated with the `phpipam-www` service to see the initial setup page:
|
||||||
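(If the service listing has already scrolled away, that external IP is quick to pull back up:)

```shell
kubectl -n ipam get svc phpipam-www # [tl! .cmd]
```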
|
@ -963,9 +967,9 @@ I'll copy the agent code and plug it into my `phpipam-agent.yaml` file:
|
||||||
```
|
```
|
||||||
|
|
||||||
And then deploy that:
|
And then deploy that:
|
||||||
```bash
|
```shell
|
||||||
❯ kubectl apply -f phpipam-agent.yaml
|
kubectl apply -f phpipam-agent.yaml # [tl! .cmd]
|
||||||
deployment.apps/phpipam-agent created
|
deployment.apps/phpipam-agent created # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
The scan agent isn't going to do anything until it's assigned to a subnet though, so now I head to **Administration > IP related management > Sections**. phpIPAM comes with a few default sections and ranges and such defined, so I'll delete those and create a new one that I'll call `Lab`.
|
The scan agent isn't going to do anything until it's assigned to a subnet though, so now I head to **Administration > IP related management > Sections**. phpIPAM comes with a few default sections and ranges and such defined, so I'll delete those and create a new one that I'll call `Lab`.
|
||||||
|
|
|
@ -42,20 +42,20 @@ The host will need to be in maintenance mode in order to apply the upgrade, and
|
||||||
### 3. Place host in maintenance mode
|
### 3. Place host in maintenance mode
|
||||||
I can do that by SSH'ing to the host and running:
|
I can do that by SSH'ing to the host and running:
|
||||||
```shell
|
```shell
|
||||||
esxcli system maintenanceMode set -e true
|
esxcli system maintenanceMode set -e true # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
And can confirm that it happened with:
|
And can confirm that it happened with:
|
||||||
```shell
|
```shell
|
||||||
esxcli system maintenanceMode get
|
esxcli system maintenanceMode get # [tl! .cmd]
|
||||||
Enabled
|
Enabled # [tl! .nocopy]
|
||||||
```
|
```
|
||||||
|
|
||||||
### 4. Identify the profile name
|
### 4. Identify the profile name
|
||||||
Because this is an *upgrade* from one major release to another rather than a simple *update*, I need to know the name of the profile which will be applied. I can identify that with:
|
Because this is an *upgrade* from one major release to another rather than a simple *update*, I need to know the name of the profile which will be applied. I can identify that with:
|
||||||
```shell
|
```shell
|
||||||
esxcli software sources profile list -d /vmfs/volumes/nuchost-local/_Patches/VMware-ESXi-8.0-20513097-depot.zip
|
esxcli software sources profile list -d /vmfs/volumes/nuchost-local/_Patches/VMware-ESXi-8.0-20513097-depot.zip # [tl! .cmd]
|
||||||
Name Vendor Acceptance Level Creation Time Modification Time
|
Name Vendor Acceptance Level Creation Time Modification Time # [tl! .nocopy:3]
|
||||||
---------------------------- ------------ ---------------- ------------------- -----------------
|
---------------------------- ------------ ---------------- ------------------- -----------------
|
||||||
ESXi-8.0.0-20513097-standard VMware, Inc. PartnerSupported 2022-09-23T18:59:28 2022-09-23T18:59:28
|
ESXi-8.0.0-20513097-standard VMware, Inc. PartnerSupported 2022-09-23T18:59:28 2022-09-23T18:59:28
|
||||||
ESXi-8.0.0-20513097-no-tools VMware, Inc. PartnerSupported 2022-09-23T18:59:28 2022-09-23T18:59:28
|
ESXi-8.0.0-20513097-no-tools VMware, Inc. PartnerSupported 2022-09-23T18:59:28 2022-09-23T18:59:28
|
||||||
|
@ -69,13 +69,12 @@ In this case, I'll use the `ESXi-8.0.0-20513097-standard` profile.
|
||||||
### 5. Install the upgrade
|
### 5. Install the upgrade
|
||||||
Now for the moment of truth:
|
Now for the moment of truth:
|
||||||
```shell
|
```shell
|
||||||
esxcli software profile update -d /vmfs/volumes/nuchost-local/_Patches/VMware-ESXi-8.0-2051309
|
esxcli software profile update -d /vmfs/volumes/nuchost-local/_Patches/VMware-ESXi-8.0-20513097-depot.zip -p ESXi-8.0.0-20513097-standard # [tl! .cmd]
|
||||||
7-depot.zip -p ESXi-8.0.0-20513097-standard
|
|
||||||
```
|
```
|
||||||
|
|
||||||
When it finishes (successfully), it leaves a little message that the update won't be complete until the host is rebooted, so I'll go ahead and do that as well:
|
When it finishes (successfully), it leaves a little message that the update won't be complete until the host is rebooted, so I'll go ahead and do that as well:
|
||||||
```shell
|
```shell
|
||||||
reboot
|
reboot # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
And then wait (oh-so-patiently) for the host to come back up.
|
And then wait (oh-so-patiently) for the host to come back up.
|
||||||
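Once it's back, I shouldn't forget to take it out of maintenance mode (the reverse of step 3):

```shell
esxcli system maintenanceMode set -e false # [tl! .cmd]
```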
|
|
|
@ -11,18 +11,19 @@ toc: false
|
||||||
|
|
||||||
In the same vein as [my script to automagically resize a Linux LVM volume to use up free space on a disk](/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk), I wanted a way to automatically apply Windows updates for servers deployed by [my vRealize Automation environment](/series/vra8). I'm only really concerned with Windows Server 2019, which includes the [built-in Windows Update Provider PowerShell module](https://4sysops.com/archives/scan-download-and-install-windows-updates-with-powershell/). So this could be as simple as `Install-WUUpdates -Updates (Start-WUScan)` to scan for and install any available updates.
|
In the same vein as [my script to automagically resize a Linux LVM volume to use up free space on a disk](/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk), I wanted a way to automatically apply Windows updates for servers deployed by [my vRealize Automation environment](/series/vra8). I'm only really concerned with Windows Server 2019, which includes the [built-in Windows Update Provider PowerShell module](https://4sysops.com/archives/scan-download-and-install-windows-updates-with-powershell/). So this could be as simple as `Install-WUUpdates -Updates (Start-WUScan)` to scan for and install any available updates.
|
||||||
|
|
||||||
Unfortunately, I found that this approach can take a long time to run and often exceeded the timeout limits imposed upon my ABX script, causing the PowerShell session to end and terminating the update process. I really needed a way to do this without requiring a persistent session.
|
Unfortunately, I found that this approach can take a long time to run and often exceeded the timeout limits imposed upon my ABX script, causing the PowerShell session to end and terminating the update process. I really needed a way to do this without requiring a persistent session.
|
||||||
|
|
||||||
After further experimentation, I settled on using PowerShell to create a one-time scheduled task that would run the updates and reboot, if necessary. I also wanted the task to automatically delete itself after running to avoid cluttering up the task scheduler library - and that last item had me quite stumped until I found [this blog post with the solution](https://iamsupergeek.com/self-deleting-scheduled-task-via-powershell/).
|
After further experimentation, I settled on using PowerShell to create a one-time scheduled task that would run the updates and reboot, if necessary. I also wanted the task to automatically delete itself after running to avoid cluttering up the task scheduler library - and that last item had me quite stumped until I found [this blog post with the solution](https://iamsupergeek.com/self-deleting-scheduled-task-via-powershell/).
|
||||||
|
|
||||||
So here's what I put together:
|
So here's what I put together:
|
||||||
```powershell
|
```powershell
|
||||||
# This can be easily pasted into a remote PowerShell session to automatically install any available updates and reboot.
|
# torchlight! {"lineNumbers": true}
|
||||||
|
# This can be easily pasted into a remote PowerShell session to automatically install any available updates and reboot.
|
||||||
# It creates a scheduled task to start the update process after a one-minute delay so that you don't have to maintain
|
# It creates a scheduled task to start the update process after a one-minute delay so that you don't have to maintain
|
||||||
# the session during the process (or have the session time out), and it also sets the task to automatically delete itself 2 hours later.
|
# the session during the process (or have the session time out), and it also sets the task to automatically delete itself 2 hours later.
|
||||||
#
|
#
|
||||||
# This leverages the Windows Update Provider PowerShell module which is included in Windows 10 1709+ and Windows Server 2019.
|
# This leverages the Windows Update Provider PowerShell module which is included in Windows 10 1709+ and Windows Server 2019.
|
||||||
#
|
#
|
||||||
# Adapted from https://iamsupergeek.com/self-deleting-scheduled-task-via-powershell/
|
# Adapted from https://iamsupergeek.com/self-deleting-scheduled-task-via-powershell/
|
||||||
|
|
||||||
$action = New-ScheduledTaskAction -Execute 'C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe' -Argument '-NoProfile -WindowStyle Hidden -Command "& {Install-WUUpdates -Updates (Start-WUScan); if (Get-WUIsPendingReboot) {shutdown.exe /f /r /d p:2:4 /t 120 /c `"Rebooting to apply updates`"}}"'
|
$action = New-ScheduledTaskAction -Execute 'C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe' -Argument '-NoProfile -WindowStyle Hidden -Command "& {Install-WUUpdates -Updates (Start-WUScan); if (Get-WUIsPendingReboot) {shutdown.exe /f /r /d p:2:4 /t 120 /c `"Rebooting to apply updates`"}}"'
|
||||||
|
|
|
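# [Sketch] The diff cuts off the rest of the script here. Based on the description
# above (a one-minute delayed trigger, self-deletion two hours later, and running the
# updates non-interactively), the remaining registration steps might look roughly like
# the following. The variable names, task name, and EndBoundary handling are
# assumptions for illustration, not taken from the original script:
$trigger = New-ScheduledTaskTrigger -Once -At (Get-Date).AddMinutes(1)
$trigger.EndBoundary = (Get-Date).AddHours(2).ToString('s')  # task expires two hours out
$settings = New-ScheduledTaskSettingsSet -DeleteExpiredTaskAfter (New-TimeSpan -Seconds 0)
Register-ScheduledTask -TaskName 'Apply Windows Updates' -Action $action -Trigger $trigger -Settings $settings -User 'SYSTEM'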
@ -21,20 +21,20 @@ tags:
|
||||||
- python
|
- python
|
||||||
comment: true # Disable comment if false.
|
comment: true # Disable comment if false.
|
||||||
---
|
---
|
||||||
VMware vCenter does wonders for abstracting away the layers of complexity involved in managing a large virtual infrastructure, but when something goes wrong it can be challenging to find exactly where the problem lies. And it can be even harder to proactively address potential issues before they occur.
|
VMware vCenter does wonders for abstracting away the layers of complexity involved in managing a large virtual infrastructure, but when something goes wrong it can be challenging to find exactly where the problem lies. And it can be even harder to proactively address potential issues before they occur.
|
||||||
|
|
||||||
Fortunately there's a super-handy utility which can make diagnosing vCenter significantly easier, and it comes in the form of the [vSphere Diagnostic Tool Fling](https://flings.vmware.com/vsphere-diagnostic-tool). VDT is a Python script which can be run directly on a vCenter Server appliance (version 6.5 and newer) to quickly check for problems and misconfigurations affecting:
|
Fortunately there's a super-handy utility which can make diagnosing vCenter significantly easier, and it comes in the form of the [vSphere Diagnostic Tool Fling](https://flings.vmware.com/vsphere-diagnostic-tool). VDT is a Python script which can be run directly on a vCenter Server appliance (version 6.5 and newer) to quickly check for problems and misconfigurations affecting:
|
||||||
- vCenter Basic Info
|
- vCenter Basic Info
|
||||||
- Lookup Service
|
- Lookup Service
|
||||||
- Active Directory
|
- Active Directory
|
||||||
- vCenter Certificates
|
- vCenter Certificates
|
||||||
- Core Files
|
- Core Files
|
||||||
- Disk Health
|
- Disk Health
|
||||||
- vCenter DNS
|
- vCenter DNS
|
||||||
- vCenter NTP
|
- vCenter NTP
|
||||||
- vCenter Port
|
- vCenter Port
|
||||||
- Root Account
|
- Root Account
|
||||||
- vCenter Services
|
- vCenter Services
|
||||||
- VCHA
|
- VCHA
|
||||||
|
|
||||||
For any problems identified, VDT will provide simple remediation steps and/or links to Knowledge Base articles with more detailed guidance on how to resolve the issues. Sounds pretty useful, right? And yet, somehow, I keep forgetting that VDT is a thing. So here's a friendly reminder to myself of how to obtain and use VDT to fix vSphere woes. Let's get started.
|
For any problems identified, VDT will provide simple remediation steps and/or links to Knowledge Base articles with more detailed guidance on how to resolve the issues. Sounds pretty useful, right? And yet, somehow, I keep forgetting that VDT is a thing. So here's a friendly reminder to myself of how to obtain and use VDT to fix vSphere woes. Let's get started.
|
||||||
|
@ -55,29 +55,28 @@ This needs to be run directly on the vCenter appliance so you'll need to copy th
|
||||||
|
|
||||||
Once that's done, just execute this on your local workstation to copy the `.zip` from your `~/Downloads/` folder to the VCSA's `/tmp/` directory:
|
Once that's done, just execute this on your local workstation to copy the `.zip` from your `~/Downloads/` folder to the VCSA's `/tmp/` directory:
|
||||||
```shell
|
```shell
|
||||||
scp ~/Downloads/vdt-v1.1.4.zip root@vcsa.lab.bowdre.net:/tmp/
|
scp ~/Downloads/vdt-v1.1.4.zip root@vcsa.lab.bowdre.net:/tmp/ # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
### 3. Extract
|
### 3. Extract
|
||||||
Now pop back over to an SSH session to the VCSA, extract the `.zip`, and get ready for action:
|
Now pop back over to an SSH session to the VCSA, extract the `.zip`, and get ready for action:
|
||||||
```shell
|
```shell
|
||||||
root@VCSA [ ~ ]# cd /tmp
|
cd /tmp # [tl! .cmd_root:1]
|
||||||
|
unzip vdt-v1.1.4.zip
|
||||||
root@VCSA [ /tmp ]# unzip vdt-v1.1.4.zip
|
Archive: vdt-v1.1.4.zip # [tl! .nocopy:5]
|
||||||
Archive: vdt-v1.1.4.zip
|
|
||||||
3557676756cffd658fd61aab5a6673269104e83c
|
3557676756cffd658fd61aab5a6673269104e83c
|
||||||
creating: vdt-v1.1.4/
|
creating: vdt-v1.1.4/
|
||||||
...
|
...
|
||||||
inflating: vdt-v1.1.4/vdt.py
|
inflating: vdt-v1.1.4/vdt.py
|
||||||
|
|
||||||
root@VCSA [ /tmp ]# cd vdt-v1.1.4/
|
cd vdt-v1.1.4/ # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
### 4. Execute
|
### 4. Execute
|
||||||
Now for the fun part:
|
Now for the fun part:
|
||||||
```shell
|
```shell
|
||||||
root@VCSA [ /tmp/vdt-v1.1.4 ]# python vdt.py
|
python vdt.py # [tl! .cmd_root]
|
||||||
_________________________
|
_________________________ # [tl! .nocopy:7]
|
||||||
RUNNING PULSE CHECK
|
RUNNING PULSE CHECK
|
||||||
|
|
||||||
Today: Sunday, August 28 19:53:00
|
Today: Sunday, August 28 19:53:00
|
||||||
|
@ -93,7 +92,7 @@ After entering the SSO password, VDT will run for a few minutes and generate an
|
||||||
Once the script has completed, it's time to look through the results and fix whatever can be found. As an example, here are some of the findings from my _deliberately-broken-for-the-purposes-of-this-post_ vCenter:
|
Once the script has completed, it's time to look through the results and fix whatever can be found. As an example, here are some of the findings from my _deliberately-broken-for-the-purposes-of-this-post_ vCenter:
|
||||||
|
|
||||||
#### Hostname/PNID mismatch
|
#### Hostname/PNID mismatch
|
||||||
```log {hl_lines=[8,9,23,24]}
|
```text
|
||||||
VCENTER BASIC INFO
|
VCENTER BASIC INFO
|
||||||
BASIC:
|
BASIC:
|
||||||
Current Time: 2022-08-28 19:54:08.370889
|
Current Time: 2022-08-28 19:54:08.370889
|
||||||
|
@ -101,7 +100,7 @@ BASIC:
|
||||||
vCenter Load Average: 0.26, 0.19, 0.12
|
vCenter Load Average: 0.26, 0.19, 0.12
|
||||||
Number of CPUs: 2
|
Number of CPUs: 2
|
||||||
Total Memory: 11.71
|
Total Memory: 11.71
|
||||||
vCenter Hostname: VCSA
|
vCenter Hostname: VCSA # [tl! highlight:1]
|
||||||
vCenter PNID: vcsa.lab.bowdre.net
|
vCenter PNID: vcsa.lab.bowdre.net
|
||||||
vCenter IP Address: 192.168.1.12
|
vCenter IP Address: 192.168.1.12
|
||||||
Proxy Configured: "no"
|
Proxy Configured: "no"
|
||||||
|
@ -116,16 +115,16 @@ DETAILS:
|
||||||
Number of Clusters: 1
|
Number of Clusters: 1
|
||||||
Disabled Plugins: None
|
Disabled Plugins: None
|
||||||
|
|
||||||
[FAIL] The hostname and PNID do not match!
|
[FAIL] The hostname and PNID do not match! # [tl! highlight:1]
|
||||||
Please see https://kb.vmware.com/s/article/2130599 for more details.
|
Please see https://kb.vmware.com/s/article/2130599 for more details.
|
||||||
```
|
```
|
||||||
Silly me - I must have changed the hostname at some point, which is not generally a Thing Which Should Be Done. I can quickly [consult the referenced KB](https://kb.vmware.com/s/article/2130599) to figure out how to fix my mistake using the `/opt/vmware/share/vami/vami_config_net` utility.
|
Silly me - I must have changed the hostname at some point, which is not generally a Thing Which Should Be Done. I can quickly [consult the referenced KB](https://kb.vmware.com/s/article/2130599) to figure out how to fix my mistake using the `/opt/vmware/share/vami/vami_config_net` utility.
|
||||||
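
That utility gets launched right from the appliance's shell:

```shell
/opt/vmware/share/vami/vami_config_net # [tl! .cmd_root]
```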
|
|
||||||
#### Missing DNS
|
#### Missing DNS
|
||||||
```log {hl_lines=[3,4,5,12,13]}
|
```text
|
||||||
Nameserver Queries
|
Nameserver Queries
|
||||||
192.168.1.5
|
192.168.1.5
|
||||||
[FAIL] DNS with UDP - unable to resolve vcsa to 192.168.1.12
|
[FAIL] DNS with UDP - unable to resolve vcsa to 192.168.1.12 # [tl! highlight:2]
|
||||||
[FAIL] Reverse DNS - unable to resolve 192.168.1.12 to vcsa
|
[FAIL] Reverse DNS - unable to resolve 192.168.1.12 to vcsa
|
||||||
[FAIL] DNS with TCP - unable to resolve vcsa to 192.168.1.12
|
[FAIL] DNS with TCP - unable to resolve vcsa to 192.168.1.12
|
||||||
|
|
||||||
|
@ -134,13 +133,13 @@ Nameserver Queries
|
||||||
dig +noall +answer -x <ip> <namserver>
|
dig +noall +answer -x <ip> <namserver>
|
||||||
dig +short +tcp <fqdn> <nameserver>
|
dig +short +tcp <fqdn> <nameserver>
|
||||||
|
|
||||||
RESULT: [FAIL]
|
RESULT: [FAIL] # [tl! highlight:1]
|
||||||
Please see KB: https://kb.vmware.com/s/article/54682
|
Please see KB: https://kb.vmware.com/s/article/54682
|
||||||
```
|
```
|
||||||
Whoops - I guess I should go recreate the appropriate DNS records.
|
Whoops - I guess I should go recreate the appropriate DNS records.
|
||||||
|
|
||||||
#### Old core files
|
#### Old core files
|
||||||
```log
|
```text
|
||||||
CORE FILE CHECK
|
CORE FILE CHECK
|
||||||
INFO:
|
INFO:
|
||||||
These core files are older than 72 hours. consider deleting them
|
These core files are older than 72 hours. consider deleting them
|
||||||
|
@ -166,18 +165,18 @@ at your discretion to reduce the size of log bundles.
|
||||||
Those core files can be useful for investigating specific issues, but holding on to them long-term doesn't really do much good. _After checking to be sure I don't need them_, I can get rid of them all pretty easily like so:
|
Those core files can be useful for investigating specific issues, but holding on to them long-term doesn't really do much good. _After checking to be sure I don't need them_, I can get rid of them all pretty easily like so:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
find /storage/core/ -name "core.*" -type f -mtime +3 -exec rm {} \;
|
find /storage/core/ -name "core.*" -type f -mtime +3 -exec rm {} \; # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### NTP status
|
#### NTP status
|
||||||
```log
|
```text
|
||||||
VC NTP CHECK
|
VC NTP CHECK
|
||||||
[FAIL] NTP and Host time are both disabled!
|
[FAIL] NTP and Host time are both disabled!
|
||||||
```
|
```
|
||||||
Oh yeah, let's turn that back on with `systemctl start ntpd`.
|
Oh yeah, let's turn that back on with `systemctl start ntpd`.
|
||||||
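
And since the appliance is a systemd system, enabling the unit (standard systemd usage, shown as a sketch) keeps it running across future reboots too:

```shell
systemctl enable --now ntpd # [tl! .cmd_root]
```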
|
|
||||||
#### Account status
|
#### Account status
|
||||||
```log
|
```text
|
||||||
Root Account Check
|
Root Account Check
|
||||||
[FAIL] Root password expires in 13 days
|
[FAIL] Root password expires in 13 days
|
||||||
Please search for 'Change the Password of the Root User'
|
Please search for 'Change the Password of the Root User'
|
||||||
|
@ -186,13 +185,13 @@ Oh yeah, let's turn that back on with `systemctl start ntpd`.
|
||||||
That's a good thing to know. I'll [take care of that](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenter.configuration.doc/GUID-48BAF973-4FD3-4FF3-B1B6-5F7286C9B59A.html) while I'm thinking about it.
|
That's a good thing to know. I'll [take care of that](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenter.configuration.doc/GUID-48BAF973-4FD3-4FF3-B1B6-5F7286C9B59A.html) while I'm thinking about it.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
chage -M -1 -E -1 root
|
chage -M -1 -E -1 root # [tl! .cmd_root]
|
||||||
```
|
```
|
||||||
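
A quick `chage -l` confirms the new aging policy took effect:

```shell
chage -l root # [tl! .cmd_root]
```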
|
|
||||||
#### Recheck
|
#### Recheck
|
||||||
Now that I've corrected these issues, I can run VDT again to confirm that everything is back in a good state:
|
Now that I've corrected these issues, I can run VDT again to confirm that everything is back in a good state:
|
||||||
|
|
||||||
```log {hl_lines=[8,9,"25-27",32,35,"55-56",59]}
|
```text {hl_lines=[8,9,"25-27",32,35,"55-56",59]}
|
||||||
VCENTER BASIC INFO
|
VCENTER BASIC INFO
|
||||||
BASIC:
|
BASIC:
|
||||||
Current Time: 2022-08-28 20:13:25.192503
|
Current Time: 2022-08-28 20:13:25.192503
|
||||||
|
@ -200,7 +199,7 @@ Now that I've corrected these issues, I can run VDT again to confirm that everyt
|
||||||
vCenter Load Average: 0.28, 0.14, 0.10
|
vCenter Load Average: 0.28, 0.14, 0.10
|
||||||
Number of CPUs: 2
|
Number of CPUs: 2
|
||||||
Total Memory: 11.71
|
Total Memory: 11.71
|
||||||
vCenter Hostname: vcsa.lab.bowdre.net
|
vCenter Hostname: vcsa.lab.bowdre.net # [tl! highlight:1]
|
||||||
vCenter PNID: vcsa.lab.bowdre.net
|
vCenter PNID: vcsa.lab.bowdre.net
|
||||||
vCenter IP Address: 192.168.1.12
|
vCenter IP Address: 192.168.1.12
|
||||||
Proxy Configured: "no"
|
Proxy Configured: "no"
|
||||||
|
@ -217,20 +216,20 @@ DETAILS:
|
||||||
[...]
|
[...]
|
||||||
Nameserver Queries
|
Nameserver Queries
|
||||||
192.168.1.5
|
192.168.1.5
|
||||||
[PASS] DNS with UDP - resolved vcsa.lab.bowdre.net to 192.168.1.12
|
[PASS] DNS with UDP - resolved vcsa.lab.bowdre.net to 192.168.1.12 # [tl! highlight:2]
|
||||||
[PASS] Reverse DNS - resolved 192.168.1.12 to vcsa.lab.bowdre.net
|
[PASS] Reverse DNS - resolved 192.168.1.12 to vcsa.lab.bowdre.net
|
||||||
[PASS] DNS with TCP - resolved vcsa.lab.bowdre.net to 192.168.1.12
|
[PASS] DNS with TCP - resolved vcsa.lab.bowdre.net to 192.168.1.12
|
||||||
Commands used:
|
Commands used:
|
||||||
dig +short <fqdn> <nameserver>
|
dig +short <fqdn> <nameserver>
|
||||||
dig +noall +answer -x <ip> <namserver>
|
dig +noall +answer -x <ip> <namserver>
|
||||||
dig +short +tcp <fqdn> <nameserver>
|
dig +short +tcp <fqdn> <nameserver>
|
||||||
RESULT: [PASS]
|
RESULT: [PASS] # [tl! highlight]
|
||||||
[...]
|
[...]
|
||||||
CORE FILE CHECK
|
CORE FILE CHECK
|
||||||
[PASS] Number of core files: 0
|
[PASS] Number of core files: 0 # [tl! highlight:1]
|
||||||
[PASS] Number of hprof files: 0
|
[PASS] Number of hprof files: 0
|
||||||
[...]
|
[...]
|
||||||
NTP Status Check
|
NTP Status Check # [tl! collapse:start]
|
||||||
+-----------------------------------LEGEND-----------------------------------+
|
+-----------------------------------LEGEND-----------------------------------+
|
||||||
| remote: NTP peer server |
|
| remote: NTP peer server |
|
||||||
| refid: server that this peer gets its time from |
|
| refid: server that this peer gets its time from |
|
||||||
|
@ -244,16 +243,16 @@ NTP Status Check
|
||||||
| + Peer selected for possible synchronization |
|
| + Peer selected for possible synchronization |
|
||||||
| – Peer is a candidate for selection |
|
| – Peer is a candidate for selection |
|
||||||
| ~ Peer is statically configured |
|
| ~ Peer is statically configured |
|
||||||
+----------------------------------------------------------------------------+
|
+----------------------------------------------------------------------------+ # [tl! collapse:end]
|
||||||
remote refid st t when poll reach delay offset jitter
|
remote refid st t when poll reach delay offset jitter
|
||||||
==============================================================================
|
==============================================================================
|
||||||
*104.171.113.34 130.207.244.240 2 u 1 64 17 16.831 -34.597 0.038
|
*104.171.113.34 130.207.244.240 2 u 1 64 17 16.831 -34.597 0.038
|
||||||
RESULT: [PASS]
|
RESULT: [PASS] # [tl! highlight]
|
||||||
[...]
|
[...]
|
||||||
Root Account Check
|
Root Account Check
|
||||||
[PASS] Root password never expires
|
[PASS] Root password never expires # [tl! highlight]
|
||||||
```
|
```
|
||||||
All better!
|
All better!
|
||||||
|
|
||||||
### Conclusion
|
### Conclusion
|
||||||
The vSphere Diagnostic Tool is a great addition to your arsenal of troubleshooting skills and utilities. It makes it easy to troubleshoot errors which might occur in your vSphere environment, as well as to uncover dormant issues which could cause serious problems in the future.
|
The vSphere Diagnostic Tool is a great addition to your arsenal of troubleshooting skills and utilities. It makes it easy to troubleshoot errors which might occur in your vSphere environment, as well as to uncover dormant issues which could cause serious problems in the future.
|
|
@ -11,7 +11,7 @@ tags:
|
||||||
title: Virtually Potato migrated to GitHub Pages!
|
title: Virtually Potato migrated to GitHub Pages!
|
||||||
---
|
---
|
||||||
|
|
||||||
After a bit less than a year of hosting my little technical blog with [Hashnode](https://hashnode.com), I spent a few days [migrating the content](/script-to-update-image-embed-links-in-markdown-files) over to a new format hosted with [GitHub Pages](https://pages.github.com/).
|
After a bit less than a year of hosting my little technical blog with [Hashnode](https://hashnode.com), I spent a few days [migrating the content](/script-to-update-image-embed-links-in-markdown-files) over to a new format hosted with [GitHub Pages](https://pages.github.com/).
|
||||||
|
|
||||||
![Party!](20210720-party.gif)
|
![Party!](20210720-party.gif)
|
||||||
|
|
||||||
|
@ -25,36 +25,36 @@ I knew about GitHub Pages, but had never seriously looked into it. Once I did, t
|
||||||
I found that the quite-popular [Minimal Mistakes](https://mademistakes.com/work/minimal-mistakes-jekyll-theme/) theme for Jekyll offers a [remote theme starter](https://github.com/mmistakes/mm-github-pages-starter/generate) that can be used to quickly get things going. I just used that generator to spawn a new repository in my GitHub account ([`jbowdre.github.io`](https://github.com/jbowdre/jbowdre.github.io)). And that was it - I had a starter GitHub Pages-hosted Jekyll-powered static site with an elegant theme applied. I could even make changes to the various configuration and sample post files, point any browser to `https://jbowdre.github.io`, and see the results almost immediately. I got to work digging through the lengthy [configuration documentation](https://mmistakes.github.io/minimal-mistakes/docs/configuration/) to start making the site my own, like [connecting with my custom domain](https://docs.github.com/en/pages/configuring-a-custom-domain-for-your-github-pages-site/managing-a-custom-domain-for-your-github-pages-site) and enabling [GitHub Issue-based comments](https://github.com/apps/utterances).
|
I found that the quite-popular [Minimal Mistakes](https://mademistakes.com/work/minimal-mistakes-jekyll-theme/) theme for Jekyll offers a [remote theme starter](https://github.com/mmistakes/mm-github-pages-starter/generate) that can be used to quickly get things going. I just used that generator to spawn a new repository in my GitHub account ([`jbowdre.github.io`](https://github.com/jbowdre/jbowdre.github.io)). And that was it - I had a starter GitHub Pages-hosted Jekyll-powered static site with an elegant theme applied. I could even make changes to the various configuration and sample post files, point any browser to `https://jbowdre.github.io`, and see the results almost immediately. I got to work digging through the lengthy [configuration documentation](https://mmistakes.github.io/minimal-mistakes/docs/configuration/) to start making the site my own, like [connecting with my custom domain](https://docs.github.com/en/pages/configuring-a-custom-domain-for-your-github-pages-site/managing-a-custom-domain-for-your-github-pages-site) and enabling [GitHub Issue-based comments](https://github.com/apps/utterances).
|
||||||
|
|
||||||
#### Working locally
|
#### Working locally
|
||||||
A quick `git clone` operation was sufficient to create a local copy of my new site in my Lenovo Chromebook Duet's [Linux environment](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications). That lets me easily create and edit Markdown posts or configuration files with VS Code, commit them to the local copy of the repo, and then push them back to GitHub when I'm ready to publish the changes.
|
A quick `git clone` operation was sufficient to create a local copy of my new site in my Lenovo Chromebook Duet's [Linux environment](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications). That lets me easily create and edit Markdown posts or configuration files with VS Code, commit them to the local copy of the repo, and then push them back to GitHub when I'm ready to publish the changes.
|
||||||
|
|
||||||
In order to view the local changes, I needed to install Jekyll locally as well. I started by installing Ruby and other prerequisites:
|
In order to view the local changes, I needed to install Jekyll locally as well. I started by installing Ruby and other prerequisites:
|
||||||
```shell
|
```shell
|
||||||
sudo apt-get install ruby-full build-essential zlib1g-dev
|
sudo apt-get install ruby-full build-essential zlib1g-dev # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
I added the following to my `~/.zshrc` file so that the gems would be installed under my home directory rather than somewhere more privileged:
|
I added the following to my `~/.zshrc` file so that the gems would be installed under my home directory rather than somewhere more privileged:
|
||||||
```shell
|
```shell
|
||||||
export GEM_HOME="$HOME/gems"
|
export GEM_HOME="$HOME/gems" # [tl! .cmd:1]
|
||||||
export PATH="$HOME/gems/bin:$PATH"
|
export PATH="$HOME/gems/bin:$PATH"
|
||||||
```
|
```
|
||||||
|
|
||||||
And then ran `source ~/.zshrc` so the change would take immediate effect.
|
And then ran `source ~/.zshrc` so the change would take immediate effect.
|
||||||
|
|
||||||
I could then install Jekyll:
|
I could then install Jekyll:
|
||||||
```shell
|
```shell
|
||||||
gem install jekyll bundler
|
gem install jekyll bundler # [tl! .cmd]
|
||||||
```
|
```
|
||||||
|
|
||||||
I then `cd`ed to the local repo and ran `bundle install` to also load up the components specified in the repo's `Gemfile`.
|
I then `cd`ed to the local repo and ran `bundle install` to also load up the components specified in the repo's `Gemfile`.
|
||||||
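
In practice that's just a couple of commands (the repo directory name here is illustrative):

```shell
cd jbowdre.github.io # [tl! .cmd:1]
bundle install
```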
|
|
||||||
And, finally, I can run this to start up the local Jekyll server instance:
|
And, finally, I can run this to start up the local Jekyll server instance:
|
||||||
```shell
|
```shell
|
||||||
❯ bundle exec jekyll serve -l --drafts
|
bundle exec jekyll serve -l --drafts # [tl! .cmd]
|
||||||
Configuration file: /home/jbowdre/projects/jbowdre.github.io/_config.yml
|
Configuration file: /home/jbowdre/projects/jbowdre.github.io/_config.yml # [tl! .nocopy:start]
|
||||||
Source: /home/jbowdre/projects/jbowdre.github.io
|
Source: /home/jbowdre/projects/jbowdre.github.io
|
||||||
Destination: /home/jbowdre/projects/jbowdre.github.io/_site
|
Destination: /home/jbowdre/projects/jbowdre.github.io/_site
|
||||||
Incremental build: enabled
|
Incremental build: enabled
|
||||||
Generating...
|
Generating...
|
||||||
Remote Theme: Using theme mmistakes/minimal-mistakes
|
Remote Theme: Using theme mmistakes/minimal-mistakes
|
||||||
Jekyll Feed: Generating feed for posts
|
Jekyll Feed: Generating feed for posts
|
||||||
GitHub Metadata: No GitHub API authentication could be found. Some fields may be missing or have incorrect data.
|
GitHub Metadata: No GitHub API authentication could be found. Some fields may be missing or have incorrect data.
|
||||||
|
@ -62,7 +62,7 @@ Configuration file: /home/jbowdre/projects/jbowdre.github.io/_config.yml
|
||||||
Auto-regeneration: enabled for '/home/jbowdre/projects/jbowdre.github.io'
|
Auto-regeneration: enabled for '/home/jbowdre/projects/jbowdre.github.io'
|
||||||
LiveReload address: http://0.0.0.0:35729
|
LiveReload address: http://0.0.0.0:35729
|
||||||
Server address: http://0.0.0.0:4000
|
Server address: http://0.0.0.0:4000
|
||||||
Server running... press ctrl-c to stop.
|
Server running... press ctrl-c to stop. # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
And there it is!
|
And there it is!
|
||||||
|
@ -71,4 +71,4 @@ And there it is!
|
||||||
### `git push` time
|
### `git push` time
|
||||||
Alright, that's enough rambling for now. I'm very happy with this new setup, particularly with the automatically-generated Table of Contents to help folks navigate some of my longer posts. (I can't believe I was having to piece those together manually in this blog's previous iteration!)
|
Alright, that's enough rambling for now. I'm very happy with this new setup, particularly with the automatically-generated Table of Contents to help folks navigate some of my longer posts. (I can't believe I was having to piece those together manually in this blog's previous iteration!)
|
||||||
|
|
||||||
I'll continue to make some additional tweaks in the coming weeks but for now I'll `git push` this post and get back to documenting my never-ending [vRA project](/series/vra8).
|
I'll continue to make some additional tweaks in the coming weeks but for now I'll `git push` this post and get back to documenting my never-ending [vRA project](/series/vra8).
|
|
@ -13,7 +13,7 @@ tags:
|
||||||
---
|
---
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
cp -a virtuallypotato.com runtimeterror.dev
|
cp -a virtuallypotato.com runtimeterror.dev # [tl! .cmd:2]
|
||||||
rm -rf virtuallypotato.com
|
rm -rf virtuallypotato.com
|
||||||
ln -s virtuallypotato.com runtimeterror.dev
|
ln -s virtuallypotato.com runtimeterror.dev
|
||||||
```
|
```
|
||||||
|
|
|
@ -12,7 +12,7 @@ title: VMware Home Lab on Intel NUC 9
|
||||||
featured: false
|
featured: false
|
||||||
---
|
---
|
||||||
|
|
||||||
I picked up an Intel NUC 9 Extreme kit a few months back (thanks, VMware!) and have been slowly tinkering with turning it into an extremely capable self-contained home lab environment. I'm pretty happy with where things sit right now, so I figured it was about time to start documenting and sharing what I've done.
|
I picked up an Intel NUC 9 Extreme kit a few months back (thanks, VMware!) and have been slowly tinkering with turning it into an extremely capable self-contained home lab environment. I'm pretty happy with where things sit right now, so I figured it was about time to start documenting and sharing what I've done.
|
||||||
|
|
||||||
![But boy would I love some more RAM](SIDah-Lag.png)
|
![But boy would I love some more RAM](SIDah-Lag.png)
|
||||||
|
|
||||||
|
@ -26,7 +26,7 @@ I picked up an Intel NUC 9 Extreme kit a few months back (thanks, VMware!) and h
|
||||||
The NUC runs ESXi 7.0u1 and currently hosts the following:
|
The NUC runs ESXi 7.0u1 and currently hosts the following:
|
||||||
- vCenter Server 7.0u1
|
- vCenter Server 7.0u1
|
||||||
- Windows 2019 domain controller
|
- Windows 2019 domain controller
|
||||||
- [VyOS router](https://vyos.io/)
|
- [VyOS router](https://vyos.io/)
|
||||||
- [Home Assistant OS 5.9](https://www.home-assistant.io/hassio/installation/)
|
- [Home Assistant OS 5.9](https://www.home-assistant.io/hassio/installation/)
|
||||||
- vRealize Lifecycle Manager 8.2
|
- vRealize Lifecycle Manager 8.2
|
||||||
- vRealize Identity Manager 3.3.2
|
- vRealize Identity Manager 3.3.2
|
||||||
|
@ -41,7 +41,7 @@ The NUC connects to my home network through its onboard gigabit Ethernet interfa
|
||||||
|
|
||||||
I used the Chromebook Recovery Utility to write the ESXi installer ISO to *another* USB drive (how-to [here](/burn-an-iso-to-usb-with-the-chromebook-recovery-utility)), inserted that bootable drive into a port on the front of the NUC, and booted the NUC from the drive. Installing ESXi 7.0u1 was as easy as it could possibly be. All hardware was automatically detected and the appropriate drivers loaded. Once the host booted up, I used the DCUI to configure a static IP address (`192.168.1.11`). I then shut down the NUC, disconnected the keyboard and monitor, and moved it into the cabinet where it will live out its headless existence.
|
I used the Chromebook Recovery Utility to write the ESXi installer ISO to *another* USB drive (how-to [here](/burn-an-iso-to-usb-with-the-chromebook-recovery-utility)), inserted that bootable drive into a port on the front of the NUC, and booted the NUC from the drive. Installing ESXi 7.0u1 was as easy as it could possibly be. All hardware was automatically detected and the appropriate drivers loaded. Once the host booted up, I used the DCUI to configure a static IP address (`192.168.1.11`). I then shut down the NUC, disconnected the keyboard and monitor, and moved it into the cabinet where it will live out its headless existence.
|
||||||
|
|
||||||
I was then able to point my web browser to `https://192.168.1.11/ui/` to log in to the host and get down to business. First stop: networking. For now, I only need a single standard switch (`vSwitch0`) with two portgroups: one for the host's vmkernel interface, and the other for the VMs (including the nested ESXi appliances) that are going to run directly on this physical host. The one "gotcha" when working with a nested environment is that you'll need to edit the virtual switch's security settings to "Allow promiscuous mode" and "Allow forged transmits" (for reasons described [here](https://williamlam.com/2013/11/why-is-promiscuous-mode-forged.html)).
|
I was then able to point my web browser to `https://192.168.1.11/ui/` to log in to the host and get down to business. First stop: networking. For now, I only need a single standard switch (`vSwitch0`) with two portgroups: one for the host's vmkernel interface, and the other for the VMs (including the nested ESXi appliances) that are going to run directly on this physical host. The one "gotcha" when working with a nested environment is that you'll need to edit the virtual switch's security settings to "Allow promiscuous mode" and "Allow forged transmits" (for reasons described [here](https://williamlam.com/2013/11/why-is-promiscuous-mode-forged.html)).
|
||||||
![Allowing promiscuous mode and forged transmits](w0HeFSi7Q.png)
|
![Allowing promiscuous mode and forged transmits](w0HeFSi7Q.png)
|
||||||
|
|
||||||
I created a single datastore to span the entirety of that 1TB NVMe drive. The nested ESXi hosts will use VMDKs stored here to provide storage to the nested VMs.
|
I created a single datastore to span the entirety of that 1TB NVMe drive. The nested ESXi hosts will use VMDKs stored here to provide storage to the nested VMs.
|
||||||
|
@ -77,7 +77,7 @@ My home network uses the generic `192.168.1.0/24` address space, with internet r
|
||||||
Of course, not everything that I'm going to deploy in the lab will need to be accessible from outside the lab environment. This goes for obvious things like the vMotion and vSAN networks of the nested ESXi hosts, but it will also be useful to have internal networks that can be used by VMs provisioned by vRA. So I'll be creating these networks:
|
Of course, not everything that I'm going to deploy in the lab will need to be accessible from outside the lab environment. This goes for obvious things like the vMotion and vSAN networks of the nested ESXi hosts, but it will also be useful to have internal networks that can be used by VMs provisioned by vRA. So I'll be creating these networks:
|
||||||
|
|
||||||
| VLAN ID | Network | Purpose |
|
| VLAN ID | Network | Purpose |
|
||||||
| ---- | ---- | ---- |
|
| ---- | ---- | ---- |
|
||||||
| 1610 | `172.16.10.0/24` | Management |
|
| 1610 | `172.16.10.0/24` | Management |
|
||||||
| 1620 | `172.16.20.0/24` | Servers-1 |
|
| 1620 | `172.16.20.0/24` | Servers-1 |
|
||||||
| 1630 | `172.16.30.0/24` | Servers-2 |
|
| 1630 | `172.16.30.0/24` | Servers-2 |
|
||||||
|
@ -85,7 +85,7 @@ Of course, not everything that I'm going to deploy in the lab will need to be ac
|
||||||
| 1699 | `172.16.99.0/24` | vMotion |
|
| 1699 | `172.16.99.0/24` | vMotion |
|
||||||
|
|
||||||
#### vSwitch1
|
#### vSwitch1
|
||||||
I'll start by adding a second vSwitch to the physical host. It doesn't need a physical adapter assigned since this switch will be for internal traffic. I create two port groups: one tagged for the VLAN 1610 Management traffic, which will be useful for attaching VMs on the physical host to the internal network; and the second will use VLAN 4095 to pass all VLAN traffic to the nested ESXi hosts. And again, this vSwitch needs to have its security policy set to allow Promiscuous Mode and Forged Transmits. I also set the vSwitch to support an MTU of 9000 so I can use Jumbo Frames on the vMotion and vSAN networks.
|
I'll start by adding a second vSwitch to the physical host. It doesn't need a physical adapter assigned since this switch will be for internal traffic. I create two port groups: one tagged for the VLAN 1610 Management traffic, which will be useful for attaching VMs on the physical host to the internal network; and the second will use VLAN 4095 to pass all VLAN traffic to the nested ESXi hosts. And again, this vSwitch needs to have its security policy set to allow Promiscuous Mode and Forged Transmits. I also set the vSwitch to support an MTU of 9000 so I can use Jumbo Frames on the vMotion and vSAN networks.
|
||||||
|
|
||||||
![Second vSwitch](7aNJa2Hlm.png)
|
![Second vSwitch](7aNJa2Hlm.png)
|
||||||
|
|
||||||
|
@ -95,15 +95,14 @@ Wouldn't it be great if the VMs that are going to be deployed on those `1610`, `
|
||||||
After logging in to the VM, I entered the router's configuration mode:
|
After logging in to the VM, I entered the router's configuration mode:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
vyos@vyos:~$ configure
|
configure # [tl! .cmd]
|
||||||
[edit]
|
[edit] # [tl! .nocopy]
|
||||||
vyos@vyos#
|
|
||||||
```
|
```
|
||||||
|
|
||||||
I then started with setting up the interfaces - `eth0` for the `192.168.1.0/24` network, `eth1` on the trunked portgroup, and a number of VIFs on `eth1` to handle the individual VLANs I'm interested in using.
|
I then started with setting up the interfaces - `eth0` for the `192.168.1.0/24` network, `eth1` on the trunked portgroup, and a number of VIFs on `eth1` to handle the individual VLANs I'm interested in using.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
set interfaces ethernet eth0 address '192.168.1.8/24'
|
set interfaces ethernet eth0 address '192.168.1.8/24' # [tl! .cmd_root:start]
|
||||||
set interfaces ethernet eth0 description 'Outside'
|
set interfaces ethernet eth0 description 'Outside'
|
||||||
set interfaces ethernet eth1 mtu '9000'
|
set interfaces ethernet eth1 mtu '9000'
|
||||||
set interfaces ethernet eth1 vif 1610 address '172.16.10.1/24'
|
set interfaces ethernet eth1 vif 1610 address '172.16.10.1/24'
|
||||||
|
@ -118,13 +117,13 @@ set interfaces ethernet eth1 vif 1630 mtu '1500'
|
||||||
set interfaces ethernet eth1 vif 1698 description 'VLAN 1698 for vSAN'
|
set interfaces ethernet eth1 vif 1698 description 'VLAN 1698 for vSAN'
|
||||||
set interfaces ethernet eth1 vif 1698 mtu '9000'
|
set interfaces ethernet eth1 vif 1698 mtu '9000'
|
||||||
set interfaces ethernet eth1 vif 1699 description 'VLAN 1699 for vMotion'
|
set interfaces ethernet eth1 vif 1699 description 'VLAN 1699 for vMotion'
|
||||||
set interfaces ethernet eth1 vif 1699 mtu '9000'
|
set interfaces ethernet eth1 vif 1699 mtu '9000' # [tl! .cmd_root:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
I also set up NAT for the networks that should be routable:
|
I also set up NAT for the networks that should be routable:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
set nat source rule 10 outbound-interface 'eth0'
|
set nat source rule 10 outbound-interface 'eth0' # [tl! .cmd_root:start]
|
||||||
set nat source rule 10 source address '172.16.10.0/24'
|
set nat source rule 10 source address '172.16.10.0/24'
|
||||||
set nat source rule 10 translation address 'masquerade'
|
set nat source rule 10 translation address 'masquerade'
|
||||||
set nat source rule 20 outbound-interface 'eth0'
|
set nat source rule 20 outbound-interface 'eth0'
|
||||||
|
@ -135,13 +134,13 @@ set nat source rule 30 source address '172.16.30.0/24'
|
||||||
set nat source rule 30 translation address 'masquerade'
|
set nat source rule 30 translation address 'masquerade'
|
||||||
set nat source rule 100 outbound-interface 'eth0'
|
set nat source rule 100 outbound-interface 'eth0'
|
||||||
set nat source rule 100 translation address 'masquerade'
|
set nat source rule 100 translation address 'masquerade'
|
||||||
set protocols static route 0.0.0.0/0 next-hop 192.168.1.1
|
set protocols static route 0.0.0.0/0 next-hop 192.168.1.1 # [tl! .cmd_root:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
And I configured DNS forwarding:
|
And I configured DNS forwarding:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
set service dns forwarding allow-from '0.0.0.0/0'
|
set service dns forwarding allow-from '0.0.0.0/0' # [tl! .cmd_root:start]
|
||||||
set service dns forwarding domain 10.16.172.in-addr.arpa. server '192.168.1.5'
|
set service dns forwarding domain 10.16.172.in-addr.arpa. server '192.168.1.5'
|
||||||
set service dns forwarding domain 20.16.172.in-addr.arpa. server '192.168.1.5'
|
set service dns forwarding domain 20.16.172.in-addr.arpa. server '192.168.1.5'
|
||||||
set service dns forwarding domain 30.16.172.in-addr.arpa. server '192.168.1.5'
|
set service dns forwarding domain 30.16.172.in-addr.arpa. server '192.168.1.5'
|
||||||
|
@ -149,13 +148,13 @@ set service dns forwarding domain lab.bowdre.net server '192.168.1.5'
|
||||||
set service dns forwarding listen-address '172.16.10.1'
|
set service dns forwarding listen-address '172.16.10.1'
|
||||||
set service dns forwarding listen-address '172.16.20.1'
|
set service dns forwarding listen-address '172.16.20.1'
|
||||||
set service dns forwarding listen-address '172.16.30.1'
|
set service dns forwarding listen-address '172.16.30.1'
|
||||||
set service dns forwarding name-server '192.168.1.1'
|
set service dns forwarding name-server '192.168.1.1' # [tl! .cmd_root:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Finally, I also configured VyOS's DHCP server so that I won't have to statically configure the networking for VMs deployed from vRA:
|
Finally, I also configured VyOS's DHCP server so that I won't have to statically configure the networking for VMs deployed from vRA:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
set service dhcp-server shared-network-name SCOPE_10_MGMT authoritative
|
set service dhcp-server shared-network-name SCOPE_10_MGMT authoritative # [tl! .cmd_root:start]
|
||||||
set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 default-router '172.16.10.1'
|
set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 default-router '172.16.10.1'
|
||||||
set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 dns-server '192.168.1.5'
|
set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 dns-server '192.168.1.5'
|
||||||
set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 domain-name 'lab.bowdre.net'
|
set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 domain-name 'lab.bowdre.net'
|
||||||
|
@ -175,7 +174,7 @@ set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/
|
||||||
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 domain-name 'lab.bowdre.net'
|
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 domain-name 'lab.bowdre.net'
|
||||||
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 lease '86400'
|
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 lease '86400'
|
||||||
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 range 0 start '172.16.30.100'
|
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 range 0 start '172.16.30.100'
|
||||||
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 range 0 stop '172.16.30.200'
|
set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 range 0 stop '172.16.30.200' # [tl! .cmd_root:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Satisfied with my work, I ran the `commit` and `save` commands. BOOM, this server jockey just configured a router!
|
Satisfied with my work, I ran the `commit` and `save` commands. BOOM, this server jockey just configured a router!
|
||||||
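
For completeness, those last two commands from configuration mode:

```shell
commit # [tl! .cmd_root:1]
save
```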
|
@ -213,8 +212,8 @@ I migrated the physical NICs and `vmk0` to the new dvSwitch, and then created ne
|
||||||
I then ssh'd into the hosts and used `vmkping` to make sure they could talk to each other over these interfaces. I changed the vMotion interface to use the vMotion TCP/IP stack, so I needed to append the `-S vmotion` flag to the command:
|
I then ssh'd into the hosts and used `vmkping` to make sure they could talk to each other over these interfaces. I changed the vMotion interface to use the vMotion TCP/IP stack, so I needed to append the `-S vmotion` flag to the command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
[root@esxi01:~] vmkping -I vmk1 172.16.98.22
|
vmkping -I vmk1 172.16.98.22 # [tl! .cmd_root]
|
||||||
PING 172.16.98.22 (172.16.98.22): 56 data bytes
|
PING 172.16.98.22 (172.16.98.22): 56 data bytes # [tl! .nocopy:start]
|
||||||
64 bytes from 172.16.98.22: icmp_seq=0 ttl=64 time=0.243 ms
|
64 bytes from 172.16.98.22: icmp_seq=0 ttl=64 time=0.243 ms
|
||||||
64 bytes from 172.16.98.22: icmp_seq=1 ttl=64 time=0.260 ms
|
64 bytes from 172.16.98.22: icmp_seq=1 ttl=64 time=0.260 ms
|
||||||
64 bytes from 172.16.98.22: icmp_seq=2 ttl=64 time=0.262 ms
|
64 bytes from 172.16.98.22: icmp_seq=2 ttl=64 time=0.262 ms
|
||||||
|
@ -222,16 +221,16 @@ PING 172.16.98.22 (172.16.98.22): 56 data bytes
|
||||||
--- 172.16.98.22 ping statistics ---
|
--- 172.16.98.22 ping statistics ---
|
||||||
3 packets transmitted, 3 packets received, 0% packet loss
|
3 packets transmitted, 3 packets received, 0% packet loss
|
||||||
round-trip min/avg/max = 0.243/0.255/0.262 ms
|
round-trip min/avg/max = 0.243/0.255/0.262 ms
|
||||||
|
# [tl! .nocopy:end]
|
||||||
[root@esxi01:~] vmkping -I vmk2 172.16.99.22 -S vmotion
|
vmkping -I vmk2 172.16.99.22 -S vmotion # [tl! .cmd_root]
|
||||||
PING 172.16.99.22 (172.16.99.22): 56 data bytes
|
PING 172.16.99.22 (172.16.99.22): 56 data bytes # [tl! .nocopy:start]
|
||||||
64 bytes from 172.16.99.22: icmp_seq=0 ttl=64 time=0.202 ms
|
64 bytes from 172.16.99.22: icmp_seq=0 ttl=64 time=0.202 ms
|
||||||
64 bytes from 172.16.99.22: icmp_seq=1 ttl=64 time=0.312 ms
|
64 bytes from 172.16.99.22: icmp_seq=1 ttl=64 time=0.312 ms
|
||||||
64 bytes from 172.16.99.22: icmp_seq=2 ttl=64 time=0.242 ms
|
64 bytes from 172.16.99.22: icmp_seq=2 ttl=64 time=0.242 ms
|
||||||
|
|
||||||
--- 172.16.99.22 ping statistics ---
|
--- 172.16.99.22 ping statistics ---
|
||||||
3 packets transmitted, 3 packets received, 0% packet loss
|
3 packets transmitted, 3 packets received, 0% packet loss
|
||||||
round-trip min/avg/max = 0.202/0.252/0.312 ms
|
round-trip min/avg/max = 0.202/0.252/0.312 ms # [tl! .nocopy:end]
|
||||||
```
|
```
|
||||||
|
|
||||||
Okay, time to throw some vSAN on these hosts. Select the cluster object, go to the configuration tab, scroll down to vSAN, and click "Turn on vSAN". This will be a single site cluster, and I don't need to enable any additional services. When prompted, I claim the 8GB drives for the cache tier and the 16GB drives for capacity.
|
Okay, time to throw some vSAN on these hosts. Select the cluster object, go to the configuration tab, scroll down to vSAN, and click "Turn on vSAN". This will be a single site cluster, and I don't need to enable any additional services. When prompted, I claim the 8GB drives for the cache tier and the 16GB drives for capacity.
|
||||||
|
@ -253,7 +252,7 @@ Anyhoo, each of these VMs will need to be resolvable in DNS so I started by crea
|
||||||
|`idm.lab.bowdre.net`|`192.168.1.41`|
|
|`idm.lab.bowdre.net`|`192.168.1.41`|
|
||||||
|`vra.lab.bowdre.net`|`192.168.1.42`|
|
|`vra.lab.bowdre.net`|`192.168.1.42`|
|
||||||
|
|
||||||
I then attached the installer ISO to my Windows VM and ran through the installation from there.
|
I then attached the installer ISO to my Windows VM and ran through the installation from there.
|
||||||
![vRealize Easy Installer](42n3aMim5.png)
|
![vRealize Easy Installer](42n3aMim5.png)
|
||||||
|
|
||||||
Similar to the vCenter deployment process, this one prompts you for all the information it needs up front and then takes care of everything from there. That's great news because this is a pretty long deployment; it took probably two hours from clicking the final "Okay, do it" button to being able to log in to my shiny new vRealize Automation environment.
|
Similar to the vCenter deployment process, this one prompts you for all the information it needs up front and then takes care of everything from there. That's great news because this is a pretty long deployment; it took probably two hours from clicking the final "Okay, do it" button to being able to log in to my shiny new vRealize Automation environment.
|
||||||
|
|
|
@ -25,7 +25,8 @@ So this will generate a name that looks something like `[user]_[catalog_item]_[s
|
||||||
That does mean that I'll need to add another vRO call, but I can set this up so that it only gets triggered once, when the form loads, instead of refreshing each time the inputs change.
|
That does mean that I'll need to add another vRO call, but I can set this up so that it only gets triggered once, when the form loads, instead of refreshing each time the inputs change.
|
||||||
|
|
||||||
So I hop over to vRO and create a new action, which I call `getTimestamp`. It doesn't require any inputs, and returns a single string. Here's the code:
|
So I hop over to vRO and create a new action, which I call `getTimestamp`. It doesn't require any inputs, and returns a single string. Here's the code:
|
||||||
```js
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// JavaScript: getTimestamp action
|
// JavaScript: getTimestamp action
|
||||||
// Inputs: None
|
// Inputs: None
|
||||||
// Returns: result (String)
|
// Returns: result (String)
|
||||||
|
|
|
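// [Sketch] The diff ends before the action body; given the description above (no
// inputs, returns a single timestamp string), a minimal implementation might look
// like this -- the exact format is an assumption, not the original code:
var date = new Date();
// strip punctuation from the ISO timestamp, e.g. "20210720T045200"
var result = date.toISOString().replace(/[-:.]/g, '').slice(0, 15);
return result;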
@ -12,14 +12,14 @@ tags:
|
||||||
title: 'vRA8 Custom Provisioning: Part Four'
|
title: 'vRA8 Custom Provisioning: Part Four'
|
||||||
---
|
---
|
||||||
|
|
||||||
My [last post in this series](/vra8-custom-provisioning-part-three) marked the completion of the vRealize Orchestrator workflow that I use for pre-provisioning tasks, namely generating a unique *sequential* hostname which complies with a defined naming standard and doesn't conflict with any existing records in vSphere, Active Directory, or DNS. That takes care of many of the "back-end" tasks for a simple deployment.
|
My [last post in this series](/vra8-custom-provisioning-part-three) marked the completion of the vRealize Orchestrator workflow that I use for pre-provisioning tasks, namely generating a unique *sequential* hostname which complies with a defined naming standard and doesn't conflict with any existing records in vSphere, Active Directory, or DNS. That takes care of many of the "back-end" tasks for a simple deployment.
|
||||||
|
|
||||||
This post will add in some "front-end" operations, like creating a customized VM request form in Service Broker and dynamically populating a drop-down with a list of networks available at the user-selected deployment site. I'll also take care of some housekeeping items like automatically generating a unique deployment name.
|
This post will add in some "front-end" operations, like creating a customized VM request form in Service Broker and dynamically populating a drop-down with a list of networks available at the user-selected deployment site. I'll also take care of some housekeeping items like automatically generating a unique deployment name.
|
||||||
|
|
||||||
### Getting started with Service Broker Custom Forms
|
### Getting started with Service Broker Custom Forms
|
||||||
So far, I've been working either in the Cloud Assembly or Orchestrator UIs, both of which are really geared toward administrators. Now I'm going to be working with Service Broker which will provide the user-facing front-end. This is where "normal" users will be able to submit provisioning requests without having to worry about any of the underlying infrastructure or orchestration.
|
So far, I've been working either in the Cloud Assembly or Orchestrator UIs, both of which are really geared toward administrators. Now I'm going to be working with Service Broker which will provide the user-facing front-end. This is where "normal" users will be able to submit provisioning requests without having to worry about any of the underlying infrastructure or orchestration.
|
||||||
|
|
||||||
Before I can do anything with my Cloud Template in the Service Broker UI, though, I'll need to release it from Cloud Assembly. I do this by opening the template on the *Design* tab and clicking the *Version* button at the bottom of the screen. I'll label this as `1.0` and tick the checkbox to *Release this version to the catalog*.
|
Before I can do anything with my Cloud Template in the Service Broker UI, though, I'll need to release it from Cloud Assembly. I do this by opening the template on the *Design* tab and clicking the *Version* button at the bottom of the screen. I'll label this as `1.0` and tick the checkbox to *Release this version to the catalog*.
|
||||||
![Releasing the Cloud Template to the Service Broker catalog](0-9BaWJqq.png)
|
![Releasing the Cloud Template to the Service Broker catalog](0-9BaWJqq.png)
|
||||||
|
|
||||||
I can then go to the Service Broker UI and add a new Content Source for my Cloud Assembly templates.
|
I can then go to the Service Broker UI and add a new Content Source for my Cloud Assembly templates.
|
||||||
|
@ -28,7 +28,7 @@ I can then go to the Service Broker UI and add a new Content Source for my Cloud
|
||||||
After hitting the *Create & Import* button, all released Cloud Templates in the selected Project will show up in the Service Broker *Content* section:
|
After hitting the *Create & Import* button, all released Cloud Templates in the selected Project will show up in the Service Broker *Content* section:
|
||||||
![New content!](Hlnnd_8Ed.png)
|
![New content!](Hlnnd_8Ed.png)
|
||||||
|
|
||||||
In order for users to deploy from this template, I also need to go to *Content Sharing*, select the Project, and share the content. This can be done either at the Project level or by selecting individual content items.
|
In order for users to deploy from this template, I also need to go to *Content Sharing*, select the Project, and share the content. This can be done either at the Project level or by selecting individual content items.
|
||||||
![Content sharing](iScnhmzVY.png)
|
![Content sharing](iScnhmzVY.png)
|
||||||
|
|
||||||
That template now appears on the Service Broker *Catalog* tab:
|
That template now appears on the Service Broker *Catalog* tab:
|
||||||
|
@ -48,7 +48,7 @@ How about that Deployment Name field? In my tests, I'd been manually creating a
|
||||||
### Automatic deployment naming
|
### Automatic deployment naming
|
||||||
*[Update] I've since come up with what I think is a better approach to handling this. Check it out [here](/vra8-automatic-deployment-naming-another-take)!*
|
*[Update] I've since come up with what I think is a better approach to handling this. Check it out [here](/vra8-automatic-deployment-naming-another-take)!*
|
||||||
|
|
||||||
That means it's time to dive back into the vRealize Orchestrator interface and whip up a new action for this purpose. I created a new action within my existing `net.bowdre.utility` module called `createDeploymentName`.
|
That means it's time to dive back into the vRealize Orchestrator interface and whip up a new action for this purpose. I created a new action within my existing `net.bowdre.utility` module called `createDeploymentName`.
|
||||||
![createDeploymentName action](GMCWhns7u.png)
|
![createDeploymentName action](GMCWhns7u.png)
|
||||||
|
|
||||||
A good deployment name *must* be globally unique, and it would be great if it could also convey some useful information like who requested the deployment, which template it is being deployed from, and the purpose of the server. The `siteCode (String)`, `envCode (String)`, `functionCode (String)`, and `appCode (String)` variables from the request form will do a great job of describing the server's purpose. I can also pass in some additional information from the Service Broker form like `catalogItemName (String)` to get the template name and `requestedByName (String)` to identify the user making the request. So I'll set all those as inputs to my action:
|
A good deployment name *must* be globally unique, and it would be great if it could also convey some useful information like who requested the deployment, which template it is being deployed from, and the purpose of the server. The `siteCode (String)`, `envCode (String)`, `functionCode (String)`, and `appCode (String)` variables from the request form will do a great job of describing the server's purpose. I can also pass in some additional information from the Service Broker form like `catalogItemName (String)` to get the template name and `requestedByName (String)` to identify the user making the request. So I'll set all those as inputs to my action:
|
||||||
|
@ -58,9 +58,10 @@ I also went ahead and specified that the action will return a String.
|
||||||
|
|
||||||
And now for the code. I really just want to mash all those variables together into a long string, and I'll also add a timestamp to make sure each deployment name is truly unique.
|
And now for the code. I really just want to mash all those variables together into a long string, and I'll also add a timestamp to make sure each deployment name is truly unique.
|
||||||
|
|
||||||
```js
|
```javascript
|
||||||
|
// torchlight! {"lineNumbers": true}
|
||||||
// JavaScript: createDeploymentName
|
// JavaScript: createDeploymentName
|
||||||
// Inputs: catalogItemName (String), requestedByName (String), siteCode (String),
|
// Inputs: catalogItemName (String), requestedByName (String), siteCode (String),
|
||||||
// envCode (String), functionCode (String), appCode (String)
|
// envCode (String), functionCode (String), appCode (String)
|
||||||
// Returns: deploymentName (String)
|
// Returns: deploymentName (String)
|
||||||
|
|
||||||
|
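// [Sketch] The diff cuts off before the body; per the description above (mash the
// inputs together and append a timestamp for uniqueness), it might look roughly like
// this -- the separators and timestamp format are assumptions:
var timestamp = new Date().getTime();
var deploymentName = requestedByName + "_" + catalogItemName + "_" +
    siteCode + "-" + envCode + functionCode + "-" + appCode + "-" + timestamp;
System.debug("Generated deployment name: " + deploymentName);
return deploymentName;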
@ -99,7 +100,7 @@ As a quick recap, I've got five networks available for vRA, split across my two
|
||||||
I'm going to add additional tags to these networks to further define their purpose.
|
I'm going to add additional tags to these networks to further define their purpose.
|
||||||
|
|
||||||
|Name |Purpose |Tags |
|
|Name |Purpose |Tags |
|
||||||
| --- | --- | --- |
|
| --- | --- | --- |
|
||||||
| d1620-Servers-1 |Management | `net:bow`, `net:mgmt` |
|
| d1620-Servers-1 |Management | `net:bow`, `net:mgmt` |
|
||||||
| d1630-Servers-2 | Front-end | `net:bow`, `net:front` |
|
| d1630-Servers-2 | Front-end | `net:bow`, `net:front` |
|
||||||
| d1640-Servers-3 | Back-end | `net:bow`, `net:back` |
|
| d1640-Servers-3 | Back-end | `net:bow`, `net:back` |
|
||||||
|
@ -109,7 +110,7 @@ I'm going to add additional tags to these networks to further define their purpo
|
||||||
I *could* just use those tags to let users pick the appropriate network, but I've found that a lot of times users don't know why they're picking a certain network; they just know the IP range they need to use. So I'll take it a step further and add a giant tag to include the Site, Purpose, and Subnet, and this is what will ultimately be presented to the users:
|
I *could* just use those tags to let users pick the appropriate network, but I've found that a lot of times users don't know why they're picking a certain network; they just know the IP range they need to use. So I'll take it a step further and add a giant tag to include the Site, Purpose, and Subnet, and this is what will ultimately be presented to the users:
|
||||||
|
|
||||||
|Name |Tags |
|
|Name |Tags |
|
||||||
| --- | --- |
|
| --- | --- |
|
||||||
| d1620-Servers-1 | `net:bow`, `net:mgmt`, `net:bow-mgmt-172.16.20.0` |
|
| d1620-Servers-1 | `net:bow`, `net:mgmt`, `net:bow-mgmt-172.16.20.0` |
|
||||||
| d1630-Servers-2 | `net:bow`, `net:front`, `net:bow-front-172.16.30.0` |
|
| d1630-Servers-2 | `net:bow`, `net:front`, `net:bow-front-172.16.30.0` |
|
||||||
| d1640-Servers-3 | `net:bow`, `net:back`, `net:bow-back-172.16.40.0` |
|
| d1640-Servers-3 | `net:bow`, `net:back`, `net:bow-back-172.16.40.0` |

So I can now use a single tag to positively identify a single network, as long as I know its site and either its purpose or its IP space. I'll reference these tags in a vRO action that will populate a dropdown in the request form with the available networks for the selected site. Unfortunately I couldn't come up with an easy way to dynamically pull the tags into vRO, so I created another Configuration Element to store them:

![networksPerSite configuration element](xfEultDM_.png)

This gets filed under the existing `CustomProvisioning` folder, and I name it `networksPerSite`. Each site gets a new variable of type `Array/string`. The name of the variable matches the site ID, and the contents are just the tags minus the `net:` prefix.

I created a new action named (appropriately) `getNetworksForSite`. This will accept `siteCode (String)` as its input from the Service Broker request form, and will return an array of strings containing the available networks.

![getNetworksForSite action](IdrT-Un8H1.png)

```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: getNetworksForSite
// Inputs: siteCode (String)
// Returns: site.value (Array/String)
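// A sketch of the lookup implied above (assumed, not necessarily verbatim):
// find the networksPerSite Configuration Element and return the attribute
// whose key matches the requested site code.
var category = Server.getConfigurationElementCategoryWithPath("CustomProvisioning");
var elements = category.configurationElements;
var networksPerSite;
for (var i in elements) {
    if (elements[i].name == "networksPerSite") { networksPerSite = elements[i]; }
}
var site = networksPerSite.getAttributeWithKey(siteCode);
return site.value;
```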

and update the resource configuration for the network entity to constrain it based on `input.network` instead of `input.site` as before:

```yaml
# torchlight! {"lineNumbers": true}
resources:
  Cloud_vSphere_Machine_1:
    type: Cloud.vSphere.Machine
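    # (machine properties unchanged and omitted here; the sketch below shows
    # the network entity change described above, assumed rather than verbatim)
  Cloud_vSphere_Network_1:
    type: Cloud.vSphere.Network
    properties:
      networkType: existing
      constraints:
        - tag: '${input.network}'
```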

Back on the Service Broker UI, I hit my `LAB` Content Source again to Save & Import the updated template.

Now I can just go back to the Catalog tab and request a new deployment to check out my--

![Ew, an ugly error](zWFTuOYOG.png)

Oh yeah. That vRO action gets called as soon as the request form loads - before selecting the required site code as an input. I could modify the action so that it returns an empty string if the site hasn't been selected yet, but I'm kind of lazy so I'll instead just modify the custom form so that the Site field defaults to the `BOW` site.

![BOW is default](yb77nH2Fp.png)

*Now* I can open up the request form and see how well it works:

And I can also confirm that the VM got named appropriately.

Very slick. And I think that's a great stopping point for today.

Coming up, I'll describe how I create AD computer objects in site-specific OUs, add notes and custom attributes to the VM in vSphere, and optionally create static DNS records on a Windows DNS server.

I created a few Flavor Mappings ranging from `micro` (1vCPU|1GB RAM) to `giant` (8vCPU|16GB) but for this resource-constrained lab I'll stick mostly to the `micro`, `tiny` (1vCPU|2GB), and `small` (2vCPU|2GB) sizes.

![T-shirt size Flavor Mappings](lodJlc8Hp.png)

And I created an Image Mapping named `ws2019` which points to a Windows Server 2019 Core template I have stored in my lab's Content Library (cleverly-named "LABrary" for my own amusement).

![Windows Server Image Mapping](6k06ySON7.png)

And with that, my vRA infrastructure is ready for testing a *very* basic deployment.
Now it's time to leave the Infrastructure tab and visit the Design one, where I'll create a new Cloud Template.

VMware's got a [pretty great document](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-6BA1DA96-5C20-44BF-9C81-F8132B9B4872.html#list-of-input-properties-2) describing the syntax for these input properties, plus a lot of it is kind of self-explanatory. Let's step through this real quick:

```yaml
# torchlight! {"lineNumbers": true}
formatVersion: 1
inputs:
  # Image Mapping
  image:
    type: string
    title: Operating System
    oneOf:
      - title: Windows Server 2019
        const: ws2019
    default: ws2019
```

`formatVersion` is always gonna be 1 so we'll skip right past that.

The first input is going to ask the user to select the desired Operating System for this deployment. The `oneOf` type will be presented as a dropdown (with only one option in this case, but I'll leave it this way for future flexibility); the user will see the friendly "Windows Server 2019" `title` which is tied to the `ws2019` `const` value. For now, I'll also set the `default` value of the field so I don't have to actually click the dropdown each time I test the deployment.

```yaml
# torchlight! {"lineNumbers": true}
# Flavor Mapping
size:
  title: Resource Size
  # ...
```

Now I'm asking the user to pick the t-shirt size of the VM. These will correspond to the Flavor Mappings I created earlier.

The `resources` section is where the data from the inputs gets applied to the deployment:
```yaml
# torchlight! {"lineNumbers": true}
resources:
  Cloud_vSphere_Machine_1:
    type: Cloud.vSphere.Machine
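    # (assumed mapping, per the sentence that follows: wire the image and
    # size inputs to the Image and Flavor Mappings)
    properties:
      image: '${input.image}'
      flavor: '${input.size}'
```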

So I'm connecting the selected `input.image` to the Image Mapping configured in vRA, and the selected `input.size` to the Flavor Mapping.

All together now:
```yaml
# torchlight! {"lineNumbers": true}
formatVersion: 1
inputs:
  # Image Mapping
  # ...
```

And I can pop over to the IPAM interface to confirm that the IP has been marked as reserved.

Fantastic! But one of my objectives from earlier was to let the user control where a VM gets provisioned. Fortunately it's pretty easy to implement thanks to vRA 8's use of tags.

### Using tags for resource placement
Just about every entity within vRA 8 can have tags applied to it, and you can leverage those tags in some pretty creative and useful ways. For now, I'll start by applying tags to my compute resources; I'll use `comp:bow` for the "BOW Cluster" and `comp:dre` for the "DRE Cluster".

![Compute tags](oz1IAp-i0.png)

I'll also use the `net:bow` and `net:dre` tags to logically divide up the networks between my sites:

I can now add an input to the Cloud Template so the user can pick which site they need to deploy to:
```yaml
# torchlight! {"lineNumbers": true}
inputs:
  # Datacenter location
  site:
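    # (assumed completion, based on the enum described below and the two
    # site codes used throughout this series)
    type: string
    title: Site
    enum:
      - BOW
      - DRE
```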

I'm using the `enum` option now instead of `oneOf` since the site names shouldn't need any further explanation.

And then I'll add some `constraints` to the `resources` section, making use of the `to_lower` function from the [cloud template expression syntax](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-12F0BC64-6391-4E5F-AA48-C5959024F3EB.html) to automatically convert the selected site name from all-caps to lowercase so it matches the appropriate tag:
```yaml
# torchlight! {"lineNumbers": true}
resources:
  Cloud_vSphere_Machine_1:
    type: Cloud.vSphere.Machine
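    properties:
      # (sketch of the constraint described above; other properties omitted)
      constraints:
        - tag: 'comp:${to_lower(input.site)}'
```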

Once it completes successfully, I can visit the Inventory section of the vRO interface to confirm that the new Active Directory endpoint shows up:

![New AD endpoint](vlnle_ekN.png)
#### checkForAdConflict Action
Since I try to keep things modular, I'm going to write a new vRO action within the `net.bowdre.utility` module called `checkForAdConflict` which can be called from the `Generate unique hostname` workflow. It will take in `computerName (String)` as an input and return a boolean `True` if a conflict is found or `False` if the name is available.

![Action: checkForAdConflict](JT7pbzM-5.png)

It's basically going to loop through the Active Directory hosts defined in vRO and search each for a matching computer name. Here's the full code:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: checkForAdConflict action
// Inputs: computerName (String)
// Outputs: (Boolean)
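// A sketch of the loop described above (assumed, not necessarily verbatim):
// check every AD host known to vRO for a computer object with this name.
var adHosts = AD_HostManager.findAllHosts();
for each (var adHost in adHosts) {
    var computer = ActiveDirectory.getComputerAD(computerName, adHost);
    System.log("Searched AD host " + adHost.name + " for " + computerName);
    if (computer) {
        System.log("Found conflict: " + computer.name);
        return true;
    }
}
System.log("No AD conflict found for " + computerName);
return false;
```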

Now I can pop back over to my massive `Generate unique hostname` workflow and drop in a new scriptable task to call that action.

I'm using this as a scriptable task so that I can do a little bit of processing before I call the action I created earlier - namely, if `conflict (Boolean)` was already set, the task should skip any further processing. That does mean that I'll need to call the action by both its module and name using `System.getModule("net.bowdre.utility").checkForAdConflict(candidateVmName)`. So here's the full script:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: check for AD conflict task
// Inputs: candidateVmName (String), conflict (Boolean)
// Outputs: conflict (Boolean)
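// A sketch of the logic described above (assumed): skip the lookup if a
// conflict was already flagged, otherwise call the action by module and name.
if (conflict) {
    System.log("Existing conflict found, skipping AD check...");
} else {
    conflict = System.getModule("net.bowdre.utility").checkForAdConflict(candidateVmName);
    if (conflict) {
        System.warn("Conflicting AD object found!");
    } else {
        System.log("No AD conflict found for " + candidateVmName);
    }
}
```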

Cool, so that's the AD check in the bank. Onward to DNS!

### DNS
**[Update]** Thanks to a [kind commenter](https://github.com/jbowdre/jbowdre.github.io/issues/10#issuecomment-932541245), I've learned that my DNS-checking solution detailed below is somewhat unnecessarily complicated. I overlooked it at the time I was putting this together, but vRO _does_ provide a `System.resolveHostName()` function to easily perform DNS lookups. I've updated the [Adding it to the workflow](#adding-it-to-the-workflow-1) section below with the simplified script which eliminates the need for building an external script with dependencies and importing that as a vRO action, but I'm going to leave those notes in place as well in case anyone else (or Future John) might need to leverage a similar approach to solve another issue.

Seriously. Go ahead and skip to [here](#adding-it-to-the-workflow-1).

#### The Challenge (Deprecated)
JavaScript can't talk directly to Active Directory on its own, but in the previous action I was able to leverage the AD plugin built into vRO to bridge that gap. Unfortunately ~~there isn't~~ _I couldn't find_ a corresponding pre-installed plugin that will work as a DNS client. vRO 8 does introduce support for using other languages like (cross-platform) PowerShell or Python instead of being restricted to just JavaScript... but I wasn't able to find an easy solution for querying DNS from those languages either without requiring external modules. (The cross-platform version of PowerShell doesn't include handy Windows-centric cmdlets like `Get-DnsServerResourceRecord`.)

Luckily, vRO does provide a way to import scripts bundled with their required modules.

I start by creating a folder to store the script and needed module, and then I create the required `handler.ps1` file.

```shell
mkdir checkDnsConflicts # [tl! .cmd:2]
cd checkDnsConflicts
touch handler.ps1
```

I then create a `Modules` folder and install the DnsClient-PS module:

```shell
mkdir Modules # [tl! .cmd:1]
pwsh -c "Save-Module -Name DnsClient-PS -Path ./Modules/ -Repository PSGallery"
```

And then it's time to write the PowerShell script in `handler.ps1`:

```powershell
# torchlight! {"lineNumbers": true}
# PowerShell: checkForDnsConflict script
# Inputs: $inputs.hostname (String), $inputs.domain (String)
# Outputs: $queryresult (String)
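# A sketch of the handler described here (assumed, not the verbatim
# original): resolve {hostname}.{domain} with DnsClient-PS and report
# whether a record already exists.
function handler {
    Param($context, $inputs)
    $hostname = $inputs.hostname
    $domain = $inputs.domain
    $fqdn = $hostname + '.' + $domain
    Import-Module DnsClient-PS
    $resolution = (Resolve-Dns $fqdn)
    if (-not $resolution.HasError) {
        $queryresult = "true"   # a record exists, so there's a conflict
    } else {
        $queryresult = "false"  # no record found, the name is available
    }
    $output = @{queryresult = $queryresult}
    return $output
}
```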

Now to package it up in a `.zip` which I can then import into vRO:

```shell
zip -r --exclude=\*.zip -X checkDnsConflicts.zip . # [tl! .cmd]
adding: Modules/ (stored 0%) # [tl! .nocopy:start]
adding: Modules/DnsClient-PS/ (stored 0%)
adding: Modules/DnsClient-PS/1.0.0/ (stored 0%)
adding: Modules/DnsClient-PS/1.0.0/Public/ (stored 0%)
adding: Modules/DnsClient-PS/1.0.0/DnsClient-PS.Format.ps1xml (deflated 80%)
adding: Modules/DnsClient-PS/1.0.0/DnsClient-PS.psd1 (deflated 59%)
adding: handler.ps1 (deflated 49%)
# [tl! .nocopy:end]
ls # [tl! .cmd]
checkDnsConflicts.zip handler.ps1 Modules # [tl! .nocopy]
```
#### checkForDnsConflict action (Deprecated)

Just like with the `check for AD conflict` action, I'll add this onto the workflow as a scriptable task.

_[Update] The below script has been altered to drop the unneeded call to my homemade `checkForDnsConflict` action and instead use the built-in `System.resolveHostName()`. Thanks @powertim!_

```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: check for DNS conflict
// Inputs: candidateVmName (String), conflict (Boolean), requestProperties (Properties)
// Outputs: conflict (Boolean)
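// A sketch of the simplified script described in the update above (assumed):
// skip if a conflict is already flagged, otherwise try resolving the FQDN.
if (conflict) {
    System.log("Existing conflict found, skipping DNS check...");
} else {
    var fqdn = candidateVmName + '.' + requestProperties.get("dnsDomain");
    if (System.resolveHostName(fqdn)) {
        conflict = true;
        System.warn("Conflicting DNS record found for " + fqdn);
    } else {
        System.log("No DNS conflict found for " + fqdn);
    }
}
```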

Once that's all in place, I kick off another deployment to make sure that everything works correctly. After it completes, I can navigate to the **Extensibility > Workflow runs** section of the vRA interface to review the details:

![Workflow run success](GZKQbELfM.png)

It worked!

But what if there *had* been conflicts? It's important to make sure that works too. I know that if I run that deployment again, the VM will get named `DRE-DTST-XXX008` and then `DRE-DTST-XXX009`. So I'm going to force conflicts by creating an AD object for one and a DNS record for the other.

![Making conflicts](6HBIUf6KE.png)

The workflow saw that the last VM was created as `-007` so it first grabbed `-008`, caught the conflicts I had planted, and kept incrementing until it landed on a name that was clear in vSphere, AD, and DNS.

### Next steps
So now I've got a pretty capable workflow for controlled naming of my deployed VMs. The names conform with my established naming scheme and increment predictably in response to naming conflicts in vSphere, Active Directory, and DNS.

In the next post, I'll be enhancing my cloud template to let users pick which network to use for the deployed VM. That sounds simple, but I'll want the list of available networks to be filtered based on the selected site - that means using a Service Broker custom form to query another vRO action. I will also add the ability to create AD computer objects in a site-specific OU and automatically join the server to the domain. And I'll add notes to the VM to make it easier to remember why it was deployed.

Stay tuned!
I'll start by adding those fields as inputs on my cloud template.

I already have a `site` input at the top of the template, used for selecting the deployment location. I'll leave that there:

```yaml
# torchlight! {"lineNumbers": true}
inputs:
  site:
    type: string
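    # (assumed completion: the enum of site codes described earlier)
    title: Site
    enum:
      - BOW
      - DRE
```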

I'll add the rest of the naming components below the prompts for image selection and size, starting with a dropdown of environments to pick from:

```yaml
# torchlight! {"lineNumbers": true}
environment:
  type: string
  title: Environment
  # ...
```

And a dropdown for those function options:
```yaml
# torchlight! {"lineNumbers": true}
function:
  type: string
  title: Function Code
  # ...
```

And finally a text entry field for the application descriptor. Note that this one includes the `minLength` and `maxLength` constraints to enforce the three-character format.
```yaml
# torchlight! {"lineNumbers": true}
app:
  type: string
  title: Application Code
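  # (assumed completion, per the note above about the three-character format)
  minLength: 3
  maxLength: 3
```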

I then need to map these inputs to the resource entity at the bottom of the template so that they can be passed to vRO as custom properties. All of these are direct mappings except for `environment` since I only want the first letter. I use the `substring()` function to achieve that, but wrap it in a conditional so that it won't implode if the environment hasn't been picked yet. I'm also going to add in a `dnsDomain` property that will be useful later when I need to query for DNS conflicts.
```yaml
# torchlight! {"lineNumbers": true}
resources:
  Cloud_vSphere_Machine_1:
    type: Cloud.vSphere.Machine
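    properties:
      # (a sketch of the mappings described above; the dnsDomain value is a
      # placeholder, not necessarily the real domain)
      site: '${input.site}'
      environment: '${input.environment != "" ? substring(input.environment, 0, 1) : ""}'
      function: '${input.function}'
      app: '${input.app}'
      dnsDomain: 'lab.example.com'
```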

So here's the complete template:
```yaml
# torchlight! {"lineNumbers": true}
formatVersion: 1
inputs:
  site:
    # ...
```
The first thing I'll want this workflow to do (particularly for testing) is to tell me about the data it's receiving from vRA.

This action has a single input, a `Properties` object named `payload`. (By the way, vRO is pretty particular about variable typing so going forward I'll reference variables as `variableName (type)`.) Here's the JavaScript that will basically loop through each element and write the contents to the vRO debug log:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: logPayloadProperties
// Inputs: payload (Properties)
// Outputs: none
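// A sketch of the logging loop described above (assumed): walk the keys of
// the Properties object, recursing into any nested Properties.
function logProperties(props, indent) {
    var prefix = new Array(indent + 1).join("  ");
    for each (var key in props.keys) {
        var value = props.get(key);
        if (value instanceof Properties) {
            System.debug(prefix + key + ":");
            logProperties(value, indent + 1);
        } else {
            System.debug(prefix + key + ": " + value);
        }
    }
}
logProperties(payload, 0);
```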

Anyway, I drop a Scriptable Task item onto the workflow canvas to handle parsing the incoming payload.

The script for this is pretty straightforward:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: parse payload
// Inputs: inputProperties (Properties)
// Outputs: requestProperties (Properties), originalNames (Array/string)
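// A sketch of the parsing step (assumed, not verbatim): pull the generated
// resource names and the custom properties out of the event payload so the
// later tasks have them in a convenient shape.
var customProperties = inputProperties.customProperties;
originalNames = inputProperties.resourceNames;
requestProperties = new Properties();
requestProperties.put("site", customProperties.get("site"));
requestProperties.put("environment", customProperties.get("environment"));
requestProperties.put("function", customProperties.get("function"));
requestProperties.put("app", customProperties.get("app"));
requestProperties.put("dnsDomain", customProperties.get("dnsDomain"));
System.log("Parsed requestProperties: " + requestProperties);
```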

Select **Output** at the top of the *New Variable* dialog and complete the form.

And here's the script for that task:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: Apply new names
// Inputs: inputProperties (Properties), newNames (Array/string)
// Outputs: resourceNames (Array/string)
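// A sketch (assumed): overwrite the original resource names with the
// newly-generated ones and pass the result back to vRA.
resourceNames = inputProperties.get("resourceNames");
for (var i in newNames) {
    resourceNames[i] = newNames[i];
}
System.log("Applying new names: " + resourceNames);
```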

Okay, on to the schema. This workflow may take a little while to execute, and it would be a problem if a second request grabbed the same name while the first was still running, so the workflow starts by creating a lock.

The script is very short:
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: create lock
// Inputs: lockOwner (String), lockId (String)
// Outputs: none
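// Likely implementation using vRO's built-in LockingSystem: wait until the
// lock can be acquired so only one run proceeds at a time.
System.log("Creating lock...");
LockingSystem.lockAndWait(lockId, lockOwner);
```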

We're getting to the meat of the operation now - another scriptable task named `generate hostnameBase`:

![Task: generate hostnameBase](XATryy20y.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: generate hostnameBase
// Inputs: nameFormat (String), requestProperties (Properties), baseFormat (String)
// Outputs: hostnameBase (String), digitCount (Number), hostnameSeq (Number)
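// A rough sketch of the idea (assumed, not the original script, which would
// drive this from the nameFormat/baseFormat inputs): build the static part
// of the name from the request properties per the {site}-{env}{function}-{app}
// scheme, and set up the sequence counter.
hostnameBase = (requestProperties.get("site") + "-" +
    requestProperties.get("environment") +
    requestProperties.get("function") + "-" +
    requestProperties.get("app")).toUpperCase();
digitCount = 3;   // assumption: names like DRE-DTST-XXX008 use three digits
hostnameSeq = 0;  // incremented before the first candidate is built
System.log("hostnameBase: " + hostnameBase);
```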

I've only got the one vCenter in my lab. At work, I've got multiple vCenters so I wanted to make sure this would work in that environment too.

Anyway, back to my "Generate unique hostname" workflow, where I'll add another scriptable task to prepare the vCenter SDK connection. This one doesn't require any inputs, but will output an array of `VC:SdkConnection` objects:

![Task: prepare vCenter SDK connection](ByIWO66PC.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: prepare vCenter SDK connection
// Inputs: none
// Outputs: sdkConnections (Array/VC:SdkConnection)
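// Likely just a matter of asking the vCenter plugin for every registered
// connection (assumed):
sdkConnections = VcPlugin.allSdkConnections;
System.log("Found vCenter SDK connections: " + sdkConnections);
```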

Next, I'm going to drop another ForEach element onto the canvas. For each vCenter connection, it will search for any VMs with names matching `hostnameBase (String)` and collect the results.

That `vmsByHost (Array/array)` object contains any and all VMs which match `hostnameBase (String)`, but they're broken down by the host they're running on. So I use a scriptable task to convert that array-of-arrays into a new array-of-strings containing just the VM names.

![Task: unpack results for all hosts](gIEFRnilq.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: unpack results for all hosts
// Inputs: vmsByHost (Array/Array)
// Outputs: vmNames (Array/string)
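// Flatten the array-of-arrays and uppercase the names. The map() line comes
// from the post itself; the reduce() is an assumed sketch of the flattening.
var vms = vmsByHost.reduce(function(a, b) { return a.concat(b); }, []);
vmNames = vms.map(function(i) { return (i.displayName).toUpperCase(); });
System.log("Found VM names: " + vmNames);
```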

This scriptable task will check the `computerNames` configuration element we created earlier to see if we've already named a VM starting with `hostnameBase (String)`. If such a name exists, we'll increment the number at the end by one, and return that as a new `hostnameSeq (Number)` variable; if it's the first of its kind, `hostnameSeq (Number)` will be set to `1`. And then we'll combine `hostnameBase (String)` and `hostnameSeq (Number)` to create the new `candidateVmName (String)`. If things don't work out, this script will throw `errMsg (String)` so I need to add that as an output exception binding as well.

![Task: generate hostnameSeq & candidateVmName](fWlSrD56N.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: generate hostnameSeq & candidateVmName
// Inputs: hostnameBase (String), digitCount (Number)
// Outputs: hostnameSeq (Number), computerNames (ConfigurationElement), candidateVmName (String)
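// A condensed sketch of the logic described above (assumed, not verbatim):
// look up the last sequence recorded for this hostnameBase in the
// computerNames configuration element, increment it (or start at 1), and
// zero-pad it to digitCount digits.
var category = Server.getConfigurationElementCategoryWithPath("CustomProvisioning");
var elements = category.configurationElements;
for (var i in elements) {
    if (elements[i].name == "computerNames") { computerNames = elements[i]; }
}
if (!computerNames) {
    errMsg = "computerNames configuration element not found!";
    throw(errMsg);
}
var lastSeq = 0;
var attribute = computerNames.getAttributeWithKey(hostnameBase);
if (attribute != null) { lastSeq = parseInt(attribute.value); }
hostnameSeq = lastSeq + 1;
var seqString = hostnameSeq.toString();
while (seqString.length < digitCount) { seqString = "0" + seqString; }
candidateVmName = hostnameBase + seqString;
computerNames.setAttributeWithKey(hostnameBase, hostnameSeq.toString());
System.log("Proposed VM name: " + candidateVmName);
```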

Now that I know what I'd like to try to name this new VM, it's time to start checking for any potential conflicts. So this task will compare my `candidateVmName (String)` against the existing `vmNames (Array/string)` to see if there are any collisions. If there's a match, it will set a new variable called `conflict (Boolean)` to `true` and also report the issue through the `errMsg (String)` output exception binding. Otherwise it will move on to the next check.

![Task: check for VM name conflicts](qmHszypww.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: check for VM name conflicts
// Inputs: candidateVmName (String), vmNames (Array/string)
// Outputs: conflict (Boolean)
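// A sketch of the comparison described above (assumed):
conflict = false;
for each (var vmName in vmNames) {
    if (vmName == candidateVmName) {
        conflict = true;
        errMsg = "Found a conflicting VM name!";
        System.warn(errMsg);
        throw(errMsg);
    }
}
System.log("No VM name conflicts found for " + candidateVmName);
```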

I can then drag the new element away from the "everything is fine" flow, and connect it to the error path coming out of the previous task instead.

All this task really does is clear the `conflict (Boolean)` flag so that's the only output.
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: conflict resolution
// Inputs: none
// Outputs: conflict (Boolean)
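// As described above, this just clears the flag so the workflow can loop
// back and try the next name:
System.log("Conflict encountered, resetting flag to try a new name...");
conflict = false;
```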

So if `check VM name conflict` encounters a collision with an existing VM name it will set `conflict (Boolean)` and throw `errMsg (String)`, which gets caught and cleared by the conflict resolution task before another attempt is made.

Assuming that everything has gone according to plan and the workflow has avoided any naming conflicts, it will need to return `nextVmName (String)` back to the `VM Provisioning` workflow. That's as simple as setting it to the last value of `candidateVmName (String)`:

![Task: return nextVmName](5QFTPHp5H.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: return nextVmName
// Inputs: candidateVmName (String)
// Outputs: nextVmName (String)
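// Grab the last candidate as the final output (the log line is from the
// post itself):
nextVmName = candidateVmName;
System.log(" ***** Selecting [" + nextVmName + "] as the next VM name ***** ");
```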

And we should also remove that lock that we created at the start of this workflow.

![Task: remove lock](BhBnBh8VB.png)
```javascript
// torchlight! {"lineNumbers": true}
// JavaScript: remove lock
// Inputs: lockId (String), lockOwner (String)
// Outputs: none
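// The counterpart to lockAndWait (assumed):
LockingSystem.unlock(lockId, lockOwner);
```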

{{ .Content }}
</header>

{{- range (.Paginate $pages).Pages }}
{{- $postDate := .Date.Format "2006-01-02" }}
{{- $updateDate := .Lastmod.Format "2006-01-02" }}
<article class="post">

</section>
<br>
</article>
{{ end }}
{{- template "_internal/pagination.html" . }}

{{- partial "lang.html" . -}}
<p class="copyright">{{ .Site.Copyright | markdownify }}</p>
<p class="powered_by">{"powered_by": [{{- range $i, $link := .Site.Params.powerLinks }}{{ if $i }}, {{ end }}"<a target="_blank" href="{{ $link.url }}">{{ $link.title }}</a>"{{ end }}]}
<br><<a target="_blank" href="https://github.com/jbowdre/runtimeterror">view source</a>></p>

{{ if (findRE "<pre" .Content 1) }}
  {{ $jsCopy := resources.Get "js/code-copy-button.js" | minify }}
  <script src="{{ $jsCopy.RelPermalink }}"></script>
{{ end }}
<link rel="stylesheet" href="{{ "css/risotto.css" | absURL }}">
<link rel="stylesheet" href="{{ "css/custom.css" | absURL }}">

{{ if .Site.Params.analytics }}
  <!-- cabin analytics -->
  <script async defer src="https://scripts.withcabin.com/hello.js"></script>
{{ end }}

<!-- syntax highlighting -->
{{ if (findRE "<pre" .Content 1) }}
  {{ $syntax := resources.Get "css/torchlight.css" | minify }}
  <link href="{{ $syntax.RelPermalink }}" rel="stylesheet">
  {{ $copyCss := resources.Get "css/code-copy-button.css" | minify }}
  <link href="{{ $copyCss.RelPermalink }}" rel="stylesheet">
{{ end }}
netlify.toml

HUGO_VERSION = "0.111.3"

[context.production]
command = """
hugo --minify
npm i @torchlight-api/torchlight-cli
npx torchlight
"""

[context.preview]
command = """
hugo --minify --environment preview
npm i @torchlight-api/torchlight-cli
npx torchlight
"""
[[headers]]
  for = "/*"
  [headers.values]
    X-Robots-Tag = "noindex"

[context.drafts]
command = """
hugo --minify --environment drafts -D
npm i @torchlight-api/torchlight-cli
npx torchlight
"""
[[headers]]
  for = "/*"
  [headers.values]
    X-Robots-Tag = "noindex"

[[redirects]]
from = "/*"

opacity: 0.80;
}

/* Footer tweaks */
.copyright {
  font-size: 14px;
  line-height: 1.3rem;
}

.powered_by {
  font-size: 12px;
  line-height: 1.1rem;
  color: var(--muted);
}

.powered_by a:link, .powered_by a:visited {
  color: var(--off-fg);
  text-decoration: none;
}

.powered_by a:hover {
  color: var(--hover);
  text-decoration: underline;
}

/* Notice CSS Built on hugo-notice by Nicolas Martignoni: https://github.com/martignoni/hugo-notice */
.notice {
  --root-color: #444;

body.dark .notice {
  top: 0.125em;
  position: relative;
}

/* pagination overrides */
ul.pagination li::marker {
  content: '';
}

ul.pagination li {
  margin: 0 0.25rem;
}

.pagination {
  display: flex;
  justify-content: center;
}
*/

:root {
  --base00: #181818; /* bg */
  --base01: #282828; /* off-bg */
  --base02: #383838; /* inner-bg */
  --base03: #585858; /* muted */
  --base04: #959494; /* off-fg */
  --base05: #d8d8d8; /* fg */
  --base06: #e8e8e8;
  --base07: #5f8700; /* user prompt */
  --base08: #ab4642; /* root prompt */
  --base09: #dc9656;
  --base0A: #f7ca88; /* highlight */
  --base0B: #772a28; /* logo */
  --base0C: #ab2321; /* hover */
  --base0D: #c45a5a; /* link */
  --base0E: #ba8baf;
  --base0F: #a16946;
}
Subproject commit 4343550d785d8cce942ac5109aa9fdd9d9a70823
torchlight.config.js

module.exports = {
  // Your token from https://torchlight.dev
  token: process.env.TORCHLIGHT_TOKEN,

  // The Torchlight client caches highlighted code blocks. Here you
  // can define which directory you'd like to use. You'll likely
  // want to add this directory to your .gitignore. Set to
  // `false` to use an in-memory cache. You may also
  // provide a full cache implementation.
  cache: false,

  // Which theme you want to use. You can find all of the themes at
  // https://torchlight.dev/docs/themes.
  theme: 'one-dark-pro',

  // The Host of the API.
  host: 'https://api.torchlight.dev',

  // Global options to control block-level settings.
  // https://torchlight.dev/docs/options
  options: {
    // Turn line numbers on or off globally.
    lineNumbers: false,

    // Control the `style` attribute applied to line numbers.
    // lineNumbersStyle: '',

    // Turn on +/- diff indicators.
    diffIndicators: true,

    // If there are any diff indicators for a line, put them
    // in place of the line number to save horizontal space.
    diffIndicatorsInPlaceOfLineNumbers: true,

    // When lines are collapsed, this is the text that will
    // be shown to indicate that they can be expanded.
    summaryCollapsedIndicator: 'Click to expand...',
  },

  // Options for the highlight command.
  highlight: {
    // Directory where your un-highlighted source files live. If
    // left blank, Torchlight will use the current directory.
    input: 'public',

    // Directory where your highlighted files should be placed. If
    // left blank, files will be modified in place.
    output: '',

    // Globs to include when looking for files to highlight.
    includeGlobs: [
      '**/*.htm',
      '**/*.html'
    ],

    // String patterns to ignore (not globs). The entire file
    // path will be searched and if any of these strings
    // appear, the file will be ignored.
    excludePatterns: [
      '/node_modules/',
      '/vendor/'
    ]
  }
}