diff --git a/content/post/accessing-tce-cluster-from-new-device/index.md b/content/post/accessing-tce-cluster-from-new-device/index.md new file mode 100644 index 0000000..4b43a12 --- /dev/null +++ b/content/post/accessing-tce-cluster-from-new-device/index.md @@ -0,0 +1,45 @@ +--- +title: "Accessing a Tanzu Community Edition Kubernetes Cluster from a new device" # Title of the blog post. +date: 2022-02-01T10:58:57-06:00 # Date of post creation. +# lastmod: 2022-02-01T10:58:57-06:00 # Date when last modified +description: "The Tanzu Community Edition documentation does a great job of explaining how to authenticate to a newly-deployed cluster at the tail end of the installation steps, but how do you log in from another system?" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: true # Sets whether to render this page. Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips +tags: + - vmware + - kubernetes + - tanzu +comment: true # Disable comment if false. +--- +When I [recently set up my Tanzu Community Edition environment](/tanzu-community-edition-k8s-homelab/), I did so from a Linux VM since I knew that my Chromebook Linux environment wouldn't support the `kind` bootstrap cluster used for the deployment. 
But now I'd like to be able to connect to the cluster directly using the `tanzu` and `kubectl` CLI tools. How do I get the appropriate cluster configuration over to my Chromebook? + +The Tanzu CLI actually makes that pretty easy. I just run these commands on my Linux VM to export the `kubeconfig` of my management (`tce-mgmt`) and workload (`tce-work`) clusters to a pair of files: +```shell +tanzu management-cluster kubeconfig get --admin --export-file tce-mgmt-kubeconfig.yaml +tanzu cluster kubeconfig get tce-work --admin --export-file tce-work-kubeconfig.yaml +``` + +I could then use `scp` to pull the files from the VM into my local Linux environment. I then needed to [install `kubectl`](/tanzu-community-edition-k8s-homelab/#kubectl-binary) and the [`tanzu` CLI](/tanzu-community-edition-k8s-homelab/#tanzu-cli) (making sure to also [enable shell auto-completion](/enable-tanzu-cli-auto-completion-bash-zsh/) along the way!), and I could import the configurations locally: + +```shell +❯ tanzu login --kubeconfig tce-mgmt-kubeconfig.yaml --context tce-mgmt-admin@tce-mgmt --name tce-mgmt +✔ successfully logged in to management cluster using the kubeconfig tce-mgmt + +❯ tanzu login --kubeconfig tce-work-kubeconfig.yaml --context tce-work-admin@tce-work --name tce-work +✔ successfully logged in to management cluster using the kubeconfig tce-work +``` + + + + + diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/-Fuvz-GmF.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/-Fuvz-GmF.png new file mode 100644 index 0000000..c3f6706 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/-Fuvz-GmF.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/5ATk99aPW.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/5ATk99aPW.png new file mode 100644 index 0000000..a6873e0 Binary files /dev/null and 
b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/5ATk99aPW.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/F3Wkd3VT.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/F3Wkd3VT.png new file mode 100644 index 0000000..ffa32a6 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/F3Wkd3VT.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/Lq9DBCK_Y.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/Lq9DBCK_Y.png new file mode 100644 index 0000000..3f4c3f5 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/Lq9DBCK_Y.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/N7YllJkxS.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/N7YllJkxS.png new file mode 100644 index 0000000..1256192 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/N7YllJkxS.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/PmhVOWJsUn.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/PmhVOWJsUn.png new file mode 100644 index 0000000..ffbf3c3 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/PmhVOWJsUn.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/X9JhgWx8x.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/X9JhgWx8x.png new file mode 100644 index 0000000..986ebfa Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/X9JhgWx8x.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/Z2aKLsLou.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/Z2aKLsLou.png new file mode 100644 index 0000000..698e898 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/Z2aKLsLou.png 
differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/cEbWSOg00.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/cEbWSOg00.png new file mode 100644 index 0000000..c49c6b0 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/cEbWSOg00.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/index.md b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/index.md new file mode 100644 index 0000000..28ca400 --- /dev/null +++ b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/index.md @@ -0,0 +1,134 @@ +--- +series: vRA8 +date: "2021-06-01T08:34:30Z" +thumbnail: -Fuvz-GmF.png +usePageBundles: true +tags: +- vmware +- vra +- vro +- javascript +title: Adding VM Notes and Custom Attributes with vRA8 +--- + +*In [past posts](/series/vra8), I started by [creating a basic deployment infrastructure](/vra8-custom-provisioning-part-one) in Cloud Assembly and using tags to group those resources. I then [wrote an integration](/integrating-phpipam-with-vrealize-automation-8) to let vRA8 use phpIPAM for static address assignments. I [implemented a vRO workflow](/vra8-custom-provisioning-part-two) for generating unique VM names which fit an organization's established naming standard, and then [extended the workflow](/vra8-custom-provisioning-part-three) to avoid any naming conflicts in Active Directory and DNS. And, finally, I [created an intelligent provisioning request form in Service Broker](/vra8-custom-provisioning-part-four) to make it easy for users to get the servers they need. That's got the core functionality pretty well sorted, so moving forward I'll be detailing additions that enable new capabilities and enhance the experience.* + +In this post, I'll describe how to get certain details from the Service Broker request form and into the VM's properties in vCenter. 
The obvious application of this is adding descriptive notes so I can remember what purpose a VM serves, but I will also be using [Custom Attributes](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-73606C4C-763C-4E27-A1DA-032E4C46219D.html) to store the server's Point of Contact information and a record of which ticketing system request resulted in the server's creation. + +### New inputs +I'll start this by adding a few new inputs to the cloud template in Cloud Assembly. +![New inputs in Cloud Assembly](F3Wkd3VT.png) + +I'm using a basic regex on the `poc_email` field to make sure that the user's input is *probably* a valid email address in the format `[some string]@[some string].[some string]`. + +```yaml +inputs: +[...] + description: + type: string + title: Description + description: Server function/purpose + default: Testing and evaluation + poc_name: + type: string + title: Point of Contact Name + default: Jack Shephard + poc_email: + type: string + title: Point of Contact Email + default: jack.shephard@virtuallypotato.com + pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$' + ticket: + type: string + title: Ticket/Request Number + default: 4815162342 +[...] +``` + +I'll also need to add these to the `resources` section of the template so that they will get passed along with the deployment properties. +![New resource properties](N7YllJkxS.png) + +I'm actually going to combine the `poc_name` and `poc_email` fields into a single `poc` string. + +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + <...> + poc: '${input.poc_name + " (" + input.poc_email + ")"}' + ticket: '${input.ticket}' + description: '${input.description}' + <...> +``` + +I'll save this as a new version so that the changes will be available in the Service Broker front-end. +![New template version](Z2aKLsLou.png) + +### Service Broker custom form +I can then go to Service Broker and drag the new fields onto the Custom Form canvas. 
(If the new fields don't show up, hit up the Content Sources section of Service Broker, select the content source, and click the "Save and Import" button to sync the changes.) While I'm at it, I set the Description field to display as a text area (encouraging more detailed input), and I also set all the fields on the form to be required. +![Service Broker form](unhgNySSzz.png) + +### vRO workflow +Okay, so I've got the information I want to pass on to vCenter. Now I need to whip up a new workflow in vRO that will actually do that (after [telling vRO how to connect to the vCenter](/vra8-custom-provisioning-part-two#interlude-connecting-vro-to-vcenter), of course). I'll want to call this after the VM has been provisioned, so I'll cleverly call the workflow "VM Post-Provisioning". +![Naming the new workflow](X9JhgWx8x.png) + +The workflow will have a single input from vRA, `inputProperties` of type `Properties`. +![Workflow input](zHrp6GPcP.png) + +The first thing this workflow needs to do is parse `inputProperties (Properties)` to get the name of the VM, and it will then use that information to query vCenter and grab the corresponding VM object. So I'll add a scriptable task item to the workflow canvas and call it `Get VM Object`. It will take `inputProperties (Properties)` as its sole input, and output a new variable called `vm` of type `VC:VirtualMachine`. +![Get VM Object action](5ATk99aPW.png) + +The script for this task is fairly straightforward: +```js +// JavaScript: Get VM Object +// Inputs: inputProperties (Properties) +// Outputs: vm (VC:VirtualMachine) + +var name = inputProperties.resourceNames[0] + +var vms = VcPlugin.getAllVirtualMachines(null, name) +System.log("Found VM object: " + vms[0]) +vm = vms[0] +``` + +I'll add another scriptable task item to the workflow to actually apply the notes to the VM - I'll call it `Set Notes`, and it will take both `vm (VC:VirtualMachine)` and `inputProperties (Properties)` as its inputs. 
+![Set Notes action](w24V6YVOR.png) + +The first part of the script creates a new VM config spec, inserts the description into the spec, and then reconfigures the selected VM with the new spec. + +The second part uses a built-in action to set the `Point of Contact` and `Ticket` custom attributes accordingly. + +```js +// Javascript: Set Notes +// Inputs: vm (VC:VirtualMachine), inputProperties (Properties) +// Outputs: None + +var notes = inputProperties.customProperties.description +var poc = inputProperties.customProperties.poc +var ticket = inputProperties.customProperties.ticket + +var spec = new VcVirtualMachineConfigSpec() +spec.annotation = notes +vm.reconfigVM_Task(spec) + +System.getModule("com.vmware.library.vc.customattribute").setOrCreateCustomField(vm,"Point of Contact", poc) +System.getModule("com.vmware.library.vc.customattribute").setOrCreateCustomField(vm,"Ticket", ticket) +``` + +### Extensibility subscription +Now I need to return to Cloud Assembly and create a new extensibility subscription that will call this new workflow at the appropriate time. I'll call it "VM Post-Provisioning" and attach it to the "Compute Post Provision" topic. +![Creating the new subscription](PmhVOWJsUn.png) + +And then I'll link it to my new workflow: +![Selecting the workflow](cEbWSOg00.png) + +### Testing +And then back to Service Broker to request a VM and see if it works: + +![Test request](Lq9DBCK_Y.png) + +It worked! +![New VM with notes](-Fuvz-GmF.png) + +In the future, I'll be exploring more features that I can add on to this "VM Post-Provisioning" workflow like creating static DNS records as needed. 
diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/unhgNySSzz.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/unhgNySSzz.png new file mode 100644 index 0000000..e17bf63 Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/unhgNySSzz.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/w24V6YVOR.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/w24V6YVOR.png new file mode 100644 index 0000000..9f2643d Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/w24V6YVOR.png differ diff --git a/content/post/adding-vm-notes-and-custom-attributes-with-vra8/zHrp6GPcP.png b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/zHrp6GPcP.png new file mode 100644 index 0000000..44e789e Binary files /dev/null and b/content/post/adding-vm-notes-and-custom-attributes-with-vra8/zHrp6GPcP.png differ diff --git a/content/post/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk/20210723-script.png b/content/post/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk/20210723-script.png new file mode 100644 index 0000000..191e9e6 Binary files /dev/null and b/content/post/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk/20210723-script.png differ diff --git a/content/post/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk/index.md b/content/post/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk/index.md new file mode 100644 index 0000000..0d6da83 --- /dev/null +++ b/content/post/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk/index.md @@ -0,0 +1,98 @@ +--- +series: Scripts +date: "2021-04-29T08:34:30Z" +usePageBundles: true +thumbnail: 20210723-script.png +tags: +- linux +- shell +- automation +title: Automatic unattended expansion of Linux root LVM volume to fill disk +toc: false +--- + +While working on my 
[vRealize Automation 8 project](/series/vra8), I wanted to let users specify how large a VM's system drive should be and have vRA apply that without any further user intervention. For instance, if the template has a 60GB C: drive and the user specifies that they want it to be 80GB, vRA will embiggen the new VM's VMDK to 80GB and then expand the guest file system to fill up the new free space. + +I'll get into the details of how that's implemented from the vRA side #soon, but first I needed to come up with simple scripts to extend the guest file system to fill the disk. + +This was pretty straight-forward on Windows with a short PowerShell script to grab the appropriate volume and resize it to its full capacity: +```powershell +$Partition = Get-Volume -DriveLetter C | Get-Partition +$Partition | Resize-Partition -Size ($Partition | Get-PartitionSupportedSize).sizeMax +``` + +It was a bit trickier for Linux systems though. My Linux templates all use LVM to abstract the file systems away from the physical disks, but they may have a different number of physical partitions or different names for the volume groups and logical volumes. So I needed to be able to automagically determine which logical volume was mounted as `/`, which volume group it was a member of, and which partition on which disk is used for that physical volume. I could then expand the physical partition to fill the disk, expand the volume group to fill the now-larger physical volume, grow the logical volume to fill the volume group, and (finally) extend the file system to fill the logical volume. + +I found a great script [here](https://github.com/alpacacode/Homebrewn-Scripts/blob/master/linux-scripts/partresize.sh) that helped with most of those operations, but it required the user to specify the physical and logical volumes. 
I modified it to auto-detect those, and here's what I came up with: + +{{% notice info "MBR only" %}} +When I cobbled together this script I was primarily targeting the Enterprise Linux (RHEL, CentOS) systems that I work with in my environment, and those happened to have MBR partition tables. This script would need to be modified a bit to work with GPT partitions like you might find on Ubuntu. +{{% /notice %}} + +```shell +#!/bin/bash +# This will attempt to automatically detect the LVM logical volume where / is mounted and then +# expand the underlying physical partition, LVM physical volume, LVM volume group, LVM logical +# volume, and Linux filesystem to consume new free space on the disk. +# Adapted from https://github.com/alpacacode/Homebrewn-Scripts/blob/master/linux-scripts/partresize.sh + +extenddisk() { + echo -e "\n+++Current partition layout of $disk:+++" + parted $disk --script unit s print + if [ $logical == 1 ]; then + parted $disk --script rm $ext_partnum + parted $disk --script "mkpart extended ${ext_startsector}s -1s" + parted $disk --script "set $ext_partnum lba off" + parted $disk --script "mkpart logical ext2 ${startsector}s -1s" + else + parted $disk --script rm $partnum + parted $disk --script "mkpart primary ext2 ${startsector}s -1s" + fi + parted $disk --script set $partnum lvm on + echo -e "\n\n+++New partition layout of $disk:+++" + parted $disk --script unit s print + partx -v -a $disk + pvresize $pvname + lvextend --extents +100%FREE --resize $lvpath + echo -e "\n+++New root partition size:+++" + df -h / | grep -v Filesystem +} +export LVM_SUPPRESS_FD_WARNINGS=1 +mountpoint=$(df --output=source / | grep -v Filesystem) # /dev/mapper/centos-root +lvdisplay $mountpoint > /dev/null +if [ $? != 0 ]; then + echo "Error: $mountpoint does not look like a LVM logical volume. Aborting." 
+ exit 1 +fi +echo -e "\n+++Current root partition size:+++" +df -h / | grep -v Filesystem +lvname=$(lvs --noheadings $mountpoint | awk '{print($1)}') # root +vgname=$(lvs --noheadings $mountpoint | awk '{print($2)}') # centos +lvpath="/dev/${vgname}/${lvname}" # /dev/centos/root +pvname=$(pvs | grep $vgname | tail -n1 | awk '{print($1)}') # /dev/sda2 +disk=$(echo $pvname | rev | cut -c 2- | rev) # /dev/sda +diskshort=$(echo $disk | grep -Po '[^\/]+$') # sda +partnum=$(echo $pvname | grep -Po '\d$') # 2 +startsector=$(fdisk -u -l $disk | grep $pvname | awk '{print $2}') # 2099200 +layout=$(parted $disk --script unit s print) # Model: VMware Virtual disk (scsi) Disk /dev/sda: 83886080s Sector size (logical/physical): 512B/512B Partition Table: msdos Disk Flags: Number Start End Size Type File system Flags 1 2048s 2099199s 2097152s primary xfs boot 2 2099200s 62914559s 60815360s primary lvm +if grep -Pq "^\s$partnum\s+.+?logical.+$" <<< "$layout"; then + logical=1 + ext_partnum=$(parted $disk --script unit s print | grep extended | grep -Po '^\s\d\s' | tr -d ' ') + ext_startsector=$(parted $disk --script unit s print | grep extended | awk '{print $2}' | tr -d 's') +else + logical=0 +fi +parted $disk --script unit s print | if ! grep -Pq "^\s$partnum\s+.+?[^,]+?lvm\s*$"; then + echo -e "Error: $pvname seems to have some flags other than 'lvm' set." + exit 1 +fi +if ! (fdisk -u -l $disk | grep $disk | tail -1 | grep $pvname | grep -q "Linux LVM"); then + echo -e "Error: $pvname is not the last LVM volume on disk $disk." + exit 1 +fi +ls /sys/class/scsi_device/*/device/rescan | while read path; do echo 1 > $path; done +ls /sys/class/scsi_host/host*/scan | while read path; do echo "- - -" > $path; done +extenddisk +``` + +And it works beautifully within my environment. Hopefully it'll work for yours too in case you have a similar need! 
diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/agent_config.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/agent_config.png new file mode 100644 index 0000000..ca547f9 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/agent_config.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/api_user.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/api_user.png new file mode 100644 index 0000000..9fad192 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/api_user.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/code.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/code.png new file mode 100644 index 0000000..1a2d5d7 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/code.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/created_subnets.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/created_subnets.png new file mode 100644 index 0000000..b2749fc Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/created_subnets.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/discovered_ips.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/discovered_ips.png new file mode 100644 index 0000000..5ba5e4e Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/discovered_ips.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/dvportgroups.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/dvportgroups.png new file mode 100644 index 0000000..e3ce0d2 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/dvportgroups.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/empty_sections.png 
b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/empty_sections.png new file mode 100644 index 0000000..71bf3c0 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/empty_sections.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/index.md b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/index.md new file mode 100644 index 0000000..fcd4711 --- /dev/null +++ b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/index.md @@ -0,0 +1,620 @@ +--- +title: "Bulk Import vSphere dvPortGroups to phpIPAM" # Title of the blog post. +date: 2022-02-04 # Date of post creation. +# lastmod: 2022-01-21T15:24:00-06:00 # Date when last modified +description: "I wrote a Python script to interface with the phpIPAM API and import a large number of networks exported from vSphere for IP management." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "code.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Scripts +tags: + - vmware + - powercli + - python + - api + - phpipam +comment: true # Disable comment if false. 
+--- + +I [recently wrote](/tanzu-community-edition-k8s-homelab/#a-real-workload---phpipam) about getting started with VMware's [Tanzu Community Edition](https://tanzucommunityedition.io/) and deploying [phpIPAM](https://phpipam.net/) as my first real-world Kubernetes workload. Well I've spent much of my time since then working on a script which would help to populate my phpIPAM instance with a list of networks to monitor. + +### Planning and Exporting +The first step in making this work was to figure out which networks I wanted to import. We've got hundreds of different networks in use across our production vSphere environments. I focused only on those which are portgroups on distributed virtual switches since those configurations are pretty standardized (being vCenter constructs instead of configured on individual hosts). These dvPortGroups bear a naming standard which conveys all sorts of useful information, and it's easy and safe to rename any dvPortGroups which _don't_ fit the standard (unlike renaming portgroups on a standard virtual switch). + +The standard naming convention is `[Site/Description] [Network Address]{/[Mask]}`. So the networks (across two virtual datacenters and two dvSwitches) look something like this: +![Production dvPortGroups approximated in my testing lab environment](dvportgroups.png) + +Some networks have masks in the name, some don't; and some use an underscore (`_`) rather than a slash (`/`) to separate the network from the mask . Most networks correctly include the network address with a `0` in the last octet, but some use an `x` instead. And the VLANs associated with the networks have a varying number of digits. Consistency can be difficult so these are all things that I had to keep in mind as I worked on a solution which would make a true best effort at importing all of these. + +As long as the dvPortGroup names stick to this format I can parse the name to come up with a description as well as the IP space of the network. 
The dvPortGroup also carries information about the associated VLAN, which is useful information to have. And I can easily export this information with a simple PowerCLI query: + +```powershell +PS /home/john> get-vdportgroup | select Name, VlanConfiguration + +Name VlanConfiguration +---- ----------------- +MGT-Home 192.168.1.0 +MGT-Servers 172.16.10.0 VLAN 1610 +BOW-Servers 172.16.20.0 VLAN 1620 +BOW-Servers 172.16.30.0 VLAN 1630 +BOW-Servers 172.16.40.0 VLAN 1640 +DRE-Servers 172.16.50.0 VLAN 1650 +DRE-Servers 172.16.60.x VLAN 1660 +VPOT8-Mgmt 172.20.10.0/27 VLAN 20 +VPOT8-Servers 172.20.10.32/27 VLAN 30 +VPOT8-Servers 172.20.10.64_26 VLAN 40 +``` + +In my [homelab](/vmware-home-lab-on-intel-nuc-9/), I only have a single vCenter. In production, we've got a handful of vCenters, and each manages the hosts in a given region. So I can use information about which vCenter hosts a dvPortGroup to figure out which region a network is in. When I import this data into phpIPAM, I can use the vCenter name to assign [remote scan agents](https://github.com/jbowdre/phpipam-agent-docker) to networks based on the region that they're in. I can also grab information about which virtual datacenter a dvPortGroup lives in, which I'll use for grouping networks into sites or sections. 
+ +The vCenter can be found in the `Uid` property returned by `get-vdportgroup`: +```powershell +PS /home/john> get-vdportgroup | select Name, VlanConfiguration, Datacenter, Uid + +Name VlanConfiguration Datacenter Uid +---- ----------------- ---------- --- +MGT-Home 192.168.1.0 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-27015/ +MGT-Servers 172.16.10.0 VLAN 1610 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-27017/ +BOW-Servers 172.16.20.0 VLAN 1620 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28010/ +BOW-Servers 172.16.30.0 VLAN 1630 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28011/ +BOW-Servers 172.16.40.0 VLAN 1640 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28012/ +DRE-Servers 172.16.50.0 VLAN 1650 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28013/ +DRE-Servers 172.16.60.x VLAN 1660 Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-28014/ +VPOT8-Mgmt 172.20.10.0/… VLAN 20 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35018/ +VPOT8-Servers 172.20.10… VLAN 30 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35019/ +VPOT8-Servers 172.20.10… VLAN 40 Other Lab /VIServer=lab\john@vcsa.lab.bowdre.net:443/DistributedPortgroup=DistributedVirtualPortgroup-dvportgroup-35020/ +``` + +It's not pretty, but it'll do the trick. 
All that's left is to export this data into a handy-dandy CSV-formatted file that I can easily parse for import: + +```powershell +get-vdportgroup | select Name, VlanConfiguration, Datacenter, Uid | export-csv -NoTypeInformation ./networks.csv +``` +![My networks.csv export, including the networks which don't match the naming criteria and will be skipped by the import process.](networks.csv.png) + +### Setting up phpIPAM +After [deploying a fresh phpIPAM instance on my Tanzu Community Edition Kubernetes cluster](/tanzu-community-edition-k8s-homelab/#a-real-workload---phpipam), there are a few additional steps needed to enable API access. To start, I log in to my phpIPAM instance and navigate to the **Administration > Server Management > phpIPAM Settings** page, where I enabled both the *Prettify links* and *API* feature settings - making sure to hit the **Save** button at the bottom of the page once I do so. +![Enabling the API](server_settings.png) + +Then I need to head to the **User Management** page to create a new user that will be used to authenticate against the API: +![New user creation](new_user.png) + +And finally, I head to the **API** section to create a new API key with Read/Write permissions: +![API key creation](api_user.png) + +I'm also going to head in to **Administration > IP Related Management > Sections** and delete the default sample sections so that the inventory will be nice and empty: +![We don't need no stinkin' sections!](empty_sections.png) + +### Script time +Well that's enough prep work; now it's time for the Python3 [script](https://github.com/jbowdre/misc-scripts/blob/main/Python/phpipam-bulk-import.py): + +```python +# The latest version of this script can be found on Github: +# https://github.com/jbowdre/misc-scripts/blob/main/Python/phpipam-bulk-import.py + +import requests +from collections import namedtuple + +check_cert = True +created = 0 +remote_agent = False +name_to_id = namedtuple('name_to_id', ['name', 'id']) + +## for 
testing only: +# from requests.packages.urllib3.exceptions import InsecureRequestWarning +# requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +# check_cert = False + +## Makes sure input fields aren't blank. +def validate_input_is_not_empty(field, prompt): + while True: + user_input = input(f'\n{prompt}:\n') + if len(user_input) == 0: + print(f'[ERROR] {field} cannot be empty!') + continue + else: + return user_input + + +## Takes in a list of dictionary items, extracts all the unique values for a given key, +# and returns a sorted list of those. +def get_sorted_list_of_unique_values(key, list_of_dict): + valueSet = set(sub[key] for sub in list_of_dict) + valueList = list(valueSet) + valueList.sort() + return valueList + + +## Match names and IDs +def get_id_from_sets(name, sets): + return [item.id for item in sets if name == item.name][0] + + +## Authenticate to phpIPAM endpoint and return an auth token +def auth_session(uri, auth): + print(f'Authenticating to {uri}...') + try: + req = requests.post(f'{uri}/user/', auth=auth, verify=check_cert) + except: + raise requests.exceptions.RequestException + if req.status_code != 200: + print(f'[ERROR] Authentication failure: {req.json()}') + raise requests.exceptions.RequestException + token = {"token": req.json()['data']['token']} + print('\n[AUTH_SUCCESS] Authenticated successfully!') + return token + + +## Find or create a remote scan agent for each region (vcenter) +def get_agent_sets(uri, token, regions): + agent_sets = [] + + def create_agent_set(uri, token, name): + import secrets + # generate a random secret to be used for identifying this agent + payload = { + 'name': name, + 'type': 'mysql', + 'code': secrets.base64.urlsafe_b64encode(secrets.token_bytes(24)).decode("utf-8"), + 'description': f'Remote scan agent for region {name}' + } + req = requests.post(f'{uri}/tools/scanagents/', data=payload, headers=token, verify=check_cert) + id = req.json()['id'] + agent_set = name_to_id(name, id) + 
print(f'[AGENT_CREATE] {name} created.') + return agent_set + + for region in regions: + name = regions[region]['name'] + req = requests.get(f'{uri}/tools/scanagents/?filter_by=name&filter_value={name}', headers=token, verify=check_cert) + if req.status_code == 200: + id = req.json()['data'][0]['id'] + agent_set = name_to_id(name, id) + else: + agent_set = create_agent_set(uri, token, name) + agent_sets.append(agent_set) + return agent_sets + + +## Find or create a section for each virtual datacenter +def get_section(uri, token, section, parentSectionId): + + def create_section(uri, token, section, parentSectionId): + payload = { + 'name': section, + 'masterSection': parentSectionId, + 'permissions': '{"2":"2"}', + 'showVLAN': '1' + } + req = requests.post(f'{uri}/sections/', data=payload, headers=token, verify=check_cert) + id = req.json()['id'] + print(f'[SECTION_CREATE] Section {section} created.') + return id + + req = requests.get(f'{uri}/sections/{section}/', headers=token, verify=check_cert) + if req.status_code == 200: + id = req.json()['data']['id'] + else: + id = create_section(uri, token, section, parentSectionId) + return id + + +## Find or create VLANs +def get_vlan_sets(uri, token, vlans): + vlan_sets = [] + + def create_vlan_set(uri, token, vlan): + payload = { + 'name': f'VLAN {vlan}', + 'number': vlan + } + req = requests.post(f'{uri}/vlan/', data=payload, headers=token, verify=check_cert) + id = req.json()['id'] + vlan_set = name_to_id(vlan, id) + print(f'[VLAN_CREATE] VLAN {vlan} created.') + return vlan_set + + for vlan in vlans: + if vlan != 0: + req = requests.get(f'{uri}/vlan/?filter_by=number&filter_value={vlan}', headers=token, verify=check_cert) + if req.status_code == 200: + id = req.json()['data'][0]['vlanId'] + vlan_set = name_to_id(vlan, id) + else: + vlan_set = create_vlan_set(uri, token, vlan) + vlan_sets.append(vlan_set) + return vlan_sets + + +## Find or create nameserver configurations for each region +def get_nameserver_sets(uri, 
token, regions): + + nameserver_sets = [] + + def create_nameserver_set(uri, token, name, nameservers): + payload = { + 'name': name, + 'namesrv1': nameservers, + 'description': f'Nameserver created for region {name}' + } + req = requests.post(f'{uri}/tools/nameservers/', data=payload, headers=token, verify=check_cert) + id = req.json()['id'] + nameserver_set = name_to_id(name, id) + print(f'[NAMESERVER_CREATE] Nameserver {name} created.') + return nameserver_set + + for region in regions: + name = regions[region]['name'] + req = requests.get(f'{uri}/tools/nameservers/?filter_by=name&filter_value={name}', headers=token, verify=check_cert) + if req.status_code == 200: + id = req.json()['data'][0]['id'] + nameserver_set = name_to_id(name, id) + else: + nameserver_set = create_nameserver_set(uri, token, name, regions[region]['nameservers']) + nameserver_sets.append(nameserver_set) + return nameserver_sets + + +## Find or create subnet for each dvPortGroup +def create_subnet(uri, token, network): + + def update_nameserver_permissions(uri, token, network): + nameserverId = network['nameserverId'] + sectionId = network['sectionId'] + req = requests.get(f'{uri}/tools/nameservers/{nameserverId}/', headers=token, verify=check_cert) + permissions = req.json()['data']['permissions'] + permissions = str(permissions).split(';') + if not sectionId in permissions: + permissions.append(sectionId) + if 'None' in permissions: + permissions.remove('None') + permissions = ';'.join(permissions) + payload = { + 'permissions': permissions + } + req = requests.patch(f'{uri}/tools/nameservers/{nameserverId}/', data=payload, headers=token, verify=check_cert) + + payload = { + 'subnet': network['subnet'], + 'mask': network['mask'], + 'description': network['name'], + 'sectionId': network['sectionId'], + 'scanAgent': network['agentId'], + 'nameserverId': network['nameserverId'], + 'vlanId': network['vlanId'], + 'pingSubnet': '1', + 'discoverSubnet': '1', + 'resolveDNS': '1', + 'DNSrecords': 
'1' + } + req = requests.post(f'{uri}/subnets/', data=payload, headers=token, verify=check_cert) + if req.status_code == 201: + network['subnetId'] = req.json()['id'] + update_nameserver_permissions(uri, token, network) + print(f"[SUBNET_CREATE] Created subnet {req.json()['data']}") + global created + created += 1 + elif req.status_code == 409: + print(f"[SUBNET_EXISTS] Subnet {network['subnet']}/{network['mask']} already exists.") + else: + print(f"[ERROR] Problem creating subnet {network['name']}: {req.json()}") + + +## Import list of networks from the specified CSV file +def import_networks(filepath): + print(f'Importing networks from {filepath}...') + import csv + import re + ipPattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.[0-9xX]{1,3}') + networks = [] + with open(filepath) as csv_file: + reader = csv.DictReader(csv_file) + line_count = 0 + for row in reader: + network = {} + if line_count > 0: + if(re.search(ipPattern, row['Name'])): + network['subnet'] = re.findall(ipPattern, row['Name'])[0] + if network['subnet'].split('.')[-1].lower() == 'x': + network['subnet'] = network['subnet'].lower().replace('x', '0') + network['name'] = row['Name'] + if '/' in row['Name'][-3]: + network['mask'] = row['Name'].split('/')[-1] + elif '_' in row['Name'][-3]: + network['mask'] = row['Name'].split('_')[-1] + else: + network['mask'] = '24' + network['section'] = row['Datacenter'] + try: + network['vlan'] = int(row['VlanConfiguration'].split('VLAN ')[1]) + except: + network['vlan'] = 0 + network['vcenter'] = f"{(row['Uid'].split('@'))[1].split(':')[0].split('.')[0]}" + networks.append(network) + line_count += 1 + print(f'Processed {line_count} lines and found:') + return networks + + +def main(): + import socket + import getpass + import argparse + from pathlib import Path + + parser = argparse.ArgumentParser() + parser.add_argument("filepath", type=Path) + + # Accept CSV file as an argument to the script or prompt for input if necessary + try: + p = parser.parse_args() + 
filepath = p.filepath + except: + # make sure filepath is a path to an actual file + print("""\n\n + This script helps to add vSphere networks to phpIPAM for IP address management. It is expected + that the vSphere networks are configured as portgroups on distributed virtual switches and + named like '[Description] [Subnet IP]{/[mask]}' (ex: 'LAB-Servers 192.168.1.0'). The following PowerCLI + command can be used to export the networks from vSphere: + + Get-VDPortgroup | Select Name, Datacenter, VlanConfiguration, Uid | Export-Csv -NoTypeInformation ./networks.csv + + Subnets added to phpIPAM will be automatically configured for monitoring either using the built-in + scan agent (default) or a new remote scan agent for each vCenter. + """) + while True: + filepath = Path(validate_input_is_not_empty('Filepath', 'Path to CSV-formatted export from vCenter')) + if filepath.exists(): + break + else: + print(f'[ERROR] Unable to find file at {filepath.name}.') + continue + + # get collection of networks to import + networks = import_networks(filepath) + networkNames = get_sorted_list_of_unique_values('name', networks) + print(f'\n- {len(networkNames)} networks:\n\t{networkNames}') + vcenters = get_sorted_list_of_unique_values('vcenter', networks) + print(f'\n- {len(vcenters)} vCenter servers:\n\t{vcenters}') + vlans = get_sorted_list_of_unique_values('vlan', networks) + print(f'\n- {len(vlans)} VLANs:\n\t{vlans}') + sections = get_sorted_list_of_unique_values('section', networks) + print(f'\n- {len(sections)} Datacenters:\n\t{sections}') + + regions = {} + for vcenter in vcenters: + nameservers = None + name = validate_input_is_not_empty('Region Name', f'Region name for vCenter {vcenter}') + for region in regions: + if name in regions[region]['name']: + nameservers = regions[region]['nameservers'] + if not nameservers: + nameservers = validate_input_is_not_empty('Nameserver IPs', f"Comma-separated list of nameserver IPs in {name}") + nameservers = 
nameservers.replace(',',';').replace(' ','') + regions[vcenter] = {'name': name, 'nameservers': nameservers} + + # make sure hostname resolves + while True: + hostname = input('\nFully-qualified domain name of the phpIPAM host:\n') + if len(hostname) == 0: + print('[ERROR] Hostname cannot be empty.') + continue + try: + test = socket.gethostbyname(hostname) + except: + print(f'[ERROR] Unable to resolve {hostname}.') + continue + else: + del test + break + + username = validate_input_is_not_empty('Username', f'Username with read/write access to {hostname}') + password = getpass.getpass(f'Password for {username}:\n') + apiAppId = validate_input_is_not_empty('App ID', f'App ID for API key (from https://{hostname}/administration/api/)') + + agent = input('\nUse per-region remote scan agents instead of a single local scanner? (y/N):\n') + try: + if agent.lower()[0] == 'y': + global remote_agent + remote_agent = True + except: + pass + + proceed = input(f'\n\nProceed with importing {len(networkNames)} networks to {hostname}? 
(y/N):\n') + try: + if proceed.lower()[0] == 'y': + pass + else: + import sys + sys.exit("Operation aborted.") + except: + import sys + sys.exit("Operation aborted.") + del proceed + + # assemble variables + uri = f'https://{hostname}/api/{apiAppId}' + auth = (username, password) + + # auth to phpIPAM + token = auth_session(uri, auth) + + # create nameserver entries + nameserver_sets = get_nameserver_sets(uri, token, regions) + vlan_sets = get_vlan_sets(uri, token, vlans) + if remote_agent: + agent_sets = get_agent_sets(uri, token, regions) + + # create the networks + for network in networks: + network['region'] = regions[network['vcenter']]['name'] + network['regionId'] = get_section(uri, token, network['region'], None) + network['nameserverId'] = get_id_from_sets(network['region'], nameserver_sets) + network['sectionId'] = get_section(uri, token, network['section'], network['regionId']) + if network['vlan'] == 0: + network['vlanId'] = None + else: + network['vlanId'] = get_id_from_sets(network['vlan'], vlan_sets) + if remote_agent: + network['agentId'] = get_id_from_sets(network['region'], agent_sets) + else: + network['agentId'] = '1' + create_subnet(uri, token, network) + + print(f'\n[FINISH] Created {created} of {len(networks)} networks.') + + +if __name__ == "__main__": + main() + +``` + +I'll run it and provide the path to the network export CSV file: +```bash +python3 phpipam-bulk-import.py ~/networks.csv +``` + +The script will print out a little descriptive bit about what sort of networks it's going to try to import and then will straight away start processing the file to identify the networks, vCenters, VLANs, and datacenters which will be imported: + +``` +Importing networks from /home/john/networks.csv... 
+Processed 17 lines and found: + +- 10 networks: + ['BOW-Servers 172.16.20.0', 'BOW-Servers 172.16.30.0', 'BOW-Servers 172.16.40.0', 'DRE-Servers 172.16.50.0', 'DRE-Servers 172.16.60.x', 'MGT-Home 192.168.1.0', 'MGT-Servers 172.16.10.0', 'VPOT8-Mgmt 172.20.10.0/27', 'VPOT8-Servers 172.20.10.32/27', 'VPOT8-Servers 172.20.10.64_26'] + +- 1 vCenter servers: + ['vcsa'] + +- 10 VLANs: + [0, 20, 30, 40, 1610, 1620, 1630, 1640, 1650, 1660] + +- 2 Datacenters: + ['Lab', 'Other Lab'] +``` + +It then starts prompting for the additional details which will be needed: + +``` +Region name for vCenter vcsa: +Labby + +Comma-separated list of nameserver IPs in Lab vCenter: +192.168.1.5 + +Fully-qualified domain name of the phpIPAM host: +ipam-k8s.lab.bowdre.net + +Username with read/write access to ipam-k8s.lab.bowdre.net: +api-user +Password for api-user: + + +App ID for API key (from https://ipam-k8s.lab.bowdre.net/administration/api/): +api-user + +Use per-region remote scan agents instead of a single local scanner? (y/N): +y +``` + +Up to this point, the script has only been processing data locally, getting things ready for talking to the phpIPAM API. But now, it prompts to confirm that we actually want to do the thing (yes please) and then gets to work: + +``` +Proceed with importing 10 networks to ipam-k8s.lab.bowdre.net? (y/N): +y +Authenticating to https://ipam-k8s.lab.bowdre.net/api/api-user... + +[AUTH_SUCCESS] Authenticated successfully! +[VLAN_CREATE] VLAN 20 created. +[VLAN_CREATE] VLAN 30 created. +[VLAN_CREATE] VLAN 40 created. +[VLAN_CREATE] VLAN 1610 created. +[VLAN_CREATE] VLAN 1620 created. +[VLAN_CREATE] VLAN 1630 created. +[VLAN_CREATE] VLAN 1640 created. +[VLAN_CREATE] VLAN 1650 created. +[VLAN_CREATE] VLAN 1660 created. +[SECTION_CREATE] Section Labby created. +[SECTION_CREATE] Section Lab created. 
+[SUBNET_CREATE] Created subnet 192.168.1.0/24
+[SUBNET_CREATE] Created subnet 172.16.10.0/24
+[SUBNET_CREATE] Created subnet 172.16.20.0/24
+[SUBNET_CREATE] Created subnet 172.16.30.0/24
+[SUBNET_CREATE] Created subnet 172.16.40.0/24
+[SUBNET_CREATE] Created subnet 172.16.50.0/24
+[SUBNET_CREATE] Created subnet 172.16.60.0/24
+[SECTION_CREATE] Section Other Lab created.
+[SUBNET_CREATE] Created subnet 172.20.10.0/27
+[SUBNET_CREATE] Created subnet 172.20.10.32/27
+[SUBNET_CREATE] Created subnet 172.20.10.64/26
+
+[FINISH] Created 10 of 10 networks.
+```
+
+Success! Now I can log in to my phpIPAM instance and check out my newly-imported subnets:
+![New subnets!](created_subnets.png)
+
+Even the one with the weird name formatting was parsed and imported correctly:
+![Subnet details](subnet_detail.png)
+
+So now phpIPAM knows about the vSphere networks I care about, and it can keep track of which VLAN and nameservers go with which networks. Great! But it still isn't scanning or monitoring those networks, even though I told the script that I wanted to use a remote scan agent. And I can check in the **Administration > Server management > Scan agents** section of the phpIPAM interface to see my newly-created agent configuration.
+![New agent config](agent_config.png)
+
+... but I haven't actually *deployed* an agent yet. 
I'll do that by following the same basic steps [described here](/tanzu-community-edition-k8s-homelab/#phpipam-agent) to spin up my `phpipam-agent` on Kubernetes, and I'll plug in that automagically-generated code for the `IPAM_AGENT_KEY` environment variable: + +```yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phpipam-agent +spec: + selector: + matchLabels: + app: phpipam-agent + replicas: 1 + template: + metadata: + labels: + app: phpipam-agent + spec: + containers: + - name: phpipam-agent + image: ghcr.io/jbowdre/phpipam-agent:latest + env: + - name: IPAM_DATABASE_HOST + value: "ipam-k8s.lab.bowdre.net" + - name: IPAM_DATABASE_NAME + value: "phpipam" + - name: IPAM_DATABASE_USER + value: "phpipam" + - name: IPAM_DATABASE_PASS + value: "VMware1!" + - name: IPAM_DATABASE_PORT + value: "3306" + - name: IPAM_AGENT_KEY + value: "CxtRbR81r1ojVL2epG90JaShxIUBl0bT" + - name: IPAM_SCAN_INTERVAL + value: "15m" + - name: IPAM_RESET_AUTODISCOVER + value: "false" + - name: IPAM_REMOVE_DHCP + value: "false" + - name: TZ + value: "UTC" +``` + +I kick it off with a `kubectl apply` command and check back a few minutes later (after the 15-minute interval defined in the above YAML) to see that it worked, the remote agent scanned like it was supposed to and is reporting IP status back to the phpIPAM database server: +![Newly-discovered IPs](discovered_ips.png) + +I think I've got some more tweaks to do with this environment (why isn't phpIPAM resolving hostnames despite the correct DNS servers getting configured?) but this at least demonstrates a successful proof-of-concept import thanks to my Python script. Sure, I only imported 10 networks here, but I feel like I'm ready to process the several hundred which are available in our production environment now. + +And who knows, maybe this script will come in handy for someone else. Until next time! 
\ No newline at end of file diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/networks.csv.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/networks.csv.png new file mode 100644 index 0000000..1035f20 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/networks.csv.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/new_user.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/new_user.png new file mode 100644 index 0000000..44c386f Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/new_user.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/script_initial.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/script_initial.png new file mode 100644 index 0000000..b9c48fe Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/script_initial.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/server_settings.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/server_settings.png new file mode 100644 index 0000000..425dfe0 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/server_settings.png differ diff --git a/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/subnet_detail.png b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/subnet_detail.png new file mode 100644 index 0000000..5d15af6 Binary files /dev/null and b/content/post/bulk-import-vsphere-dvportgroups-to-phpipam/subnet_detail.png differ diff --git a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md index 235571b..5f16135 100644 --- a/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md +++ b/content/post/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/index.md @@ -2,6 
+2,7 @@ series: Tips date: "2020-12-23T08:34:30Z" thumbnail: -lp1-DGiM.png +usePageBundles: true tags: - chromeos title: Burn an ISO to USB with the Chromebook Recovery Utility @@ -9,14 +10,14 @@ toc: false featured: true --- -There are a number of fantastic Windows applications for creating bootable USB drives from ISO images - but those don't work on a Chromebook. Fortunately there's an easily-available tool which will do the trick: Google's own [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) app. +There are a number of fantastic Windows applications for creating bootable USB drives from ISO images - but those don't work on a Chromebook. Fortunately there's an easily-available tool which will do the trick: Google's own [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) app. Normally that tool is used to creating bootable media to [reinstall Chrome OS on a broken Chromebook](https://support.google.com/chromebook/answer/1080595) (hence the name) but it also has the capability to write other arbitrary images as well. So if you find yourself needing to create a USB drive for installing ESXi on a computer in your [home lab](https://twitter.com/johndotbowdre/status/1341767090945077248) (more on that soon!) here's what you'll need to do: 1. Install the [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm). 2. Download the ISO you intend to use. 3. Rename the file to append `.bin` on the end, after the `.iso` bit: -![My renamed ISO for installing ESXi](uoTjgtbN1.png) +![My renamed ISO for installing ESXi](uoTjgtbN1.png) 4. Plug in the USB drive you're going to sacrifice for this effort - remember that ALL data on the drive will be erased. 5. 
Open the recovery utility, click on the gear icon at the top right, and select the *Use local image* option: ![The CRU menu](vdTpW9t7Q.png) diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210803_new_template_version.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210803_new_template_version.png new file mode 100644 index 0000000..3c8ddf7 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210803_new_template_version.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210803_updating_custom_form.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210803_updating_custom_form.png new file mode 100644 index 0000000..5c5a258 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210803_updating_custom_form.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_creating_bound_variable.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_creating_bound_variable.png new file mode 100644 index 0000000..65101a6 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_creating_bound_variable.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_task_inputs.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_task_inputs.png new file mode 100644 index 0000000..fcf6272 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_task_inputs.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_variables_added.png 
b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_variables_added.png new file mode 100644 index 0000000..e3d9b07 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210809_variables_added.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210811_inputproperties.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210811_inputproperties.png new file mode 100644 index 0000000..f79a821 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210811_inputproperties.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210811_new_workflow.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210811_new_workflow.png new file mode 100644 index 0000000..4e5d2aa Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210811_new_workflow.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_delete_dns_record_task.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_delete_dns_record_task.png new file mode 100644 index 0000000..40fdc4a Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_delete_dns_record_task.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_deprovision_variables.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_deprovision_variables.png new file mode 100644 index 0000000..403290e Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_deprovision_variables.png 
differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_deprovisioning_subscription.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_deprovisioning_subscription.png new file mode 100644 index 0000000..1921797 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_deprovisioning_subscription.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_test_deploy_request.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_test_deploy_request.png new file mode 100644 index 0000000..afa2f9b Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210812_test_deploy_request.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_delete_deployment.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_delete_deployment.png new file mode 100644 index 0000000..ebededc Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_delete_deployment.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_workflow_deletion.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_workflow_deletion.png new file mode 100644 index 0000000..e4d9e72 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_workflow_deletion.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_workflow_success.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_workflow_success.png new file mode 100644 
index 0000000..eadd469 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/20210813_workflow_success.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/Go3D-gemP.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/Go3D-gemP.png new file mode 100644 index 0000000..c47827e Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/Go3D-gemP.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/a5gtUrQbc.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/a5gtUrQbc.png new file mode 100644 index 0000000..72fb139 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/a5gtUrQbc.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/fJswso9KH.png b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/fJswso9KH.png new file mode 100644 index 0000000..29f53f5 Binary files /dev/null and b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/fJswso9KH.png differ diff --git a/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/index.md b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/index.md new file mode 100644 index 0000000..21a0075 --- /dev/null +++ b/content/post/creating-static-records-in-microsoft-dns-from-vrealize-automation/index.md @@ -0,0 +1,421 @@ +--- +series: vRA8 +date: "2021-08-13T00:00:00Z" +lastmod: "2022-01-18" +usePageBundles: true +thumbnail: 20210813_workflow_success.png +tags: +- vmware +- vra +- vro +- javascript +- powershell +- automation +title: Creating static records in Microsoft DNS from vRealize Automation +--- +One of the requirements for my vRA deployments is the 
ability to automatically create static `A` records for non-domain-joined systems so that users can connect without needing to know the IP address. The organization uses Microsoft DNS servers to provide resolution on the internal domain. At first glance, this shouldn't be too much of a problem: vRealize Orchestrator 8.x can run PowerShell scripts, and PowerShell can use the [`Add-DnsServerResourceRecord` cmdlet](https://docs.microsoft.com/en-us/powershell/module/dnsserver/add-dnsserverresourcerecord?view=windowsserver2019-ps) to create the needed records.
+
+Not so fast, though. That cmdlet is provided through the [Remote Server Administration Tools](https://docs.microsoft.com/en-us/troubleshoot/windows-server/system-management-components/remote-server-administration-tools) package so it won't be available within the limited PowerShell environment inside of vRO. A workaround might be to add a Windows machine to vRO as a remote PowerShell host, but then you run into [issues of credential hopping](https://communities.vmware.com/t5/vRealize-Orchestrator/unable-to-run-get-DnsServerResourceRecord-via-vRO-Powershell-wf/m-p/2286685).
+
+I eventually came across [this blog post](https://www.virtualnebula.com/blog/2017/7/14/microsoft-ad-dns-integration-over-ssh) which described adding a Windows machine as a remote *SSH* host instead. I'll deviate a bit from the described configuration, but that post did at least get me pointed in the right direction. This approach would get around the complicated authentication-tunneling business while still being pretty easy to set up. So let's go!
+
+### Preparing the SSH host
+I deployed a Windows Server 2019 Core VM to use as my SSH host, and I joined it to my AD domain as `win02.lab.bowdre.net`. Once that's taken care of, I need to install the RSAT DNS tools so that I can use the `Add-DnsServerResourceRecord` and associated cmdlets. 
I can do that through PowerShell like so: +```powershell +# Install RSAT DNS tools +Add-WindowsCapability -online -name Rsat.Dns.Tools~~~~0.0.1.0 +``` + +Instead of using a third-party SSH server, I'll use the OpenSSH Server that's already available in Windows 10 (1809+) and Server 2019: +```powershell +# Install OpenSSH Server +Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 +``` + +I'll also want to set it so that the default shell upon SSH login is PowerShell (rather than the standard Command Prompt) so that I can have easy access to those DNS cmdlets: +```powershell +# Set PowerShell as the default Shell (for access to DNS cmdlets) +New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" -Name DefaultShell -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" -PropertyType String -Force +``` + +I'll be using my `lab\vra` service account for managing DNS. I've already given it the appropriate rights on the DNS server, but I'll also add it to the Administrators group on my SSH host: +```powershell +# Add the service account as a local administrator +Add-LocalGroupMember -Group Administrators -Member "lab\vra" +``` + +And I'll modify the OpenSSH configuration so that only members of that Administrators group are permitted to log into the server via SSH: +```powershell +# Restrict SSH access to members in the local Administrators group +(Get-Content "C:\ProgramData\ssh\sshd_config") -Replace "# Authentication:", "$&`nAllowGroups Administrators" | Set-Content "C:\ProgramData\ssh\sshd_config" +``` + +Finally, I'll start the `sshd` service and set it to start up automatically: +```powershell +# Start service and set it to automatic +Set-Service -Name sshd -StartupType Automatic -Status Running +``` + +#### A quick test +At this point, I can log in to the server via SSH and confirm that I can create and delete records in my DNS zone: +```powershell +$ ssh vra@win02.lab.bowdre.net +vra@win02.lab.bowdre.net's password: + +Windows PowerShell +Copyright 
(C) Microsoft Corporation. All rights reserved. + +PS C:\Users\vra> Add-DnsServerResourceRecordA -ComputerName win01.lab.bowdre.net -Name testy -ZoneName lab.bowdre.net -AllowUpdateAny -IPv4Address 172.16.99.99 + +PS C:\Users\vra> nslookup testy +Server: win01.lab.bowdre.net +Address: 192.168.1.5 + +Name: testy.lab.bowdre.net +Address: 172.16.99.99 + +PS C:\Users\vra> Remove-DnsServerResourceRecord -ComputerName win01.lab.bowdre.net -Name testy -ZoneName lab.bowdre.net -RRType A -Force + +PS C:\Users\vra> nslookup testy +Server: win01.lab.bowdre.net +Address: 192.168.1.5 + +*** win01.lab.bowdre.net can't find testy: Non-existent domain +``` + +Cool! Now I just need to do that same thing, but from vRealize Orchestrator. First, though, I'll update the template so the requester can choose whether or not a static record will get created. + +### Template changes +#### Cloud Template +Similar to the template changes I made for [optionally joining deployed servers to the Active Directory domain](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template), I'll just be adding a simple boolean checkbox to the `inputs` section of the template in Cloud Assembly: +```yaml +formatVersion: 1 +inputs: + [...] + staticDns: + title: Create static DNS record + type: boolean + default: false + [...] +``` + +*Unlike* the AD piece, in the `resources` section I'll just bind a custom property called `staticDns` to the input with the same name: +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + [...] + staticDns: '${input.staticDns}' + [...] 
+``` + +So here's the complete cloud template that I've been working on: +```yaml +formatVersion: 1 +inputs: + site: + type: string + title: Site + enum: + - BOW + - DRE + image: + type: string + title: Operating System + oneOf: + - title: Windows Server 2019 + const: ws2019 + default: ws2019 + size: + title: Resource Size + type: string + oneOf: + - title: 'Micro [1vCPU|1GB]' + const: micro + - title: 'Tiny [1vCPU|2GB]' + const: tiny + - title: 'Small [2vCPU|2GB]' + const: small + default: small + network: + title: Network + type: string + adJoin: + title: Join to AD domain + type: boolean + default: true + staticDns: + title: Create static DNS record + type: boolean + default: false + environment: + type: string + title: Environment + oneOf: + - title: Development + const: D + - title: Testing + const: T + - title: Production + const: P + default: D + function: + type: string + title: Function Code + oneOf: + - title: Application (APP) + const: APP + - title: Desktop (DSK) + const: DSK + - title: Network (NET) + const: NET + - title: Service (SVS) + const: SVS + - title: Testing (TST) + const: TST + default: TST + app: + type: string + title: Application Code + minLength: 3 + maxLength: 3 + default: xxx + description: + type: string + title: Description + description: Server function/purpose + default: Testing and evaluation + poc_name: + type: string + title: Point of Contact Name + default: Jack Shephard + poc_email: + type: string + title: Point of Contact Email + default: jack.shephard@virtuallypotato.com + pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$' + ticket: + type: string + title: Ticket/Request Number + default: 4815162342 +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + site: '${input.site}' + environment: '${input.environment}' + function: '${input.function}' + app: '${input.app}' + ignoreActiveDirectory: '${!input.adJoin}' + activeDirectory: + relativeDN: 
'${"OU=Servers,OU=Computers,OU=" + input.site + ",OU=LAB"}' + customizationSpec: '${input.adJoin ? "vra-win-domain" : "vra-win-workgroup"}' + staticDns: '${input.staticDns}' + dnsDomain: lab.bowdre.net + poc: '${input.poc_name + " (" + input.poc_email + ")"}' + ticket: '${input.ticket}' + description: '${input.description}' + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + constraints: + - tag: 'comp:${to_lower(input.site)}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing + constraints: + - tag: 'net:${input.network}' +``` +I save the template, and then also hit the "Version" button to publish a new version to the catalog: +![Releasing new version](20210803_new_template_version.png) + +#### Service Broker Custom Form +I switch over to the Service Broker UI to update the custom form - but first I stop off at **Content & Policies > Content Sources**, select my Content Source, and hit the **Save & Import** button to force a sync of the cloud templates. I can then move on to the **Content & Policies > Content** section, click the 3-dot menu next to my template name, and select the option to **Customize Form**. + +I'll just drag the new Schema Element called `Create static DNS record` from the Request Inputs panel and on to the form canvas. I'll drop it right below the `Join to AD domain` field: +![Adding the field to the form](20210803_updating_custom_form.png) + +And then I'll hit the **Save** button so that my efforts are preserved. + +That should take care of the front-end changes. Now for the back-end stuff: I need to teach vRO how to connect to my SSH host and run the PowerShell commands, [just like I tested earlier](#a-quick-test). + + +### The vRO solution +I will be adding the DNS action on to my existing "VM Post-Provisioning" workflow (described [here](/adding-vm-notes-and-custom-attributes-with-vra8), which gets triggered after the VM has been successfully deployed. 
+
+#### Configuration Element
+But first, I'm going to go to the **Assets > Configurations** section of the Orchestrator UI and create a new Configuration Element to store variables related to the SSH host and DNS configuration.
+![Create a new configuration](Go3D-gemP.png)
+
+I'll call it `dnsConfig` and put it in my `CustomProvisioning` folder.
+![Giving it a name](fJswso9KH.png)
+
+And then I create the following variables:
+
+| Variable | Value | Type |
+| --- | --- | --- |
+| `sshHost` | `win02.lab.bowdre.net` | string |
+| `sshUser` | `vra` | string |
+| `sshPass` | `*****` | secureString |
+| `dnsServer` | `[win01.lab.bowdre.net]` | Array/string |
+| `supportedDomains` | `[lab.bowdre.net]` | Array/string |
+
+`sshHost` is my new `win02` server that I'm going to connect to via SSH, and `sshUser` and `sshPass` should explain themselves. The `dnsServer` array will tell the script which DNS servers to try to create the record on; this will just be a single server in my lab, but I'm going to construct the script to support multiple servers in case one isn't reachable. And `supportedDomains` will be used to restrict where I'll be creating records; again, that's just a single domain in my lab, but I'm building this solution to account for the possibility that a VM might need to be deployed on a domain where I can't create a static record in this way, so I want it to fail elegantly.
+
+Here's what the new configuration element looks like:
+![Variables defined](a5gtUrQbc.png)
+
+#### Workflow to create records
+I'll need to tell my workflow about the variables held in the `dnsConfig` Configuration Element I just created. I do that by opening the "VM Post-Provisioning" workflow in the vRO UI, clicking the **Edit** button, and then switching to the **Variables** tab. I create a variable for each member of `dnsConfig`, and enable the toggle to *Bind to configuration* so that I can select the corresponding item.
It's important to make sure that the variable type exactly matches what's in the configuration element so that you'll be able to pick it! +![Linking variable to config element](20210809_creating_bound_variable.png) + +I repeat that for each of the remaining variables until all the members of `dnsConfig` are represented in the workflow: +![Variables added](20210809_variables_added.png) + +Now we're ready for the good part: inserting a new scriptable task into the workflow schema. I'll called it `Create DNS Record` and place it directly after the `Set Notes` task. For inputs, the task will take in `inputProperties (Properties)` as well as everything from that `dnsConfig` configuration element: +![Task inputs](20210809_task_inputs.png) + +And here's the JavaScript for the task: +```js +// JavaScript: Create DNS Record task +// Inputs: inputProperties (Properties), dnsServers (Array/string), sshHost (string), sshUser (string), sshPass (secureString), supportedDomains (Array/string) +// Outputs: None + +var staticDns = inputProperties.customProperties.staticDns; +var hostname = inputProperties.resourceNames[0]; +var dnsDomain = inputProperties.customProperties.dnsDomain; +var ipAddress = inputProperties.addresses[0]; +var created = false; + +// check if user requested a record to be created and if the VM's dnsDomain is in the supportedDomains array +if (staticDns == "true" && supportedDomains.indexOf(dnsDomain) >= 0) { + System.log("Attempting to create DNS record for "+hostname+"."+dnsDomain+" at "+ipAddress+"...") + // create the ssh session to the intermediary host + var sshSession = new SSHSession(sshHost, sshUser); + System.debug("Connecting to "+sshHost+"...") + sshSession.connectWithPassword(sshPass) + // loop through DNS servers in case the first one doesn't respond + for each (var dnsServer in dnsServers) { + if (created == false) { + System.debug("Using DNS Server "+dnsServer+"...") + // insert the PowerShell command to create A record + var sshCommand = 
'Add-DnsServerResourceRecordA -ComputerName '+dnsServer+' -ZoneName '+dnsDomain+' -Name '+hostname+' -AllowUpdateAny -IPv4Address '+ipAddress; + System.debug("sshCommand: "+sshCommand) + // run the command and check the result + sshSession.executeCommand(sshCommand, true) + var result = sshSession.exitCode; + if (result == 0) { + System.log("Successfully created DNS record!") + // make a note that it was successful so we don't repeat this unnecessarily + created = true; + } + } + } + sshSession.disconnect() + if (created == false) { + System.warn("Error! Unable to create DNS record.") + } +} else { + System.log("Not trying to do DNS") +} +``` + +Now I can just save the workflow, and I'm done! - with this part. Of course, being able to *create* a static record is just one half of the fight; I also need to make sure that vRA will be able to clean up these static records when a deployment gets deleted. + +#### Workflow to delete records +I haven't previously created any workflows that fire on deployment removal, so I'll create a new one and call it `VM Deprovisioning`: +![New workflow](20210811_new_workflow.png) + +This workflow only needs a single input (`inputProperties (Properties)`) so it can receive information about the deployment from vRA: +![Workflow input](20210811_inputproperties.png) + +I'll also need to bind in the variables from the `dnsConfig` element as before: +![Workflow variables](20210812_deprovision_variables.png) + +The schema will include a single scriptable task: +![Delete DNS Record task](20210812_delete_dns_record_task.png) + +And it's going to be *pretty damn similar* to the other one: + +```js +// JavaScript: Delete DNS Record task +// Inputs: inputProperties (Properties), dnsServers (Array/string), sshHost (string), sshUser (string), sshPass (secureString), supportedDomains (Array/string) +// Outputs: None + +var staticDns = inputProperties.customProperties.staticDns; +var hostname = inputProperties.resourceNames[0]; +var dnsDomain = 
inputProperties.customProperties.dnsDomain; +var ipAddress = inputProperties.addresses[0]; +var deleted = false; + +// check if user requested a record to be created and if the VM's dnsDomain is in the supportedDomains array +if (staticDns == "true" && supportedDomains.indexOf(dnsDomain) >= 0) { + System.log("Attempting to remove DNS record for "+hostname+"."+dnsDomain+" at "+ipAddress+"...") + // create the ssh session to the intermediary host + var sshSession = new SSHSession(sshHost, sshUser); + System.debug("Connecting to "+sshHost+"...") + sshSession.connectWithPassword(sshPass) + // loop through DNS servers in case the first one doesn't respond + for each (var dnsServer in dnsServers) { + if (deleted == false) { + System.debug("Using DNS Server "+dnsServer+"...") + // insert the PowerShell command to delete A record + var sshCommand = 'Remove-DnsServerResourceRecord -ComputerName '+dnsServer+' -ZoneName '+dnsDomain+' -RRType A -Name '+hostname+' -Force'; + System.debug("sshCommand: "+sshCommand) + // run the command and check the result + sshSession.executeCommand(sshCommand, true) + var result = sshSession.exitCode; + if (result == 0) { + System.log("Successfully deleted DNS record!") + // make a note that it was successful so we don't repeat this unnecessarily + deleted = true; + } + } + } + sshSession.disconnect() + if (deleted == false) { + System.warn("Error! Unable to delete DNS record.") + } +} else { + System.log("No need to clean up DNS.") +} +``` + +Since this is a new workflow, I'll also need to head back to **Cloud Assembly > Extensibility > Subscriptions** and add a new subscription to call it when a deployment gets deleted. I'll call it "VM Deprovisioning", assign it to the "Compute Post Removal" Event Topic, and link it to my new "VM Deprovisioning" workflow. 
I *could* use the Condition option to filter this only for deployments which had a static DNS record created, but I'll later want to use this same workflow for other cleanup tasks so I'll just save it as is for now. +![VM Deprovisioning subscription](20210812_deprovisioning_subscription.png) + +### Testing +Now I can (finally) fire off a quick deployment to see if all this mess actually works: +![Test deploy request](20210812_test_deploy_request.png) + +Once the deployment completes, I go back into vRO, find the most recent item in the **Workflow Runs** view, and click over to the **Logs** tab to see how I did: +![Workflow success!](20210813_workflow_success.png) + +And I can run a quick query to make sure that name actually resolves: +```shell +❯ dig +short bow-ttst-xxx023.lab.bowdre.net A +172.16.30.10 +``` + +It works! + +Now to test the cleanup. For that, I'll head back to Service Broker, navigate to the **Deployments** tab, find my deployment, click the little three-dot menu button, and select the **Delete** option: +![Deleting the deployment](20210813_delete_deployment.png) + +Again, I'll check the **Workflow Runs** in vRO to see that the deprovisioning task completed successfully: +![VM Deprovisioning workflow](20210813_workflow_deletion.png) + +And I can `dig` a little more to make sure the name doesn't resolve anymore: +```shell +❯ dig +short bow-ttst-xxx023.lab.bowdre.net A + +``` + +It *really* works! + +### Conclusion +So there you have it - how I've got vRA/vRO able to create and delete static DNS records as needed, using a Windows SSH host as an intermediary. Cool, right? 
\ No newline at end of file diff --git a/content/post/docker-on-windows-10-with-wsl2/8p-PSHx1R.png b/content/post/docker-on-windows-10-with-wsl2/8p-PSHx1R.png new file mode 100644 index 0000000..9d91e70 Binary files /dev/null and b/content/post/docker-on-windows-10-with-wsl2/8p-PSHx1R.png differ diff --git a/content/post/docker-on-windows-10-with-wsl2/index.md b/content/post/docker-on-windows-10-with-wsl2/index.md new file mode 100644 index 0000000..caa93b8 --- /dev/null +++ b/content/post/docker-on-windows-10-with-wsl2/index.md @@ -0,0 +1,83 @@ +--- +date: "2020-09-22T08:34:30Z" +thumbnail: 8p-PSHx1R.png +usePageBundles: true +tags: +- docker +- windows +- wsl +- containers +title: Docker on Windows 10 with WSL2 +--- + +Microsoft's Windows Subsystem for Linux (WSL) 2 [was recently updated](https://devblogs.microsoft.com/commandline/wsl-2-support-is-coming-to-windows-10-versions-1903-and-1909/) to bring support for less-bleeding-edge Windows 10 versions (like 1903 and 1909). WSL2 is a big improvement over the first iteration (particularly with [better Docker support](https://www.docker.com/blog/docker-desktop-wsl-2-backport-update/)) so I was really looking forward to getting WSL2 loaded up on my work laptop. + +Here's how. + +### WSL2 + +#### Step Zero: Prereqs +You'll need Windows 10 1903 build 18362 or newer (on x64). You can check by running `ver` from a Command Prompt: +```powershell +C:\> ver +Microsoft Windows [Version 10.0.18363.1082] +``` +We're interested in that third set of numbers. 18363 is bigger than 18362 so we're good to go! 
+ +#### Step One: Enable the WSL feature +*(Not needed if you've already been using WSL1.)* +You can do this by dropping the following into an elevated Powershell prompt: +```powershell +dism.exe /online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux /all /norestart +``` + +#### Step Two: Enable the Virtual Machine Platform feature +Drop this in an elevated Powershell: +```powershell +dism.exe /online /enable-feature /featurename:VirtualMachinePlatform /all /norestart +``` +And then reboot (this is still Windows, after all). + +#### Step Three: Install the WSL2 kernel update package +Download it from [here](https://wslstorestorage.blob.core.windows.net/wslblob/wsl_update_x64.msi), and double-click the downloaded file to install it. + +#### Step Four: Set WSL2 as your default +Open a Powershell window and run: +```powershell +wsl --set-default-version 2 +``` + +#### Step Five: Install a Linux distro, or upgrade an existing one +If you're brand new to this WSL thing, head over to the [Microsoft Store](https://aka.ms/wslstore) and download your favorite Linux distribution. Once it's installed, launch it and you'll be prompted to set up a Linux username and password. + +If you've already got a WSL1 distro installed, first run `wsl -l -v` in Powershell to make sure you know the distro name: +```powershell +PS C:\Users\jbowdre> wsl -l -v + NAME STATE VERSION +* Debian Running 2 +``` +And then upgrade the distro to WSL2 with `wsl --set-version 2`: +```powershell +PS C:\Users\jbowdre> wsl --set-version Debian 2 +Conversion in progress, this may take a few minutes... +``` +Cool! + +### Docker +#### Step One: Download +Download Docker Desktop for Windows from [here](https://hub.docker.com/editions/community/docker-ce-desktop-windows/), making sure to grab the "Edge" version since it includes support for the backported WSL2 bits. + +#### Step Two: Install +Run the installer, and make sure to tick the box for installing the WSL2 engine. 
+ +#### Step Three: Configure Docker Desktop +Launch Docker Desktop from the Start menu, and you should be presented with this friendly prompt: +![Great news! We're supported.](lY2FTflbK.png) + +Hit that big friendly "gimme WSL2" button. Then open the Docker Settings from the system tray, and make sure that **General > Use the WSL 2 based engine** is enabled. Now navigate to **Resources > WSL Integration**, confirm that **Enable integration with my default WSL distro** is enabled as well. Smash the "Apply & Restart" button if you've made any changes. + +### Test it! +Fire up a WSL session and confirm that everything is working with `docker run hello-world`: +![Hello, world!](8p-PSHx1R.png) + +It's beautiful! \ No newline at end of file diff --git a/content/post/docker-on-windows-10-with-wsl2/lY2FTflbK.png b/content/post/docker-on-windows-10-with-wsl2/lY2FTflbK.png new file mode 100644 index 0000000..585e3dc Binary files /dev/null and b/content/post/docker-on-windows-10-with-wsl2/lY2FTflbK.png differ diff --git a/content/post/enable-tanzu-cli-auto-completion-bash-zsh/index.md b/content/post/enable-tanzu-cli-auto-completion-bash-zsh/index.md new file mode 100644 index 0000000..1d3e329 --- /dev/null +++ b/content/post/enable-tanzu-cli-auto-completion-bash-zsh/index.md @@ -0,0 +1,67 @@ +--- +title: "Enable Tanzu CLI Auto-Completion in bash and zsh" # Title of the blog post. +date: 2022-02-01T08:34:47-06:00 # Date of post creation. +# lastmod: 2022-02-01T08:34:47-06:00 # Date when last modified +description: "How to configure your Linux shell to help you do the Tanzu" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. 
+usePageBundles: true +# menu: main +# featureImage: "tanzu-completion.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "tanzu-completion.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips +tags: + - vmware + - linux + - tanzu + - kubernetes + - shell +comment: true # Disable comment if false. +--- + +Lately I've been spending some time [getting more familiar](/tanzu-community-edition-k8s-homelab/) with VMware's [Tanzu Community Edition](https://tanzucommunityedition.io/) Kubernetes distribution, but I'm still not quite familiar enough with the `tanzu` command line. If only there were a better way for me to discover the available commands for a given context and help me type them correctly... + +Oh, but there is! You see, one of the available Tanzu commands is `tanzu completion [shell]`, which will spit out the necessary code to generate handy context-based auto-completions appropriate for the shell of your choosing (provided that you choose either `bash` or `zsh`, that is). 
+ +Running `tanzu completion --help` will tell you what's needed, and you can just copy/paste the commands appropriate for your shell: + +```shell +# Bash instructions: + + ## Load only for current session: + source <(tanzu completion bash) + + ## Load for all new sessions: + tanzu completion bash > $HOME/.tanzu/completion.bash.inc + printf "\n# Tanzu shell completion\nsource '$HOME/.tanzu/completion.bash.inc'\n" >> $HOME/.bash_profile + +# Zsh instructions: + + ## Load only for current session: + source <(tanzu completion zsh) + + ## Load for all new sessions: + echo "autoload -U compinit; compinit" >> ~/.zshrc + tanzu completion zsh > "${fpath[1]}/_tanzu" +``` + +So to get the completions to load automatically whenever you start a `bash` shell, run: +```shell +tanzu completion bash > $HOME/.tanzu/completion.bash.inc +printf "\n# Tanzu shell completion\nsource '$HOME/.tanzu/completion.bash.inc'\n" >> $HOME/.bash_profile +``` + +For a `zsh` shell, it's: +```shell +echo "autoload -U compinit; compinit" >> ~/.zshrc +tanzu completion zsh > "${fpath[1]}/_tanzu" +``` + +And that's it! The next time you open a shell (or `source` your relevant profile), you'll be able to `[TAB]` your way through the Tanzu CLI! 
+ +![Tanzu CLI completion in zsh](tanzu-completion.gif) diff --git a/content/post/enable-tanzu-cli-auto-completion-bash-zsh/tanzu-completion.gif b/content/post/enable-tanzu-cli-auto-completion-bash-zsh/tanzu-completion.gif new file mode 100644 index 0000000..de48e34 Binary files /dev/null and b/content/post/enable-tanzu-cli-auto-completion-bash-zsh/tanzu-completion.gif differ diff --git a/content/post/enable-tanzu-cli-auto-completion-bash-zsh/tanzu-completion.png b/content/post/enable-tanzu-cli-auto-completion-bash-zsh/tanzu-completion.png new file mode 100644 index 0000000..77923d5 Binary files /dev/null and b/content/post/enable-tanzu-cli-auto-completion-bash-zsh/tanzu-completion.png differ diff --git a/content/post/esxi-arm-on-quartz64/add_host.png b/content/post/esxi-arm-on-quartz64/add_host.png new file mode 100644 index 0000000..6456018 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/add_host.png differ diff --git a/content/post/esxi-arm-on-quartz64/add_host_confirm.png b/content/post/esxi-arm-on-quartz64/add_host_confirm.png new file mode 100644 index 0000000..d9cf2f7 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/add_host_confirm.png differ diff --git a/content/post/esxi-arm-on-quartz64/advertised_subnets.png b/content/post/esxi-arm-on-quartz64/advertised_subnets.png new file mode 100644 index 0000000..41b4bce Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/advertised_subnets.png differ diff --git a/content/post/esxi-arm-on-quartz64/beagle_term_settings.png b/content/post/esxi-arm-on-quartz64/beagle_term_settings.png new file mode 100644 index 0000000..8afdc39 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/beagle_term_settings.png differ diff --git a/content/post/esxi-arm-on-quartz64/bios.png b/content/post/esxi-arm-on-quartz64/bios.png new file mode 100644 index 0000000..62a77e9 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/bios.png differ diff --git 
a/content/post/esxi-arm-on-quartz64/console_connection.jpg b/content/post/esxi-arm-on-quartz64/console_connection.jpg new file mode 100644 index 0000000..2fe3600 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/console_connection.jpg differ diff --git a/content/post/esxi-arm-on-quartz64/correct_time.png b/content/post/esxi-arm-on-quartz64/correct_time.png new file mode 100644 index 0000000..ed98193 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/correct_time.png differ diff --git a/content/post/esxi-arm-on-quartz64/dcui.png b/content/post/esxi-arm-on-quartz64/dcui.png new file mode 100644 index 0000000..2d6c4fe Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/dcui.png differ diff --git a/content/post/esxi-arm-on-quartz64/dcui_dns.png b/content/post/esxi-arm-on-quartz64/dcui_dns.png new file mode 100644 index 0000000..dcc5996 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/dcui_dns.png differ diff --git a/content/post/esxi-arm-on-quartz64/dcui_ip_address.png b/content/post/esxi-arm-on-quartz64/dcui_ip_address.png new file mode 100644 index 0000000..5be169a Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/dcui_ip_address.png differ diff --git a/content/post/esxi-arm-on-quartz64/dcui_system_customization.png b/content/post/esxi-arm-on-quartz64/dcui_system_customization.png new file mode 100644 index 0000000..978cf7c Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/dcui_system_customization.png differ diff --git a/content/post/esxi-arm-on-quartz64/deploy_from_url.png b/content/post/esxi-arm-on-quartz64/deploy_from_url.png new file mode 100644 index 0000000..9e292d5 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/deploy_from_url.png differ diff --git a/content/post/esxi-arm-on-quartz64/disabling_subnet_on_vyos.png b/content/post/esxi-arm-on-quartz64/disabling_subnet_on_vyos.png new file mode 100644 index 0000000..ef7a5cb Binary files /dev/null and 
b/content/post/esxi-arm-on-quartz64/disabling_subnet_on_vyos.png differ diff --git a/content/post/esxi-arm-on-quartz64/embedded_host_client_login.png b/content/post/esxi-arm-on-quartz64/embedded_host_client_login.png new file mode 100644 index 0000000..d4efe1c Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/embedded_host_client_login.png differ diff --git a/content/post/esxi-arm-on-quartz64/embedded_host_client_summary.png b/content/post/esxi-arm-on-quartz64/embedded_host_client_summary.png new file mode 100644 index 0000000..098f258 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/embedded_host_client_summary.png differ diff --git a/content/post/esxi-arm-on-quartz64/enabling_subnet_on_pho01.png b/content/post/esxi-arm-on-quartz64/enabling_subnet_on_pho01.png new file mode 100644 index 0000000..8da296c Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/enabling_subnet_on_pho01.png differ diff --git a/content/post/esxi-arm-on-quartz64/enclosure.jpg b/content/post/esxi-arm-on-quartz64/enclosure.jpg new file mode 100644 index 0000000..ccb72dd Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/enclosure.jpg differ diff --git a/content/post/esxi-arm-on-quartz64/esxi_install_1.png b/content/post/esxi-arm-on-quartz64/esxi_install_1.png new file mode 100644 index 0000000..2be96a0 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/esxi_install_1.png differ diff --git a/content/post/esxi-arm-on-quartz64/esxi_install_2.png b/content/post/esxi-arm-on-quartz64/esxi_install_2.png new file mode 100644 index 0000000..102393b Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/esxi_install_2.png differ diff --git a/content/post/esxi-arm-on-quartz64/esxi_install_3.png b/content/post/esxi-arm-on-quartz64/esxi_install_3.png new file mode 100644 index 0000000..04f9026 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/esxi_install_3.png differ diff --git 
a/content/post/esxi-arm-on-quartz64/esxi_install_4.png b/content/post/esxi-arm-on-quartz64/esxi_install_4.png new file mode 100644 index 0000000..8aa273f Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/esxi_install_4.png differ diff --git a/content/post/esxi-arm-on-quartz64/first_login.png b/content/post/esxi-arm-on-quartz64/first_login.png new file mode 100644 index 0000000..85f9535 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/first_login.png differ diff --git a/content/post/esxi-arm-on-quartz64/hooked_up.jpg b/content/post/esxi-arm-on-quartz64/hooked_up.jpg new file mode 100644 index 0000000..ef994d3 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/hooked_up.jpg differ diff --git a/content/post/esxi-arm-on-quartz64/host_added.png b/content/post/esxi-arm-on-quartz64/host_added.png new file mode 100644 index 0000000..2d011c6 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/host_added.png differ diff --git a/content/post/esxi-arm-on-quartz64/index.md b/content/post/esxi-arm-on-quartz64/index.md new file mode 100644 index 0000000..786c9ca --- /dev/null +++ b/content/post/esxi-arm-on-quartz64/index.md @@ -0,0 +1,416 @@ +--- +title: "ESXi ARM Edition on the Quartz64 SBC" # Title of the blog post. +date: 2022-04-23 # Date of post creation. +lastmod: 2022-12-14 +description: "Getting started with the experimental ESXi Arm Edition fling to run a VMware hypervisor on the PINE64 Quartz64 single-board computer, and installing a Tailscale node on Photon OS to facilitate improved remote access to my home network." # Description used for search engine. +featured: true # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. 
+usePageBundles: true
+# menu: main
+featureImage: "quartz64.jpg" # Sets featured image on blog post.
+# featureImageAlt: 'Description of image' # Alternative text for featured image.
+# featureImageCap: 'This is the featured image.' # Caption (optional).
+thumbnail: "quartz64.jpg" # Sets thumbnail image appearing inside card on homepage.
+# shareImage: "share.png" # Designate a separate image for social media sharing.
+codeLineNumbers: false # Override global value for showing of line numbers within code block.
+series: Projects
+tags:
+  - vmware
+  - linux
+  - chromeos
+  - homelab
+  - tailscale
+  - photon
+  - vpn
+comment: true # Disable comment if false.
+---
+{{% notice info "ESXi-ARM Fling v1.10 Update" %}}
+On July 20, 2022, VMware released a [major update](https://blogs.vmware.com/arm/2022/07/20/1-10/) for the ESXi-ARM Fling. Among [other fixes and improvements](https://flings.vmware.com/esxi-arm-edition#changelog), this version enables **in-place ESXi upgrades** and [adds support for the Quartz64's **on-board NIC**](https://twitter.com/jmcwhatever/status/1549935971822706688). To update, I:
+1. Wrote the new ISO installer to another USB drive.
+2. Attached the installer drive to the USB hub, next to the existing ESXi drive.
+3. Booted the installer and selected to upgrade ESXi on the existing device.
+4. Powered-off post-install, unplugged the hub, and attached the ESXi drive directly to the USB2 port on the Quartz64.
+5. Connected the ethernet cable to the onboard NIC.
+6. Booted to ESXi.
+7. Once booted, I used the DCUI to (re)configure the management network and activate the onboard network adapter.
+
+Now I've got directly-attached USB storage, and the onboard NIC provides gigabit connectivity. I've made a few tweaks to the rest of the article to reflect the lifting of those previous limitations.
+{{% /notice %}}
+
+Up until this point, [my homelab](/vmware-home-lab-on-intel-nuc-9/) has consisted of just a single Intel NUC9 ESXi host running a bunch of VMs. It's served me well but lately I've been thinking that it would be good to have an additional host for some of my workloads. In particular, I'd like to have a [Tailscale node](/secure-networking-made-simple-with-tailscale/) on my home network which _isn't_ hosted on the NUC so that I can patch ESXi remotely without cutting off my access. I appreciate the small footprint of the NUC so I'm not really interested in a large "grown-up" server at this time. So for now I thought it might be fun to experiment with [VMware's ESXi on ARM fling](https://flings.vmware.com/esxi-arm-edition) which makes it possible to run a full-fledged VMware hypervisor on a Raspberry Pi.
+
+Of course, I decided to embark upon this project at a time when Raspberry Pis are basically impossible to get. So instead I picked up a [PINE64 Quartz64](https://wiki.pine64.org/wiki/Quartz64) single-board computer (SBC) which seems like a potentially very-capable piece of hardware.... but there is a prominent warning at the bottom of the [store page](https://pine64.com/product/quartz64-model-a-8gb-single-board-computer/):
+
+{{% notice warning "Be Advised" %}}
+"The Quartz64 SBC still in early development stage, only suitable for developers and advanced users wishing to contribute to early software development. Both mainline and Rockchip’s BSP fork of Linux have already been booted on the platform and development is proceeding quickly, but it will be months before end-users and industry partners can reliably deploy it. If you need a single board computer for a private or industrial application today, we encourage you to choose a different board from our existing lineup or wait a few months until Quartz64 software reaches a sufficient degree of maturity."
+{{% /notice %}} + +More specifically, for my use case there will be a number of limitations (at least for now - this SBC is still pretty new to the market so hopefully support will be improving further over time): +- ~~The onboard NIC is not supported by ESXi.~~[^v1.10] +- Onboard storage (via eMMC, eSATA, or PCIe) is not supported. +- The onboard microSD slot is only used for loading firmware on boot, not for any other storage. +- Only two (of the four) USB ports are documented to work reliably. +- Of the remaining two ports, the lower USB3 port [shouldn't be depended upon either](https://wiki.pine64.org/wiki/Quartz64_Development#Confirmed_Broken) so I'm really just stuck with a single USB2 interface ~~which will need to handle both networking and storage~~[^v1.10].[^usb3] + +All that is to say that (as usual) I'll be embarking upon this project in Hard Mode - and I'll make it extra challenging (as usual) by doing all of the work from a Chromebook. In any case, here's how I managed to get ESXi running on the Quartz64 SBC and then deploy a small workload. + +[^usb3]: Jared McNeill, the maintainer of the firmware image I'm using *just* [pushed a commit](https://github.com/jaredmcneill/quartz64_uefi/commit/4bda76e9fce5ed153ac49fa9d51ff34e5dd56d52) which sounds like it may address this flaky USB3 issue but that was after I had gotten everything else working as described below. I'll check that out once a new release gets published. + +[^v1.10]: Fixed in the v1.10 release. 
+### Bill of Materials +Let's start with the gear (hardware and software) I needed to make this work: + +| Hardware | Purpose | +| --- | --- | +| [PINE64 Quartz64 Model-A 8GB Single Board Computer](https://pine64.com/product/quartz64-model-a-8gb-single-board-computer/) | kind of the whole point | +| [ROCKPro64 12V 5A US Power Supply](https://pine64.com/product/rockpro64-12v-5a-us-power-supply/) | provides power for the SBC | +| [Serial Console “Woodpecker” Edition](https://pine64.com/product/serial-console-woodpecker-edition/) | allows for serial console access | +| [Google USB-C Adapter](https://www.amazon.com/dp/B071G6NLHJ/) | connects the console adapter to my Chromebook | +| [Sandisk 64GB Micro SD Memory Card](https://www.amazon.com/dp/B00M55C1I2) | only holds the firmware; a much smaller size would be fine | +| [Monoprice USB-C MicroSD Reader](https://www.amazon.com/dp/B00YQM8352/) | to write firmware to the SD card from my Chromebook | +| [Samsung MUF-256AB/AM FIT Plus 256GB USB 3.1 Drive](https://www.amazon.com/dp/B07D7Q41PM) | ESXi boot device and local VMFS datastore | +| ~~[Cable Matters 3 Port USB 3.0 Hub with Ethernet](https://www.amazon.com/gp/product/B01J6583NK)~~ | ~~for network connectivity and to host the above USB drive~~[^v1.10] | +| [3D-printed open enclosure for QUARTZ64](https://www.thingiverse.com/thing:5308499) | protect the board a little bit while allowing for plenty of passive airflow | + +| Downloads | Purpose | +| --- | --- | +| [ESXi ARM Edition](https://customerconnect.vmware.com/downloads/get-download?downloadGroup=ESXI-ARM) (v1.10) | hypervisor | +| [Tianocore EDK II firmware for Quartz64](https://github.com/jaredmcneill/quartz64_uefi/releases) (2022-07-20) | firmware image | +| [Chromebook Recovery Utility](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) | easy way to write filesystem images to external media | +| [Beagle 
Term](https://chrome.google.com/webstore/detail/beagle-term/gkdofhllgfohlddimiiildbgoggdpoea) | for accessing the Quartz64 serial console | + +### Preparation +#### Firmware media +The very first task is to write the required firmware image (download [here](https://github.com/jaredmcneill/quartz64_uefi/releases)) to a micro SD card. I used a 64GB card that I had lying around but you could easily get by with a *much* smaller one; the firmware image is tiny, and the card can't be used for storing anything else. Since I'm doing this on a Chromebook, I'll be using the [Chromebook Recovery Utility (CRU)](https://chrome.google.com/webstore/detail/chromebook-recovery-utili/pocpnlppkickgojjlmhdmidojbmbodfm) for writing the images to external storage as described [in another post](/burn-an-iso-to-usb-with-the-chromebook-recovery-utility/). + +After downloading [`QUARTZ64_EFI.img.gz`](https://github.com/jaredmcneill/quartz64_uefi/releases/download/2022-07-20/QUARTZ64_EFI.img.gz), I need to get it into a format recognized by CRU and, in this case, that means extracting the gzipped archive and then compressing the `.img` file into a standard `.zip`: +``` +gunzip QUARTZ64_EFI.img.gz +zip QUARTZ64_EFI.img.zip QUARTZ64_EFI.img +``` + +I can then write it to the micro SD card by opening CRU, clicking on the gear icon, and selecting the *Use local image* option. + +![Writing the firmware image](writing_firmware.png) + +#### ESXi installation media +I'll also need to prepare the ESXi installation media (download [here](https://customerconnect.vmware.com/downloads/get-download?downloadGroup=ESXI-ARM)). For that, I'll be using a 256GB USB drive. Due to the limited storage options on the Quartz64, I'll be installing ESXi onto the same drive I use to boot the installer so, in this case, the more storage the better. By default, ESXi 7.0 will consume up to 128GB for the new `ESX-OSData` partition; whatever is leftover will be made available as a VMFS datastore. 
That could be problematic given the unavailable/flaky USB support of the Quartz64. (While you *can* install ESXi onto a smaller drive, down to about ~20GB, the lack of additional storage on this hardware makes it pretty important to take advantage of as much space as you can.) + +In any case, to make the downloaded `VMware-VMvisor-Installer-7.0-20133114.aarch64.iso` writeable with CRU all I need to do is add `.bin` to the end of the filename: +``` +mv VMware-VMvisor-Installer-7.0-20133114.aarch64.iso{,.bin} +``` + +Then it's time to write the image onto the USB drive: +![Writing the ESXi installer image](writing_esxi.png) + + +#### Console connection +I'll need to use the Quartz64 serial console interface and ["Woodpecker" edition console USB adapter](https://pine64.com/product/serial-console-woodpecker-edition/) to interact with the board until I get ESXi installed and can connect to it with the web interface or SSH. The adapter comes with a short breakout cable, and I connect it thusly: + +| Quartz64 GPIO pin | Console adapter pin | Wire color | +| --- | --- | --- | +| 6 | `GND` | Brown | +| 8 | `RXD` | Red | +| 10 | `TXD` | Orange | + +I leave the yellow wire dangling free on both ends since I don't need a `+V` connection for the console to work. +![Console connection](console_connection.jpg) + +To verify that I've got things working, I go ahead and pop the micro SD card containing the firmware into its slot on the bottom side of the Quartz64 board, connect the USB console adapter to my Chromebook, and open the [Beagle Term](https://chrome.google.com/webstore/detail/beagle-term/gkdofhllgfohlddimiiildbgoggdpoea) app to set up the serial connection. 
+ +I'll need to use these settings for the connection (which are the defaults selected by Beagle Term): + +| Setting | Value | +| -- | --- | +| Port | `/dev/ttyUSB0` | +| Bitrate | `115200` | +| Data Bit | `8 bit` | +| Parity | `none` | +| Stop Bit | `1` | +| Flow Control | `none` | + +![Beagle Term settings](beagle_term_settings.png) + +I hit **Connect** and then connect the Quartz64's power supply. I watch as it loads the firmware and then launches the BIOS menu: +![BIOS menu](bios.png) + +### Host creation +#### ESXi install +Now that I've got everything in order I can start the install. A lot of experimentation on my part confirmed the sad news about the USB ports: of the four USB ports, only the top-right USB2 port works reliably for me. So I connect my ~~USB NIC+hub to that port, and plug in my 256GB drive to the hub~~[^v1.10] 256GB USB drive there. This isn't ideal from a performance aspect, of course, but slow storage is more useful than no storage. + +On that note, remember what I mentioned earlier about how the ESXi installer would want to fill up ~128GB worth of whatever drive it targets? The ESXi ARM instructions say that you can get around that by passing the `autoPartitionOSDataSize` advanced option to the installer by pressing `[Shift] + O` in the ESXi bootloader, but the Quartz64-specific instructions say that you can't do that with this board since only the serial console is available... It turns out this is a (happy) lie. + +I hooked up a monitor to the board's HDMI port and a USB keyboard to a free port on the hub and verified that the keyboard let me maneuver through the BIOS menu. From here, I hit the **Reset** button on the Quartz64 to restart it and let it boot from the connected USB drive. When I got to the ESXi pre-boot countdown screen, I pressed `[Shift] + O` as instructed and added `autoPartitionOSDataSize=8192` to the boot options. 
This limits the size of the new-for-ESXi7 ESX-OSData VMFS-L volume to 8GB and will give me much more space for the local datastore. + +Beyond that it's a fairly typical ESXi install process: +![Hi, welcome to the ESXi for ARM installer. I'll be your UI this evening.](esxi_install_1.png) +![Just to be sure, I'm going to clobber everything on this USB drive.](esxi_install_2.png) +![Hold on to your butts, here we go!](esxi_install_3.png) +![Whew, we made it!](esxi_install_4.png) + +#### Initial configuration +After the installation completed, I rebooted the host and watched for the Direct Console User Interface (DCUI) to come up: +![ESXi DCUI](dcui.png) + +I hit `[F2]` and logged in with the root credentials to get to the System Customization menu: +![DCUI System Customization](dcui_system_customization.png) + +The host automatically received an IP issued by DHCP but I'd like for it to instead use a static IP. I'll also go ahead and configure the appropriate DNS settings. +![Setting the IP address](dcui_ip_address.png) +![Configuring DNS settings](dcui_dns.png) + +I also create the appropriate matching `A` and `PTR` records in my local DNS, and (after bouncing the management network) I can access the ESXi Embedded Host Client at `https://quartzhost.lab.bowdre.net`: +![ESXi Embedded Host Client login screen](embedded_host_client_login.png) +![Summary view of my new host!](embedded_host_client_summary.png) + +That's looking pretty good... but what's up with that date and time? Time has kind of lost all meaning in the last couple of years but I'm *reasonably* certain that January 1, 2001 was at least a few years ago. And I know from past experience that incorrect host time will prevent it from being successfully imported to a vCenter inventory. + +Let's clear that up by enabling the Network Time Protocol (NTP) service on this host. I'll do that by going to **Manage > System > Time & Date** and clicking the **Edit NTP Settings** button. 
I don't run a local NTP server so I'll point it at `pool.ntp.org` and set the service to start and stop with the host: +![NTP configuration](ntp_configuration.png) + +Now I hop over to the **Services** tab, select the `ntpd` service, and then click the **Start** button there. Once it's running, I then *restart* `ntpd` to help encourage the system to update the time immediately. +![Starting the NTP service](services.png) + +Once the service is started I can go back to **Manage > System > Time & Date**, click the **Refresh** button, and confirm that the host has been updated with the correct time: +![Correct time!](correct_time.png) + +With the time sorted, I'm just about ready to join this host to my vCenter, but first I'd like to take a look at the storage situation - after all, I did jump through those hoops with the installer to make sure that I would wind up with a useful local datastore. Upon going to **Storage > More storage > Devices** and clicking on the single listed storage device, I can see in the Partition Diagram that the ESX-OSData VMFS-L volume was indeed limited to 8GB, and the free space beyond that was automatically formatted as a VMFS datastore: +![Reviewing the partition diagram](storage_device.png) + +And I can also take a peek at that local datastore: +![Local datastore](storage_datastore.png) + +With 200+ gigabytes of free space on the datastore I should have ample room for a few lightweight VMs. + +#### Adding to vCenter +Alright, let's go ahead and bring the new host into my vCenter environment. That starts off just like any other host, by right-clicking an inventory location in the *Hosts & Clusters* view and selecting **Add Host**. +![Starting the process](add_host.png) + +![Reviewing the host details](add_host_confirm.png) + +![Successfully added to the vCenter](host_added.png) + +Success! I've now got a single-board hypervisor connected to my vCenter. 
Now let's give that host a workload.[^workloads] + +[^workloads]: Hosts *love* workloads. + +### Workload creation +As I mentioned earlier, my initial goal is to deploy a Tailscale node on my new host so that I can access my home network from outside of the single-host virtual lab environment. I've become a fan of using VMware's [Photon OS](https://vmware.github.io/photon/) so I'll get a VM deployed and then install the Tailscale agent. + +#### Deploying Photon OS +VMware provides Photon in a few different formats, as described on the [download page](https://github.com/vmware/photon/wiki/Downloading-Photon-OS). I'm going to use the "OVA with virtual hardware v13 arm64" version so I'll kick off that download of `photon_uefi.ova`. I'm actually going to download that file straight to my `deb01` Linux VM: +```shell +wget https://packages.vmware.com/photon/4.0/Rev2/ova/photon_uefi.ova +``` +and then spawn a quick Python web server to share it out: +```shell +❯ python3 -m http.server +Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... +``` + +That will let me deploy from a resource already inside my lab network instead of transferring the OVA from my laptop. So now I can go back to my vSphere Client and go through the steps to **Deploy OVF Template** to the new host, and I'll plug in the URL `http://deb01.lab.bowdre.net:8000/photon_uefi.ova`: +![Deploying a template from URL](deploy_from_url.png) + +I'll name it `pho01` and drop it in an appropriate VM folder: +![Naming the new VM](name_vm.png) + +And place it on the new Quartz64 host: +![Host placement](vm_placement.png) + +The rest of the OVF deployment is basically just selecting the default options and clicking through to finish it. And then once it's deployed, I'll go ahead and power on the new VM. 
+![The newly-created Photon VM](new_vm.png) + +#### Configuring Photon +There are just a few things I'll want to configure on this VM before I move on to installing Tailscale, and I'll start out simply by logging in with the remote console. + +{{% notice info "Default credentials" %}} +The default password for Photon's `root` user is `changeme`. You'll be forced to change that at first login. +{{% /notice %}} + +![First login, and the requisite password change](first_login.png) + +Now that I'm in, I'll set the hostname appropriately: +```bash +hostnamectl set-hostname pho01 +``` + +For now, the VM pulled an IP from DHCP but I would like to configure that statically instead. To do that, I'll create a new interface file: +```bash +cat > /etc/systemd/network/10-static-en.network << "EOF" + +[Match] +Name = eth0 + +[Network] +Address = 192.168.1.17/24 +Gateway = 192.168.1.1 +DNS = 192.168.1.5 +DHCP = no +IPForward = yes + +EOF + +chmod 644 /etc/systemd/network/10-static-en.network +systemctl restart systemd-networkd +``` + +I'm including `IPForward = yes` to [enable IP forwarding](https://tailscale.com/kb/1104/enable-ip-forwarding/) for Tailscale. + +With networking sorted, it's probably a good idea to check for and apply any available updates: +```bash +tdnf update -y +``` + +I'll also go ahead and create a normal user account (with sudo privileges) for me to use: +```bash +useradd -G wheel -m john +passwd john +``` + +Now I can use SSH to connect to the VM and ditch the web console: +```bash +❯ ssh pho01.lab.bowdre.net +Password: +john@pho01 [ ~ ]$ sudo whoami + +We trust you have received the usual lecture from the local System +Administrator. It usually boils down to these three things: + + #1) Respect the privacy of others. + #2) Think before you type. + #3) With great power comes great responsibility. + +[sudo] password for john +root +``` + +Looking good! 
+I'll now move on to the justification[^justification] for this entire exercise: + +[^justification]: Entirely arbitrary and fabricated justification. +#### Installing Tailscale +If I *weren't* doing this on hard mode, I could use Tailscale's [install script](https://tailscale.com/download) like I do on every other Linux system. Hard mode is what I do though, and the installer doesn't directly support Photon OS. I'll instead consult the [manual install instructions](https://tailscale.com/download/linux/static) which tell me to download the appropriate binaries from [https://pkgs.tailscale.com/stable/#static](https://pkgs.tailscale.com/stable/#static). So I'll grab the link for the latest `arm64` build and pull it down to the VM: + +```bash +curl https://pkgs.tailscale.com/stable/tailscale_1.22.2_arm64.tgz --output tailscale_arm64.tgz +``` + +Then I can unpack it: +```bash +sudo tdnf install tar +tar xvf tailscale_arm64.tgz +cd tailscale_1.22.2_arm64/ +``` + +So I've got the `tailscale` and `tailscaled` binaries as well as some sample service configs in the `systemd` directory: +```bash +john@pho01 [ ~/tailscale_1.22.2_arm64 ]$ +.: +total 32288 +drwxr-x--- 2 john users 4096 Mar 18 02:44 systemd +-rwxr-x--- 1 john users 12187139 Mar 18 02:44 tailscale +-rwxr-x--- 1 john users 20866538 Mar 18 02:44 tailscaled + +./systemd: +total 8 +-rw-r----- 1 john users 287 Mar 18 02:44 tailscaled.defaults +-rw-r----- 1 john users 674 Mar 18 02:44 tailscaled.service +``` + +Dealing with the binaries is straight-forward. I'll drop them into `/usr/bin/` and `/usr/sbin/` (respectively) and set the file permissions: +```bash +sudo install -m 755 tailscale /usr/bin/ +sudo install -m 755 tailscaled /usr/sbin/ +``` + +Then I'll descend to the `systemd` folder and see what's up: +```bash +john@pho01 [ ~/tailscale_1.22.2_arm64/ ]$ cd systemd/ + +john@pho01 [ ~/tailscale_1.22.2_arm64/systemd ]$ cat tailscaled.defaults +# Set the port to listen on for incoming VPN packets. 
+# Remote nodes will automatically be informed about the new port number, +# but you might want to configure this in order to set external firewall +# settings. +PORT="41641" + +# Extra flags you might want to pass to tailscaled. +FLAGS="" + +john@pho01 [ ~/tailscale_1.22.2_arm64/systemd ]$ cat tailscaled.service +[Unit] +Description=Tailscale node agent +Documentation=https://tailscale.com/kb/ +Wants=network-pre.target +After=network-pre.target NetworkManager.service systemd-resolved.service + +[Service] +EnvironmentFile=/etc/default/tailscaled +ExecStartPre=/usr/sbin/tailscaled --cleanup +ExecStart=/usr/sbin/tailscaled --state=/var/lib/tailscale/tailscaled.state --socket=/run/tailscale/tailscaled.sock --port $PORT $FLAGS +ExecStopPost=/usr/sbin/tailscaled --cleanup + +Restart=on-failure + +RuntimeDirectory=tailscale +RuntimeDirectoryMode=0755 +StateDirectory=tailscale +StateDirectoryMode=0700 +CacheDirectory=tailscale +CacheDirectoryMode=0750 +Type=notify + +[Install] +WantedBy=multi-user.target +``` + +`tailscaled.defaults` contains the default configuration that will be referenced by the service, and `tailscaled.service` tells me that it expects to find it at `/etc/default/tailscaled`. 
+So I'll copy it there and set the perms: +```bash +sudo install -m 644 tailscaled.defaults /etc/default/tailscaled +``` + +`tailscaled.service` will get dropped in `/usr/lib/systemd/system/`: +```bash +sudo install -m 644 tailscaled.service /usr/lib/systemd/system/ +``` + +Then I'll enable the service and start it: +```bash +sudo systemctl enable tailscaled.service +sudo systemctl start tailscaled.service +``` + +And finally log in to Tailscale, including my `tag:home` tag for [ACL purposes](/secure-networking-made-simple-with-tailscale/#acls) and a route advertisement for my home network so that my other Tailscale nodes can use this one to access other devices as well: +```bash +sudo tailscale up --advertise-tags "tag:home" --advertise-routes "192.168.1.0/24" +``` + +That will return a URL I can use to authenticate, and I'll then be able to view and manage the new Tailscale node from the `login.tailscale.com` admin portal: +![Success!](new_tailscale_node.png) + +You might remember [from last time](/secure-networking-made-simple-with-tailscale/#subnets-and-exit-nodes) that the "Subnets (!)" label indicates that this node is attempting to advertise a subnet route but that route hasn't yet been accepted through the admin portal. You may also remember that the `192.168.1.0/24` subnet is already being advertised by my `vyos` node:[^hassos] +![Actively-routed subnets show up black, while advertised-but-not-currently-routed subnets appear grey](advertised_subnets.png) + +Things could potentially get messy if I have two nodes advertising routes for the same subnet[^failover] so I'm going to use the admin portal to disable that route on `vyos` before enabling it for `pho01`. I'll let `vyos` continue to route the `172.16.0.0/16` subnet (which only exists inside the NUC's vSphere environment after all) and it can continue to function as an Exit Node as well. 
+![Disabling the subnet on vyos](disabling_subnet_on_vyos.png) + +![Enabling the subnet on pho01](enabling_subnet_on_pho01.png) + +![Updated subnets](updated_subnets.png) + +Now I can remotely access the VM (and thus my homelab!) from any of my other Tailscale-enrolled devices! + +[^hassos]: The [Tailscale add-on for Home Assistant](https://github.com/hassio-addons/addon-tailscale) also tries to advertise its subnets by default, but I leave that disabled in the admin portal as well. + +[^failover]: Tailscale does offer a [subnet router failover feature](https://tailscale.com/kb/1115/subnet-failover/) but it is only available starting on the [Business ($15/month) plan](https://tailscale.com/pricing/) and not the $48/year Personal Pro plan that I'm using. + + +### Conclusion +I actually received the Quartz64 waay back on March 2nd, and it's taken me until this week to get all the pieces in place and working the way I wanted. +{{< tweet user="johndotbowdre" id="1499194756148125701" >}} + +As is so often the case, a lot of time and effort would have been saved if I had RTFM'd[^rtfm] before diving in to the deep end. I definitely hadn't anticipated all the limitations that would come with the Quartz64 SBC before ordering mine. Now that it's done, though, I'm pretty pleased with the setup, and I feel like I learned quite a bit along the way. I keep reminding myself that this is still a very new hardware platform. I'm excited to see how things improve with future development efforts. + +[^rtfm]: Read The *Friendly* Manual. Yeah. 
+ diff --git a/content/post/esxi-arm-on-quartz64/name_vm.png b/content/post/esxi-arm-on-quartz64/name_vm.png new file mode 100644 index 0000000..ffea984 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/name_vm.png differ diff --git a/content/post/esxi-arm-on-quartz64/new_tailscale_node.png b/content/post/esxi-arm-on-quartz64/new_tailscale_node.png new file mode 100644 index 0000000..5aa6687 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/new_tailscale_node.png differ diff --git a/content/post/esxi-arm-on-quartz64/new_vm.png b/content/post/esxi-arm-on-quartz64/new_vm.png new file mode 100644 index 0000000..2f2f25c Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/new_vm.png differ diff --git a/content/post/esxi-arm-on-quartz64/ntp_configuration.png b/content/post/esxi-arm-on-quartz64/ntp_configuration.png new file mode 100644 index 0000000..5f18dd6 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/ntp_configuration.png differ diff --git a/content/post/esxi-arm-on-quartz64/quart64_sbc.jpg b/content/post/esxi-arm-on-quartz64/quart64_sbc.jpg new file mode 100644 index 0000000..c46c685 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/quart64_sbc.jpg differ diff --git a/content/post/esxi-arm-on-quartz64/quartz64.jpg b/content/post/esxi-arm-on-quartz64/quartz64.jpg new file mode 100644 index 0000000..328bef6 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/quartz64.jpg differ diff --git a/content/post/esxi-arm-on-quartz64/services.png b/content/post/esxi-arm-on-quartz64/services.png new file mode 100644 index 0000000..c6651d8 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/services.png differ diff --git a/content/post/esxi-arm-on-quartz64/storage_datastore.png b/content/post/esxi-arm-on-quartz64/storage_datastore.png new file mode 100644 index 0000000..2aad34a Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/storage_datastore.png differ diff --git 
a/content/post/esxi-arm-on-quartz64/storage_device.png b/content/post/esxi-arm-on-quartz64/storage_device.png new file mode 100644 index 0000000..7224da0 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/storage_device.png differ diff --git a/content/post/esxi-arm-on-quartz64/updated_subnets.png b/content/post/esxi-arm-on-quartz64/updated_subnets.png new file mode 100644 index 0000000..93725c4 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/updated_subnets.png differ diff --git a/content/post/esxi-arm-on-quartz64/vm_placement.png b/content/post/esxi-arm-on-quartz64/vm_placement.png new file mode 100644 index 0000000..841b4ba Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/vm_placement.png differ diff --git a/content/post/esxi-arm-on-quartz64/writing_esxi.png b/content/post/esxi-arm-on-quartz64/writing_esxi.png new file mode 100644 index 0000000..3293f25 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/writing_esxi.png differ diff --git a/content/post/esxi-arm-on-quartz64/writing_firmware.png b/content/post/esxi-arm-on-quartz64/writing_firmware.png new file mode 100644 index 0000000..ab4d0a0 Binary files /dev/null and b/content/post/esxi-arm-on-quartz64/writing_firmware.png differ diff --git a/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_export_selfsigned_ca.png b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_export_selfsigned_ca.png new file mode 100644 index 0000000..da2fe22 Binary files /dev/null and b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_export_selfsigned_ca.png differ diff --git a/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_get_salty.png b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_get_salty.png new file mode 100644 index 0000000..fb5e751 Binary files /dev/null and b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_get_salty.png differ diff --git a/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_ssc_403.png 
b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_ssc_403.png new file mode 100644 index 0000000..588ad27 Binary files /dev/null and b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_ssc_403.png differ diff --git a/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_vidm_login.png b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_vidm_login.png new file mode 100644 index 0000000..42a8b80 Binary files /dev/null and b/content/post/fixing-403-error-ssc-8-6-vra-idm/20211105_vidm_login.png differ diff --git a/content/post/fixing-403-error-ssc-8-6-vra-idm/index.md b/content/post/fixing-403-error-ssc-8-6-vra-idm/index.md new file mode 100644 index 0000000..4e527c1 --- /dev/null +++ b/content/post/fixing-403-error-ssc-8-6-vra-idm/index.md @@ -0,0 +1,120 @@ +--- +series: vRA8 +date: "2021-11-05T00:00:00Z" +thumbnail: 20211105_ssc_403.png +usePageBundles: true +tags: +- vmware +- vra +- lcm +- salt +- openssl +- certs +title: Fixing 403 error on SaltStack Config 8.6 integrated with vRA and vIDM +--- +I've been wanting to learn a bit more about [SaltStack Config](https://www.vmware.com/products/vrealize-automation/saltstack-config.html) so I recently deployed SSC 8.6 to my environment (using vRealize Suite Lifecycle Manager to do so as [described here](https://cosmin.gq/2021/02/02/deploying-saltstack-config-via-lifecycle-manager-in-a-vra-environment/)). I selected the option to integrate with my pre-existing vRA and vIDM instances so that I wouldn't have to manage authentication directly since I recall that the LDAP authentication piece was a little clumsy the last time I tried it. + +### The Problem +Unfortunately I ran into a problem immediately after the deployment completed: +![403 error from SSC](20211105_ssc_403.png) + +Instead of being redirected to the vIDM authentication screen, I get a 403 Forbidden error. 
+ +I used SSH to log in to the SSC appliance as `root`, and I found this in the `/var/log/raas/raas` log file: +``` +2021-11-05 18:37:47,705 [var.lib.raas.unpack._MEIV8zDs3.raas.mods.vra.params ][ERROR :252 ][Webserver:6170] SSL Exception - https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery may be using a self-signed certificate HTTPSConnectionPool(host='vra.lab.bowdre.net', port=443): Max retries exceeded with url: /csp/gateway/am/api/auth/discovery?username=service_type&state=aHR0cHM6Ly9zc2MubGFiLmJvd2RyZS5uZXQvaWRlbnRpdHkvYXBpL2NvcmUvYXV0aG4vY3Nw&redirect_uri=https%3A%2F%2Fssc.lab.bowdre.net%2Fidentity%2Fapi%2Fcore%2Fauthn%2Fcsp&client_id=ssc-299XZv71So (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:1076)'))) +2021-11-05 18:37:47,928 [tornado.application ][ERROR :1792][Webserver:6170] Uncaught exception GET /csp/gateway/am/api/loggedin/user/profile (192.168.1.100) +HTTPServerRequest(protocol='https', host='ssc.lab.bowdre.net', method='GET', uri='/csp/gateway/am/api/loggedin/user/profile', version='HTTP/1.1', remote_ip='192.168.1.100') +Traceback (most recent call last): + File "urllib3/connectionpool.py", line 706, in urlopen + File "urllib3/connectionpool.py", line 382, in _make_request + File "urllib3/connectionpool.py", line 1010, in _validate_conn + File "urllib3/connection.py", line 421, in connect + File "urllib3/util/ssl_.py", line 429, in ssl_wrap_socket + File "urllib3/util/ssl_.py", line 472, in _ssl_wrap_socket_impl + File "ssl.py", line 423, in wrap_socket + File "ssl.py", line 870, in _create + File "ssl.py", line 1139, in do_handshake +ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:1076) +``` + +Further, attempting to pull down that URL with `curl` also failed: +```sh +root@ssc [ ~ ]# curl 
https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery +curl: (60) SSL certificate problem: self signed certificate in certificate chain +More details here: https://curl.se/docs/sslcerts.html + +curl failed to verify the legitimacy of the server and therefore could not +establish a secure connection to it. To learn more about this situation and +how to fix it, please visit the web page mentioned above. +``` + +In my homelab, I am indeed using self-signed certificates. I also encountered the same issue in my lab at work, though, and I'm using certs issued by our enterprise CA there. I had run into a similar problem with previous versions of SSC, but the [quick-and-dirty workaround to disable certificate verification](https://communities.vmware.com/t5/VMware-vRealize-Discussions/SaltStack-Config-Integration-show-Blank-Page/td-p/2863973) doesn't seem to work anymore. + +### The Solution +Clearly I needed to import either the vRA system's certificate (for my homelab) or the certificate chain for my enterprise CA (for my work environment) into SSC's certificate store so that it will trust vRA. But how? + +I fumbled around for a bit and managed to get the required certs added to the system certificate store so that my `curl` test would succeed, but trying to access the SSC web UI still gave me a big middle finger. I eventually found [this documentation](https://docs.vmware.com/en/VMware-vRealize-Automation-SaltStack-Config/8.6/install-configure-saltstack-config/GUID-21A87CE2-8184-4F41-B71B-0FCBB93F21FC.html#troubleshooting-saltstack-config-environments-with-vrealize-automation-that-use-selfsigned-certificates-3) which describes how to configure SSC to work with self-signed certs, and it held the missing detail of how to tell the SaltStack Returner-as-a-Service (RaaS) component that it should use that system certificate store. + +So here's what I did to get things working in my homelab: +1. 
Point a browser to my vRA instance, click on the certificate error to view the certificate details, and then export the _CA_ certificate to a local file. (For a self-signed cert issued by LCM, this will likely be called something like `Automatically generated one-off CA authority for vRA`.)
+![Exporting the self-signed CA cert](20211105_export_selfsigned_ca.png)
+2. Open the file in a text editor, and copy the contents into a new file on the SSC appliance. I used `~/vra.crt`.
+3. Append the certificate to the end of the system `ca-bundle.crt`:
+```sh
+cat ~/vra.crt >> /etc/pki/tls/certs/ca-bundle.crt
+```
+4. Test that I can now `curl` from vRA without a certificate error:
+```sh
+root@ssc [ ~ ]# curl https://vra.lab.bowdre.net/csp/gateway/am/api/auth/discovery
+{"timestamp":1636139143260,"type":"CLIENT_ERROR","status":"400 BAD_REQUEST","error":"Bad Request","serverMessage":"400 BAD_REQUEST \"Required String parameter 'state' is not present\""}
+```
+5. Edit `/usr/lib/systemd/system/raas.service` to update the service definition so it will look to the `ca-bundle.crt` file by adding
+```
+Environment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt
+```
+above the `ExecStart` line:
+```sh
+root@ssc [ ~ ]# cat /usr/lib/systemd/system/raas.service
+[Unit]
+Description=The SaltStack Enterprise API Server
+After=network.target
+[Service]
+Type=simple
+User=raas
+Group=raas
+# to be able to bind port < 1024
+AmbientCapabilities=CAP_NET_BIND_SERVICE
+NoNewPrivileges=yes
+RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX AF_NETLINK
+PermissionsStartOnly=true
+ExecStartPre=/bin/sh -c 'systemctl set-environment FIPS_MODE=$(/opt/vmware/bin/ovfenv -q --key fips-mode)'
+ExecStartPre=/bin/sh -c 'systemctl set-environment NODE_TYPE=$(/opt/vmware/bin/ovfenv -q --key node-type)'
+Environment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt
+ExecStart=/usr/bin/raas
+TimeoutStopSec=90
+[Install]
+WantedBy=multi-user.target
+```
+6. 
Stop and restart the `raas` service: +```sh +systemctl daemon-reload +systemctl stop raas +systemctl start raas +``` +7. And then try to visit the SSC URL again. This time, it redirects successfully to vIDM: +![Successful vIDM redirect](20211105_vidm_login.png) +8. Log in and get salty: +![Get salty!](20211105_get_salty.png) + +The steps for doing this at work with an enterprise CA were pretty similar, with just slightly-different steps 1 and 2: +1. Access the enterprise CA and download the CA chain, which came in `.p7b` format. +2. Use `openssl` to extract the individual certificates: +```sh +openssl pkcs7 -inform PEM -outform PEM -in enterprise-ca-chain.p7b -print_certs > enterprise-ca-chain.pem +``` +Copy it to the SSC appliance, and then pick up with Step 3 above. + +I'm eager to dive deeper with SSC and figure out how best to leverage it with vRA. I'll let you know if/when I figure out any cool tricks! + +In the meantime, maybe my struggles today can help you get past similar hurdles in your SSC deployments. 
\ No newline at end of file diff --git a/content/post/getting-started-vra-rest-api/add_rest_host_auth.png b/content/post/getting-started-vra-rest-api/add_rest_host_auth.png new file mode 100644 index 0000000..f406304 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/add_rest_host_auth.png differ diff --git a/content/post/getting-started-vra-rest-api/add_rest_host_properties.png b/content/post/getting-started-vra-rest-api/add_rest_host_properties.png new file mode 100644 index 0000000..02f88dd Binary files /dev/null and b/content/post/getting-started-vra-rest-api/add_rest_host_properties.png differ diff --git a/content/post/getting-started-vra-rest-api/authorize_1.png b/content/post/getting-started-vra-rest-api/authorize_1.png new file mode 100644 index 0000000..76b39aa Binary files /dev/null and b/content/post/getting-started-vra-rest-api/authorize_1.png differ diff --git a/content/post/getting-started-vra-rest-api/authorize_2.png b/content/post/getting-started-vra-rest-api/authorize_2.png new file mode 100644 index 0000000..5624ce6 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/authorize_2.png differ diff --git a/content/post/getting-started-vra-rest-api/automation-api-docs.png b/content/post/getting-started-vra-rest-api/automation-api-docs.png new file mode 100644 index 0000000..abfcd4a Binary files /dev/null and b/content/post/getting-started-vra-rest-api/automation-api-docs.png differ diff --git a/content/post/getting-started-vra-rest-api/blank_template.png b/content/post/getting-started-vra-rest-api/blank_template.png new file mode 100644 index 0000000..f474bb2 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/blank_template.png differ diff --git a/content/post/getting-started-vra-rest-api/config_element_1.png b/content/post/getting-started-vra-rest-api/config_element_1.png new file mode 100644 index 0000000..eccfde6 Binary files /dev/null and 
b/content/post/getting-started-vra-rest-api/config_element_1.png differ diff --git a/content/post/getting-started-vra-rest-api/config_element_2.png b/content/post/getting-started-vra-rest-api/config_element_2.png new file mode 100644 index 0000000..24bed7d Binary files /dev/null and b/content/post/getting-started-vra-rest-api/config_element_2.png differ diff --git a/content/post/getting-started-vra-rest-api/created_configuration.png b/content/post/getting-started-vra-rest-api/created_configuration.png new file mode 100644 index 0000000..ee12d9e Binary files /dev/null and b/content/post/getting-started-vra-rest-api/created_configuration.png differ diff --git a/content/post/getting-started-vra-rest-api/flavor_mappings_swagger_request.png b/content/post/getting-started-vra-rest-api/flavor_mappings_swagger_request.png new file mode 100644 index 0000000..5d96f0f Binary files /dev/null and b/content/post/getting-started-vra-rest-api/flavor_mappings_swagger_request.png differ diff --git a/content/post/getting-started-vra-rest-api/getConfigValue_action.png b/content/post/getting-started-vra-rest-api/getConfigValue_action.png new file mode 100644 index 0000000..20e0abd Binary files /dev/null and b/content/post/getting-started-vra-rest-api/getConfigValue_action.png differ diff --git a/content/post/getting-started-vra-rest-api/image_input.png b/content/post/getting-started-vra-rest-api/image_input.png new file mode 100644 index 0000000..871320b Binary files /dev/null and b/content/post/getting-started-vra-rest-api/image_input.png differ diff --git a/content/post/getting-started-vra-rest-api/index.md b/content/post/getting-started-vra-rest-api/index.md new file mode 100644 index 0000000..556eb19 --- /dev/null +++ b/content/post/getting-started-vra-rest-api/index.md @@ -0,0 +1,735 @@ +--- +title: "Getting Started with the vRealize Automation REST API" # Title of the blog post. +date: 2022-06-03 # Date of post creation. 
+# lastmod: 2022-05-20T16:21:22-05:00 # Date when last modified +description: "Using HTTPie and Swagger to learn about interacting with the VMware vRealize Automation REST API, and then leveraging vRealize Orchestrator actions with API queries to dynamically populate a vRA request form." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: vRA8 # Projects, Scripts, vRA8 +tags: + - vmware + - vra + - javascript + - vro + - automation + - rest + - api +comment: true # Disable comment if false. +--- +I've been doing a bit of work lately to make my vRealize Automation setup more flexible and dynamic and less dependent upon hardcoded values. To that end, I thought it was probably about time to learn how to interact with the vRA REST API. I wrote this post to share what I've learned and give a quick crash course on how to start doing things with the API. + +### Exploration Toolkit +#### Swagger +It can be difficult to figure out where to start when learning a new API. Fortunately, VMware thoughtfully included a [Swagger](https://swagger.io/) specification for the API so that we can explore it in an interactive way. 
This is available for on-prem vRA environments at `https://{vra-fqdn}/automation-ui/api-docs/` (so `https://vra.lab.bowdre.net/automation-ui/api-docs/` in my case). You can also browse most of it online at [www.mgmt.cloud.vmware.com/automation-ui/api-docs/](https://www.mgmt.cloud.vmware.com/automation-ui/api-docs/)[^vracloud]. Playing with Swagger on your on-prem instance will even let you perform requests straight from the browser, which can be a great way to gain familiarity with how requests should be structured. + +![The vRA Automation UI API Docs](automation-api-docs.png) + +I'm ultimately going to be working with the Infrastructure as a Service API but before I can do that I'll need to talk to the Identity API to log in. So let's start the exploration there, with the Login Controller. + +![Login Controller Request](login_controller_1.png) + +That tells me that I'll need to send a `POST` request to the endpoint at `/csp/gateway/am/api/login`, and I'll need to include `username`, `password`, `domain`, and `scope` in the request body. 
I can click the **Try it out** button to take this endpoint for a spin and just insert appropriate values in the request:[^password] +![Trying it out](login_controller_2.png) + +After hitting **Execute**, the Swagger UI will populate the *Responses* section with some useful information, like how the request would be formatted for use with `curl`: +![curl request format](login_controller_3.png) + +So I could easily replicate this using the `curl` utility by just copying and pasting the following into a shell: +```shell +curl -X 'POST' \ + 'https://vra.lab.bowdre.net/csp/gateway/am/api/login' \ + -H 'accept: */*' \ + -H 'Content-Type: application/json' \ + -d '{ + "username": "vra", + "password": "********", + "domain": "lab.bowdre.net", + "scope": "" +}' +``` + +Scrolling further reveals the authentication token returned by the identity service: +![Authentication token](login_controller_4.png) + +I can copy the contents of that `cspAuthToken` field and use it for authenticating other API operations. 
For instance, I'll go to the Infrastructure as a Service API Swagger UI and click the **Authorize** button at the top of the screen: +![Authorize me!](authorize_1.png) +And then paste the token into the header as `Bearer [token]`: +![Bearer bearing the token](authorize_2.png) + +Now I can go find an IaaS API that I'm interested in querying (like `/iaas/api/flavor-profiles` to see which flavor mappings are defined in vRA), and hit **Try it out** and then **Execute**: +![Using Swagger to query for flavor mappings](flavor_mappings_swagger_request.png) + +And here's the result: +```json {hl_lines=[6,10,14,44,48,52,56,60,64]} +{ + "content": [ + { + "flavorMappings": { + "mapping": { + "1vCPU | 2GB [tiny]": { + "cpuCount": 1, + "memoryInMB": 2048 + }, + "1vCPU | 1GB [micro]": { + "cpuCount": 1, + "memoryInMB": 1024 + }, + "2vCPU | 4GB [small]": { + "cpuCount": 2, + "memoryInMB": 4096 + } + }, + "_links": { + "region": { + "href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" + } + } + }, + "externalRegionId": "Datacenter:datacenter-39056", + "cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68", + "name": "", + "id": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9-3617c011-39db-466e-a7f3-029f4523548f", + "updatedAt": "2022-05-05", + "organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "orgId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "_links": { + "self": { + "href": "/iaas/api/flavor-profiles/61ebe5bf-5f55-4dee-8533-7ad05c067dd9-3617c011-39db-466e-a7f3-029f4523548f" + }, + "region": { + "href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" + } + } + }, + { + "flavorMappings": { + "mapping": { + "2vCPU | 8GB [medium]": { + "cpuCount": 2, + "memoryInMB": 8192 + }, + "1vCPU | 2GB [tiny]": { + "cpuCount": 1, + "memoryInMB": 2048 + }, + "8vCPU | 16GB [giant]": { + "cpuCount": 8, + "memoryInMB": 16384 + }, + "1vCPU | 1GB [micro]": { + "cpuCount": 1, + "memoryInMB": 1024 + }, + "2vCPU | 4GB [small]": { + "cpuCount": 2, + "memoryInMB": 4096 + }, + "4vCPU 
| 12GB [large]": { + "cpuCount": 4, + "memoryInMB": 12288 + } + }, + "_links": { + "region": { + "href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" + } + } + }, + "externalRegionId": "Datacenter:datacenter-1001", + "cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68", + "name": "", + "id": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9-c0d2a662-9ee5-4a27-9a9e-e92a72668136", + "updatedAt": "2022-05-05", + "organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "orgId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "_links": { + "self": { + "href": "/iaas/api/flavor-profiles/61ebe5bf-5f55-4dee-8533-7ad05c067dd9-c0d2a662-9ee5-4a27-9a9e-e92a72668136" + }, + "region": { + "href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" + } + } + } + ], + "totalElements": 2, + "numberOfElements": 2 +} +``` +So that API call will tell me about the Flavor Profiles as well as which Region the profiles belong to. + +As you can see, Swagger can really help to jump-start the exploration of a new API, but it can get a bit clumsy for repeated queries. And while I _could_ just use `curl` for further API exercises, I'd rather use a tool built specifically for API tomfoolery: + +[^vracloud]: The online version is really intended for the vRealize Automation Cloud hosted solution. It can be a useful reference but some APIs are missing. +[^password]: This request form is pure plaintext so you'd never have known that my password is actually `********` if I hadn't mentioned it. Whoops! +#### HTTPie +[HTTPie](https://httpie.io/) is a handy command-line utility optimized for interacting with web APIs. This will make things easier as I dig deeper. 
+ +Installing the [Debian package](https://httpie.io/docs/cli/debian-and-ubuntu) is a piece of ~~cake~~ _pie_[^pie]: +```shell +curl -SsL https://packages.httpie.io/deb/KEY.gpg | sudo apt-key add - +sudo curl -SsL -o /etc/apt/sources.list.d/httpie.list https://packages.httpie.io/deb/httpie.list +sudo apt update +sudo apt install httpie +``` + +Once installed, running `http` will give me a quick overview of how to use this new tool: +```shell {hl_lines=[3]} +; http +usage: + http [METHOD] URL [REQUEST_ITEM ...] + +error: + the following arguments are required: URL + +for more information: + run 'http --help' or visit https://httpie.io/docs/cli +``` +HTTPie cleverly interprets anything passed after the URL as a [request item](https://httpie.io/docs/cli/request-items), and it determines the item type based on a simple key/value syntax: +> Each request item is simply a key/value pair separated with the following characters: `:` (headers), `=` (data field, e.g., JSON, form), `:=` (raw data field), `==` (query parameters), `@` (file upload). 
+ +So my earlier request for an authentication token becomes: +```shell +https POST vra.lab.bowdre.net/csp/gateway/am/api/login username='vra' password='********' domain='lab.bowdre.net' +``` +{{% notice tip "Working with Self-Signed Certificates" %}} +If your vRA endpoint is using a self-signed or otherwise untrusted certificate, pass the HTTPie option `--verify=no` to ignore certificate errors: +``` +https --verify=no POST [URL] [REQUEST_ITEMS] +``` +{{% /notice %}} + +Running that will return a bunch of interesting headers but I'm mainly interested in the response body: +```json +{ + "cspAuthToken": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjI4NDY0MjAzMzA2NDQwMTQ2NDQifQ.eyJpc3MiOiJDTj1QcmVsdWRlIElkZW50aXR5IFNlcnZpY2UsT1U9Q01CVSxPPVZNd2FyZSxMPVNvZmlhLFNUPVNvZmlhLEM9QkciLCJpYXQiOjE2NTQwMjQw[...]HBOQQwEepXTNAaTv9gWMKwvPzktmKWyJFmC64FGomRyRyWiJMkLy3xmvYQERwxaDj_15-ErjC6F3c2mV1qIqES2oZbEpjxar16ZVSPshIaOoWRXe5uZB21tkuwVMgZuuwgmpliG_JBa1Y6Oh0FZBbI7o0ERro9qOW-s2npz4Csv5FwcXt0fa4esbXXIKINjqZMh9NDDb23bUabSag" +} +``` + +There's the auth token[^token] that I'll need for subsequent requests. I'll store that in a variable so that it's easier to wield: +```shell +token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjI4NDY0MjAzMzA2NDQwMTQ2NDQifQ.eyJpc3MiOiJDTj1QcmVsdWRlIElkZW50aXR5IFNlcnZpY2UsT1U9Q01CVSxPPVZNd2FyZSxMPVNvZmlhLFNUPVNvZmlhLEM9QkciLCJpYXQiOjE2NTQwMjQw[...]HBOQQwEepXTNAaTv9gWMKwvPzktmKWyJFmC64FGomRyRyWiJMkLy3xmvYQERwxaDj_15-ErjC6F3c2mV1qIqES2oZbEpjxar16ZVSPshIaOoWRXe5uZB21tkuwVMgZuuwgmpliG_JBa1Y6Oh0FZBbI7o0ERro9qOW-s2npz4Csv5FwcXt0fa4esbXXIKINjqZMh9NDDb23bUabSag +``` + +So now if I want to find out which images have been configured in vRA, I can ask: +```shell +https GET vra.lab.bowdre.net/iaas/api/images "Authorization: Bearer $token" +``` +{{% notice info "Request Items" %}} +Remember from above that HTTPie will automatically insert key/value pairs separated by a colon into the request header. 
+{{% /notice %}} + +And I'll get back some headers followed by an JSON object detailing the defined image mappings broken up by region: +```json {hl_lines=[11,14,37,40,53,56]} +{ + "content": [ + { + "_links": { + "region": { + "href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" + } + }, + "externalRegionId": "Datacenter:datacenter-39056", + "mapping": { + "Photon 4": { + "_links": { + "region": { + "href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" + } + }, + "cloudConfig": "", + "constraints": [], + "description": "photon-arm", + "externalId": "50023810-ae56-3c58-f374-adf6e0645886", + "externalRegionId": "Datacenter:datacenter-39056", + "id": "8885e87d8a5898cf12b5abc3e5c715e5a65f7179", + "isPrivate": false, + "name": "photon-arm", + "osFamily": "LINUX" + } + } + }, + { + "_links": { + "region": { + "href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" + } + }, + "externalRegionId": "Datacenter:datacenter-1001", + "mapping": { + "Photon 4": { + "_links": { + "region": { + "href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" + } + }, + "cloudConfig": "", + "constraints": [], + "description": "photon", + "externalId": "50028cf1-88b8-52e8-58a1-b8354d4207b0", + "externalRegionId": "Datacenter:datacenter-1001", + "id": "d417648249e9740d7561188fa2a3a3ab4e8ccf85", + "isPrivate": false, + "name": "photon", + "osFamily": "LINUX" + }, + "Windows Server 2019": { + "_links": { + "region": { + "href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" + } + }, + "cloudConfig": "", + "constraints": [], + "description": "ws2019", + "externalId": "500235ad-1022-fec3-8ad1-00433beee103", + "externalRegionId": "Datacenter:datacenter-1001", + "id": "7e05f4e57ac55135cf7a7f8b951aa8ccfcc335d8", + "isPrivate": false, + "name": "ws2019", + "osFamily": "WINDOWS" + } + } + } + ], + "numberOfElements": 2, + "totalElements": 2 +} +``` +This doesn't give me the *name* of the regions, but I could use the `_links.region.href` data to 
quickly match up images which exist in a given region.[^foreshadowing]
+
+You'll notice that HTTPie also prettifies the JSON response to make it easy for humans to parse. This is great for experimenting with requests against different API endpoints and getting a feel for what data can be found where. And firing off tests in HTTPie can be a lot quicker (and easier to format) than with other tools.
+
+Now let's take what we've learned and see about implementing it as vRO actions.
+[^pie]: ![](pie.gif)
+[^token]: Well, most of it.
+[^foreshadowing]: That knowledge will come in handy later.
+### vRealize Orchestrator actions
+My immediate goal for this exercise is to create a set of vRealize Orchestrator actions which take in a zone/location identifier from the Cloud Assembly request and return a list of images which are available for deployment there. I'll start with some utility actions to do the heavy lifting, and then I'll be able to call them from other actions as things get more complicated/interesting. Before I can do that, though, I'll need to add the vRA instance as an HTTP REST endpoint in vRO.
+
+{{% notice info "This post brought to you by..." %}}
+A lot of what follows was borrowed *heavily* from a [very helpful post by Oktawiusz Poranski over at Automate Clouds](https://automateclouds.com/2021/vrealize-automation-8-rest-api-how-to/) so be sure to check out that site for more great tips on working with APIs!
+{{% /notice %}}
+
+#### Creating the endpoint
+I can use the predefined **Add a REST host** workflow to create the needed endpoint.
+![Add a REST host workflow - host properties](add_rest_host_properties.png)
+
+Configuring the host properties here is very simple: just give it a name and the URL of the vRA instance.
+
+![Add a REST host workflow - authentication](add_rest_host_auth.png)
+On the Authentication tab, I set the auth type to `NONE`; I'll handle the authentication steps directly.
+
+With those parameters in place I can kick off the workflow. 
Once it completes, I can check **Administration > Inventory > HTTP-REST** to see the newly-created endpoint: +![New HTTP-REST endpoint](new_rest_host_in_inventory.png) + +#### Creating a Configuration for the endpoint +I don't want to hardcode any endpoints or credentials into my vRO actions so I'm also going to create a Configuration to store those details. This will make it easy to reference those same details from multiple actions as well. + +To create a new Configuration, I'll head to **Assets > Configurations**, select a folder (or create a new one) where I want the Configuration to live, and then click the **New Configuration** button. +![Location of the Configurations](config_element_1.png) + +I'm going to call this new Configuration `Endpoints` since I plan to use it for holding host/credentials for additional endpoints in the future. After giving it a name, I'll go ahead and click **Save** to preserve my efforts thus far. + +![Creating the new Configuration](config_element_2.png) + +I'll then click over to the **Variables** tab and create a new variable to store my vRA endpoint details; I'll call it `vRAHost`, and hit the *Type* dropdown and select **New Composite Type**. + +![Creating the new variable](vrahost_variable_1.png) + +This new composite type will let me use a single variable to store multiple values - basically everything I'll need to interact with a single REST endpoint: + +| Variable | Type | +|:--- |:--- | +| `host` | `REST:RESTHost` | +| `username` | `string` | +| `domain` | `string` | +| `password` | `SecureString` | + +![Creating a new composite type](new_composite_type.png) + +I can then map the appropriate values for this new variable and hit **Create**. +![Assigning values to the vRAHost](vrahost_variable_2.png) + +I make sure to **Save** my work and then gaze at the new Configuration's majesty: +![Newly-created Configuration](created_configuration.png) + +Okay, enough prep work - let's get into some Actions! 
+#### Utility actions +##### `getConfigValue` action +I'll head into **Library > Actions** to create a new action inside my `com.virtuallypotato.utility` module. This action's sole purpose will be to extract the details out of the configuration element I just created. Right now I'm only concerned with retrieving the one `vRAHost` configuration but I'm a fan of using generic pluggable modules where possible. This one will work to retrieve the value of *any* variable defined in *any* configuration element so I'll call it `getConfigValue`. + +![getConfigValue action](getConfigValue_action.png) + +| Input | Type | Description | +|:--- |:--- |:--- | +| `path` | `string` | Path to Configuration folder | +| `configurationName` | `string` | Name of Configuration | +| `variableName` | `string` | Name of desired variable inside Configuration | + +```javascript +/* +JavaScript: getConfigValue action + Inputs: path (string), configurationName (string), variableName (string) + Return type: string +*/ + +var configElement = null; +for each (configElement in Server.getConfigurationElementCategoryWithPath(path).configurationElements) { + if (configElement.name.indexOf(configurationName) === 0) { break; }; +} +var attribValue = configElement.getAttributeWithKey(variableName).value; +return attribValue; +``` + +##### `vraLogin` action +Next, I'll create another action in my `com.virtuallypotato.utility` module which will use the `getConfigValue` action to retrieve the endpoint details. It will then submit a `POST` request to that endpoint to log in, and it will return the authentication token. Later actions will be able to call upon `vraLogin` to grab a token and then pass that back to the IaaS API in the request headers - but I'm getting ahead of myself. 
Let's get the login sorted: + +![vraLogin action](vraLogin_action.png) + +```javascript +/* +JavaScript: vraLogin action + Inputs: none + Return type: string +*/ +var restHost = System.getModule("com.virtuallypotato.utility").getConfigValue("vPotato", "Endpoints", "vRAHost"); +var host = restHost.host; +var loginObj = { + domain: restHost.domain, + password: restHost.password, + username: restHost.username +}; +var loginJson = JSON.stringify(loginObj); +var request = host.createRequest("POST", "/csp/gateway/am/api/login", loginJson); +request.setHeader("Content-Type", "application/json"); +var response = request.execute(); +var token = JSON.parse(response.contentAsString).cspAuthToken; +System.debug("Created vRA API session: " + token); + +return token; +``` + +##### `vraLogout` action +I like to clean up after myself so I'm also going to create a `vraLogout` action in my `com.virtuallypotato.utility` module to tear down the API session when I'm finished with it. + +![vraLogout action](vraLogout_action.png) + +| Input | Type | Description | +|:--- |:--- |:--- | +| `token` | `string` | Auth token of the session to destroy | + +```javascript +/* +JavaScript: vraLogout action + Inputs: token (string) + Return type: string +*/ +var host = System.getModule("com.virtuallypotato.utility").getConfigValue("vPotato", "Endpoints", "vRAHost").host; +var logoutObj = { + idToken: token +}; +var logoutJson = JSON.stringify(logoutObj); + +var request = host.createRequest("POST", "/csp/gateway/am/api/auth/logout", logoutJson); +request.setHeader("Content-Type", "application/json"); +request.execute().statusCode; +System.debug("Terminated vRA API session: " + token); +``` + +##### `vraExecute` action +My final "utility" action for this effort will run in between `vraLogin` and `vraLogout`, and it will handle making the actual API call and returning the results. 
This way I won't have to implement the API handler in every single action which needs to talk to the API - they can just call my new action, `vraExecute`. + +![vraExecute action](vraExecute_action.png) + +| Input | Type | Description | +|:--- |:--- |:--- | +|`token`|`string`|Auth token from `vraLogin`| +|`method`|`string`|REST Method (`GET`, `POST`, etc.)| +|`uri`|`string`|Path to API controller (`/iaas/api/flavor-profiles`)| +|`content`|`string`|Any additional data to pass with the request| + +```javascript +/* +JavaScript: vraExecute action + Inputs: token (string), method (string), uri (string), content (string) + Return type: string +*/ +var host = System.getModule("com.virtuallypotato.utility").getConfigValue("vPotato", "Endpoints", "vRAHost").host; +System.log(host); +if (content) { + var request = host.createRequest(method, uri, content); +} else { + var request = host.createRequest(method, uri); +} +request.setHeader("Content-Type", "application/json"); +request.setHeader("Authorization", "Bearer " + token); +var response = request.execute(); +var statusCode = response.statusCode; +var responseContent = response.contentAsString; +if (statusCode > 399) { + System.error(responseContent); + throw "vraExecute action failed, status code: " + statusCode; +} + +return responseContent; +``` + +##### Bonus: `vraTester` action +That's it for the core utility actions - but wouldn't it be great to know that this stuff works before moving on to handling the request input? Enter `vraTester`! It will be handy to have an action I can test vRA REST requests in before going all-in on a solution. + +This action will: +1. Call `vraLogin` to get an API token. +2. Call `vraExecute` with that token, a REST method, and an API endpoint to retrieve some data. +3. Call `vraLogout` to terminate the API session. +4. Return the data so we can see if it worked. 
+ +Other actions wanting to interact with the vRA REST API will follow the same basic formula, though with some more logic and capability baked in. + +Anyway, here's my first swing: +```JavaScript +/* +JavaScript: vraTester action + Inputs: none + Return type: string +*/ +var token = System.getModule('com.virtuallypotato.utility').vraLogin(); +var result = JSON.parse(System.getModule('com.virtuallypotato.utility').vraExecute(token, 'GET', '/iaas/api/zones')).content; +System.log(JSON.stringify(result)); +System.getModule('com.virtuallypotato.utility').vraLogout(token); +return JSON.stringify(result); +``` + +Pretty simple, right? Let's see if it works: +![vraTester action](vraTester_action.png) + +It did! Though that result is a bit hard to parse visually, so I'm going to prettify it a bit: +```json {hl_lines=[17,35,56,74]} +[ + { + "tags": [], + "tagsToMatch": [ + { + "key": "compute", + "value": "nuc" + } + ], + "placementPolicy": "DEFAULT", + "customProperties": { + "zone_overlapping_migrated": "true" + }, + "folder": "vRA_Deploy", + "externalRegionId": "Datacenter:datacenter-1001", + "cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68", + "name": "NUC", + "id": "3d4f048a-385d-4759-8c04-117a170d060c", + "updatedAt": "2022-06-02", + "organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "orgId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "_links": { + "projects": { + "hrefs": [ + "/iaas/api/projects/9c3d1e73-1276-42e7-8d8e-cac40251a29e" + ] + }, + "computes": { + "href": "/iaas/api/zones/3d4f048a-385d-4759-8c04-117a170d060c/computes" + }, + "self": { + "href": "/iaas/api/zones/3d4f048a-385d-4759-8c04-117a170d060c" + }, + "region": { + "href": "/iaas/api/regions/c0d2a662-9ee5-4a27-9a9e-e92a72668136" + }, + "cloud-account": { + "href": "/iaas/api/cloud-accounts/75d29635-f128-4b85-8cf9-95a9e5981c68" + } + } + }, + { + "tags": [], + "tagsToMatch": [ + { + "key": "compute", + "value": "qtz" + } + ], + "placementPolicy": "DEFAULT", + "customProperties": { + 
"zone_overlapping_migrated": "true" + }, + "externalRegionId": "Datacenter:datacenter-39056", + "cloudAccountId": "75d29635-f128-4b85-8cf9-95a9e5981c68", + "name": "QTZ", + "id": "84470591-74a2-4659-87fd-e5d174a679a2", + "updatedAt": "2022-06-02", + "organizationId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "orgId": "61ebe5bf-5f55-4dee-8533-7ad05c067dd9", + "_links": { + "projects": { + "hrefs": [ + "/iaas/api/projects/9c3d1e73-1276-42e7-8d8e-cac40251a29e" + ] + }, + "computes": { + "href": "/iaas/api/zones/84470591-74a2-4659-87fd-e5d174a679a2/computes" + }, + "self": { + "href": "/iaas/api/zones/84470591-74a2-4659-87fd-e5d174a679a2" + }, + "region": { + "href": "/iaas/api/regions/3617c011-39db-466e-a7f3-029f4523548f" + }, + "cloud-account": { + "href": "/iaas/api/cloud-accounts/75d29635-f128-4b85-8cf9-95a9e5981c68" + } + } + } +] +``` + +I can see that it returned two Cloud Zones, named `NUC` (for my [NUC 9 host](/vmware-home-lab-on-intel-nuc-9/)) and `QTZ` (for my [Quartz64 SBC running ESXi-ARM](/esxi-arm-on-quartz64/)). Each Zone also includes data about other objects associated with the Zone, such as that `_links.region.href` property I mentioned earlier. + +My compute targets live at the Zone level, each Zone lives inside a given Region, and the Image Profiles reside within a Region. See where this is going? I now have the information I need to link a Zone to the Image Profiles available within that Zone. + +#### Input actions +So now I'm ready to work on the actions that will handle passing information between the vRA REST API and the Cloud Assembly deployment request form. For organization purposes, I'll stick them in a new module which I'll call `com.virtuallypotato.inputs`. And for the immediate purposes, I'm going to focus on just two fields in the request form: `Zone` (for where the VM should be created) and `Image` (for what VM template it will be spawned from). 
The `Zone` dropdown will be automatically populated when the form loads, and `Image` options will show up as soon as the user has selected a Zone. 
+
+##### `vraGetZones` action
+This action will basically just repeat the call that I tested above in `vraTester`, but parse the result to extract just the Zone names. It will pop those into an array of strings which can be rendered as a dropdown on the Cloud Assembly side.
+
+![vraGetZones action](vraGetZones_action.png)
+
+```javascript
+/*
+JavaScript: vraGetZones action
+    Inputs: none
+    Return type: Array/string
+*/
+var zoneNames = new Array();
+var token = System.getModule("com.virtuallypotato.utility").vraLogin();
+var zones = JSON.parse(System.getModule("com.virtuallypotato.utility").vraExecute(token, "GET", "/iaas/api/zones", null)).content;
+zones.forEach(
+    function (zone) {
+        zoneNames.push(zone.name);
+    }
+);
+zoneNames.sort();
+System.getModule("com.virtuallypotato.utility").vraLogout(token);
+return zoneNames;
+```
+
+##### `vraGetImages` action
+Once the user has selected a Zone from the dropdown, the `vraGetImages` action will first contact the same `/iaas/api/zones` API to get the same list of available Zones. It will look through that list to find the one with the matching name, and then extract the `._links.region.href` URI for the Zone.
+
+Next it will reach out to `/iaas/api/images` to retrieve all the available images. For each image, it will compare its associated `._links.region.href` URI to that of the designated Zone; if there's a match, the action will add the image to an array of strings which will then be returned back to the request form.
+
+Oh, and the whole thing is wrapped in a conditional so that the code only executes when `zoneName` has been set on the request form; otherwise it simply returns an empty string. 
+ +| Input | Type | Description | +|:--- |:--- |:--- | +| `zoneName` | `string` | The name of the Zone selected in the request form | + +```javascript +/* JavaScript: vraGetImages action + Inputs: zoneName (string) + Return type: array/string +*/ +if (!(zoneName == "" || zoneName == null)) { + var arrImages = new Array(); + var regionUri = null; + var token = System.getModule("com.virtuallypotato.utility").vraLogin(); + var zones = JSON.parse(System.getModule("com.virtuallypotato.utility").vraExecute(token, "GET", "/iaas/api/zones", null)).content; + System.debug("Zones: " + JSON.stringify(zones)); + for each (zone in zones) { + if (zone.name === zoneName) { + System.debug("Matching zone: " + zone.name); + regionUri = zone._links.region.href; + } + if (regionUri != null) { break; }; + } + System.debug("Matching region URI: " + regionUri); + var images = JSON.parse(System.getModule("com.virtuallypotato.utility").vraExecute(token, "GET", "/iaas/api/images", null)).content; + System.debug("Images: " + JSON.stringify(images)); + images.forEach( + function (image) { + if (image._links.region.href === regionUri) { + System.debug("Images in region: " + JSON.stringify(image.mapping)); + for (var i in image.mapping) { + System.debug("Image: " + i); + arrImages.push(i); + } + } + } + ); + arrImages.sort(); + System.getModule("com.virtuallypotato.utility").vraLogout(token); + return arrImages; +} else { + return [""]; +} +``` + +I'll use the **Debug** button to test this action real quick-like, providing the `NUC` Zone as the input: +![vraGetImages action](vraGetImages_action.png) + +It works! Well, at least when called directly. Let's see how it does when called from Cloud Assembly. + +### Cloud Assembly request +For now I'm really only testing using my new vRO actions so my Cloud Template is going to be pretty basic. I'm not even going to add any resources to the template; I don't even need it to be deployable. 
+ +![Completely blank template](blank_template.png) + +What I do need are two inputs. I'd normally just write the inputs directly as YAML, but the syntax for referencing vRO actions can be a bit tricky and I don't want to mess it up. So I pop over to the **Inputs** tab in the editor pane on the right and click the **New Cloud Template Input** button. + +![Need input!](need_input.png) + +I'll start with an input called `zoneName`, which will be a string: +![zoneName input](zoneName_input_1.png) + +I'll then click on **More Options** and scroll to the bottom to select that the field data should come from an *External Source*: +![zoneName more options](zoneName_input_2.png) + +I click the **Select** button and can then search for the `vraGetZones` action I wish to use: +![zoneName selecting the action](zoneName_input_3.png) + +And then hit **Create** to add the new input to the template. + +Next I'll repeat the same steps to create a new `image` input. This time, though, when I select the `vraGetImages` action I'll also need to select another input to bind to the `zoneName` parameter: +![Binding the input](image_input.png) + +The full code for my template now looks like this: +```yaml +formatVersion: 1 +inputs: + zoneName: + type: string + title: Zone + $dynamicEnum: /data/vro-actions/com.virtuallypotato.inputs/vraGetZones + image: + type: string + title: Image + $dynamicEnum: /data/vro-actions/com.virtuallypotato.inputs/vraGetImages?zoneName={{zoneName}} +resources: {} +``` +And I can use the **Test** button at the bottom of the Cloud Assembly template editor to confirm that everything works: + +![Testing the request fields](recording.gif) + +It does! 
+ +### Conclusion +This has been a very quick introduction on how to start pulling data from the vRA APIs, but it (hopefully) helps to consolidate all the knowledge and information I had to find when I started down this path - and maybe it will give you some ideas on how you can use this ability within your own vRA environment. + +In the near future, I'll also have a post on how to do the same sort of things with the vCenter REST API, and I hope to follow that up with a deeper dive on all the tricks I've used to make my request forms as dynamic as possible with the absolute minimum of hardcoded data in the templates. Let me know in the comments if there are any particular use cases you'd like me to explore further. + +Until next time! \ No newline at end of file diff --git a/content/post/getting-started-vra-rest-api/login_controller_1.png b/content/post/getting-started-vra-rest-api/login_controller_1.png new file mode 100644 index 0000000..da9857a Binary files /dev/null and b/content/post/getting-started-vra-rest-api/login_controller_1.png differ diff --git a/content/post/getting-started-vra-rest-api/login_controller_2.png b/content/post/getting-started-vra-rest-api/login_controller_2.png new file mode 100644 index 0000000..72f9f28 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/login_controller_2.png differ diff --git a/content/post/getting-started-vra-rest-api/login_controller_3.png b/content/post/getting-started-vra-rest-api/login_controller_3.png new file mode 100644 index 0000000..ec01d4e Binary files /dev/null and b/content/post/getting-started-vra-rest-api/login_controller_3.png differ diff --git a/content/post/getting-started-vra-rest-api/login_controller_4.png b/content/post/getting-started-vra-rest-api/login_controller_4.png new file mode 100644 index 0000000..a04a058 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/login_controller_4.png differ diff --git 
a/content/post/getting-started-vra-rest-api/need_input.png b/content/post/getting-started-vra-rest-api/need_input.png new file mode 100644 index 0000000..dff3359 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/need_input.png differ diff --git a/content/post/getting-started-vra-rest-api/new_composite_type.png b/content/post/getting-started-vra-rest-api/new_composite_type.png new file mode 100644 index 0000000..b82e7b2 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/new_composite_type.png differ diff --git a/content/post/getting-started-vra-rest-api/new_rest_host_in_inventory.png b/content/post/getting-started-vra-rest-api/new_rest_host_in_inventory.png new file mode 100644 index 0000000..54b3dca Binary files /dev/null and b/content/post/getting-started-vra-rest-api/new_rest_host_in_inventory.png differ diff --git a/content/post/getting-started-vra-rest-api/pie.gif b/content/post/getting-started-vra-rest-api/pie.gif new file mode 100644 index 0000000..4af8dcd Binary files /dev/null and b/content/post/getting-started-vra-rest-api/pie.gif differ diff --git a/content/post/getting-started-vra-rest-api/recording.gif b/content/post/getting-started-vra-rest-api/recording.gif new file mode 100644 index 0000000..8f98fa8 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/recording.gif differ diff --git a/content/post/getting-started-vra-rest-api/thumbnail.png b/content/post/getting-started-vra-rest-api/thumbnail.png new file mode 100644 index 0000000..b1bcc60 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/thumbnail.png differ diff --git a/content/post/getting-started-vra-rest-api/vraExecute_action.png b/content/post/getting-started-vra-rest-api/vraExecute_action.png new file mode 100644 index 0000000..c056e27 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vraExecute_action.png differ diff --git a/content/post/getting-started-vra-rest-api/vraGetImages_action.png 
b/content/post/getting-started-vra-rest-api/vraGetImages_action.png new file mode 100644 index 0000000..c2a8b1b Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vraGetImages_action.png differ diff --git a/content/post/getting-started-vra-rest-api/vraGetZones_action.png b/content/post/getting-started-vra-rest-api/vraGetZones_action.png new file mode 100644 index 0000000..12374b5 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vraGetZones_action.png differ diff --git a/content/post/getting-started-vra-rest-api/vraLogin_action.png b/content/post/getting-started-vra-rest-api/vraLogin_action.png new file mode 100644 index 0000000..8216ab5 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vraLogin_action.png differ diff --git a/content/post/getting-started-vra-rest-api/vraLogout_action.png b/content/post/getting-started-vra-rest-api/vraLogout_action.png new file mode 100644 index 0000000..ad3f954 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vraLogout_action.png differ diff --git a/content/post/getting-started-vra-rest-api/vraTester_action.png b/content/post/getting-started-vra-rest-api/vraTester_action.png new file mode 100644 index 0000000..1adbb32 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vraTester_action.png differ diff --git a/content/post/getting-started-vra-rest-api/vrahost_variable_1.png b/content/post/getting-started-vra-rest-api/vrahost_variable_1.png new file mode 100644 index 0000000..abb9fda Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vrahost_variable_1.png differ diff --git a/content/post/getting-started-vra-rest-api/vrahost_variable_2.png b/content/post/getting-started-vra-rest-api/vrahost_variable_2.png new file mode 100644 index 0000000..cfbd6f9 Binary files /dev/null and b/content/post/getting-started-vra-rest-api/vrahost_variable_2.png differ diff --git 
a/content/post/getting-started-vra-rest-api/zoneName_input_1.png b/content/post/getting-started-vra-rest-api/zoneName_input_1.png new file mode 100644 index 0000000..eb7529e Binary files /dev/null and b/content/post/getting-started-vra-rest-api/zoneName_input_1.png differ diff --git a/content/post/getting-started-vra-rest-api/zoneName_input_2.png b/content/post/getting-started-vra-rest-api/zoneName_input_2.png new file mode 100644 index 0000000..a77e29a Binary files /dev/null and b/content/post/getting-started-vra-rest-api/zoneName_input_2.png differ diff --git a/content/post/getting-started-vra-rest-api/zoneName_input_3.png b/content/post/getting-started-vra-rest-api/zoneName_input_3.png new file mode 100644 index 0000000..d813e3b Binary files /dev/null and b/content/post/getting-started-vra-rest-api/zoneName_input_3.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/-PHf9oUyM.png b/content/post/integrating-phpipam-with-vrealize-automation-8/-PHf9oUyM.png new file mode 100644 index 0000000..d5a7ba8 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/-PHf9oUyM.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/-aPGJhSvz.png b/content/post/integrating-phpipam-with-vrealize-automation-8/-aPGJhSvz.png new file mode 100644 index 0000000..e0f2676 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/-aPGJhSvz.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/09RIXJc12.png b/content/post/integrating-phpipam-with-vrealize-automation-8/09RIXJc12.png new file mode 100644 index 0000000..fdf8978 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/09RIXJc12.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/2otDJvqRP.png b/content/post/integrating-phpipam-with-vrealize-automation-8/2otDJvqRP.png new file mode 100644 index 0000000..59039cc 
Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/2otDJvqRP.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/3BQnEd0bY.png b/content/post/integrating-phpipam-with-vrealize-automation-8/3BQnEd0bY.png new file mode 100644 index 0000000..99f5317 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/3BQnEd0bY.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/4WQ8HWJ2N.png b/content/post/integrating-phpipam-with-vrealize-automation-8/4WQ8HWJ2N.png new file mode 100644 index 0000000..865d252 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/4WQ8HWJ2N.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/6yo39lXI7.png b/content/post/integrating-phpipam-with-vrealize-automation-8/6yo39lXI7.png new file mode 100644 index 0000000..70705e1 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/6yo39lXI7.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/7_QI-Ti8g.png b/content/post/integrating-phpipam-with-vrealize-automation-8/7_QI-Ti8g.png new file mode 100644 index 0000000..7c62265 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/7_QI-Ti8g.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/DiqyOlf5S.png b/content/post/integrating-phpipam-with-vrealize-automation-8/DiqyOlf5S.png new file mode 100644 index 0000000..23a00b4 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/DiqyOlf5S.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/QoxVKC11t.png b/content/post/integrating-phpipam-with-vrealize-automation-8/QoxVKC11t.png new file mode 100644 index 0000000..10ba5d9 Binary files /dev/null and 
b/content/post/integrating-phpipam-with-vrealize-automation-8/QoxVKC11t.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/SR7oD0jsG.png b/content/post/integrating-phpipam-with-vrealize-automation-8/SR7oD0jsG.png new file mode 100644 index 0000000..b021d7f Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/SR7oD0jsG.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/bpx8iKUHF.png b/content/post/integrating-phpipam-with-vrealize-automation-8/bpx8iKUHF.png new file mode 100644 index 0000000..1e309f9 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/bpx8iKUHF.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/e4PTJxfqH.png b/content/post/integrating-phpipam-with-vrealize-automation-8/e4PTJxfqH.png new file mode 100644 index 0000000..2872715 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/e4PTJxfqH.png differ diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/index.md b/content/post/integrating-phpipam-with-vrealize-automation-8/index.md new file mode 100644 index 0000000..0228426 --- /dev/null +++ b/content/post/integrating-phpipam-with-vrealize-automation-8/index.md @@ -0,0 +1,748 @@ +--- +series: vRA8 +date: "2021-02-22T08:34:30Z" +lastmod: 2022-07-25 +thumbnail: 7_QI-Ti8g.png +usePageBundles: true +tags: +- python +- rest +- vmware +- vra +- networking +title: Integrating {php}IPAM with vRealize Automation 8 +--- + +In a [previous post](/vmware-home-lab-on-intel-nuc-9), I described some of the steps I took to stand up a homelab including vRealize Automation (vRA) on an Intel NUC 9. One of my initial goals for that lab was to use it for developing and testing a way for vRA to leverage [phpIPAM](https://phpipam.net/) for static IP assignments. 
The homelab worked brilliantly for that purpose, and those extra internal networks were a big help when it came to testing. I was able to deploy and configure a new VM to host the phpIPAM instance, install the [VMware vRealize Third-Party IPAM SDK](https://code.vmware.com/web/sdk/1.1.0/vmware-vrealize-automation-third-party-ipam-sdk) on my [Chromebook's Linux environment](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications), develop and build the integration component, import it to my vRA environment, and verify that deployments got addressed accordingly. + +The resulting integration is available on Github [here](https://github.com/jbowdre/phpIPAM-for-vRA8). This was actually the second integration I'd worked on, having fumbled my way through a [Solarwinds integration](https://github.com/jbowdre/SWIPAMforvRA8) earlier last year. [VMware's documentation](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-4A5A481C-FC45-47FB-A120-56B73EB28F01.html) on how to build these things is pretty good, but I struggled to find practical information on how a novice like me could actually go about developing the integration. So maybe these notes will be helpful to anyone seeking to write an integration for a different third-party IP Address Management solution. + +If you'd just like to import a working phpIPAM integration into your environment without learning how the sausage is made, you can grab my latest compiled package [here](https://github.com/jbowdre/phpIPAM-for-vRA8/releases/latest). You'll probably still want to look through Steps 0-2 to make sure your IPAM instance is set up similarly to mine. + +### Step 0: phpIPAM installation and base configuration +Before even worrying about the SDK, I needed to [get a phpIPAM instance ready](https://phpipam.net/documents/installation/). I started with a small (1vCPU/1GB RAM/16GB HDD) VM attached to my "Home" network (`192.168.1.0/24`). 
I installed Ubuntu 20.04.1 LTS, and then used [this guide](https://computingforgeeks.com/install-and-configure-phpipam-on-ubuntu-debian-linux/) to install phpIPAM. 
+
+Once phpIPAM was running and accessible via the web interface, I then used `openssl` to generate a self-signed certificate to be used for the SSL API connection:
+```shell
+sudo mkdir /etc/apache2/certificate
+cd /etc/apache2/certificate/
+sudo openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out apache-certificate.crt -keyout apache.key
+```
+I edited the apache config file to bind that new certificate on port 443, and to redirect requests on port 80 to port 443:
+```xml
+<VirtualHost *:80>
+    ServerName ipam.lab.bowdre.net
+    Redirect permanent / https://ipam.lab.bowdre.net
+</VirtualHost>
+<VirtualHost *:443>
+    DocumentRoot "/var/www/html/phpipam"
+    ServerName ipam.lab.bowdre.net
+    <Directory "/var/www/html/phpipam">
+        Options Indexes FollowSymLinks
+        AllowOverride All
+        Require all granted
+    </Directory>
+    ErrorLog "/var/log/apache2/phpipam-error_log"
+    CustomLog "/var/log/apache2/phpipam-access_log" combined
+    SSLEngine on
+    SSLCertificateFile /etc/apache2/certificate/apache-certificate.crt
+    SSLCertificateKeyFile /etc/apache2/certificate/apache.key
+</VirtualHost>
+```
+After restarting apache, I verified that hitting `http://ipam.lab.bowdre.net` redirected me to `https://ipam.lab.bowdre.net`, and that the connection was secured with the shiny new certificate.
+
+Remember how I've got a "Home" network as well as [several internal networks](/vmware-home-lab-on-intel-nuc-9#networking) which only exist inside the lab environment? I dropped the phpIPAM instance on the Home network to make it easy to connect to, but it doesn't know how to talk to the internal networks where vRA will actually be deploying the VMs. So I added a static route to let it know that traffic to `172.16.0.0/16` would have to go through the Vyos router at `192.168.1.100`. 
+ +This is Ubuntu, so I edited `/etc/netplan/99-netcfg-vmware.yaml` to add the `routes` section at the bottom: +```yaml +network: + version: 2 + renderer: networkd + ethernets: + ens160: + dhcp4: no + dhcp6: no + addresses: + - 192.168.1.14/24 + gateway4: 192.168.1.1 + nameservers: + search: + - lab.bowdre.net + addresses: + - 192.168.1.5 + routes: + - to: 172.16.0.0/16 + via: 192.168.1.100 + metric: 100 +``` +I then ran `sudo netplan apply` so the change would take immediate effect and confirmed the route was working by pinging the vCenter's interface on the `172.16.10.0/24` network: +``` +john@ipam:~$ sudo netplan apply +john@ipam:~$ ip route +default via 192.168.1.1 dev ens160 proto static +172.16.0.0/16 via 192.168.1.100 dev ens160 proto static metric 100 +192.168.1.0/24 dev ens160 proto kernel scope link src 192.168.1.14 +john@ipam:~$ ping 172.16.10.12 +PING 172.16.10.12 (172.16.10.12) 56(84) bytes of data. +64 bytes from 172.16.10.12: icmp_seq=1 ttl=64 time=0.282 ms +64 bytes from 172.16.10.12: icmp_seq=2 ttl=64 time=0.256 ms +64 bytes from 172.16.10.12: icmp_seq=3 ttl=64 time=0.241 ms +^C +--- 172.16.10.12 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 2043ms +rtt min/avg/max/mdev = 0.241/0.259/0.282/0.016 ms +``` + +Now would also be a good time to go ahead and enable cron jobs so that phpIPAM will automatically scan its defined subnets for changes in IP availability and device status. phpIPAM includes a pair of scripts in `INSTALL_DIR/functions/scripts/`: one for discovering new hosts, and the other for checking the status of previously discovered hosts. 
So I ran `sudo crontab -e` to edit root's crontab and pasted in these two lines to call both scripts every 15 minutes: +``` +*/15 * * * * /usr/bin/php /var/www/html/phpipam/functions/scripts/discoveryCheck.php +*/15 * * * * /usr/bin/php /var/www/html/phpipam/functions/scripts/pingCheck.php +``` + +### Step 1: Configuring phpIPAM API access +Okay, let's now move on to the phpIPAM web-based UI to continue the setup. After logging in at `https://ipam.lab.bowdre.net/`, I clicked on the red **Administration** menu at the right side and selected **phpIPAM Settings**. Under the **Site Settings** section, I enabled the *Prettify links* option, and under the **Feature Settings** section I toggled on the *API* component. I then hit *Save* at the bottom of the page to apply the changes. + +Next, I went to the **Users** item on the left-hand menu to create a new user account which will be used by vRA. I named it `vra`, set a password for the account, and made it a member of the `Operators` group, but didn't grant any special module access. +![Creating vRA service account in phpIPAM](DiqyOlf5S.png) +![Creating vRA service account in phpIPAM](QoxVKC11t.png) + +The last step in configuring API access is to create an API key. This is done by clicking the **API** item on that left side menu and then selecting *Create API key*. I gave it the app ID `vra`, granted Read/Write permissions, and set the *App Security* option to "SSL with User token". +![Generating the API key](-aPGJhSvz.png) + +Once we get things going, our API calls will authenticate with the username and password to get a token and bind that to the app ID. + +### Step 2: Configuring phpIPAM subnets +Our fancy new IPAM solution is ready to go - except for the whole bit about managing IPs. We need to tell it about the network segments we'd like it to manage. phpIPAM uses "Sections" to group subnets together, so we start by creating a new Section at **Administration > IP related management > Sections**. 
I named my new section `Lab`, and pretty much left all the default options. Be sure that the `Operators` group has read/write access to this section and the subnets we're going to create inside it! +![Creating a section to hold the subnets](6yo39lXI7.png) + +We should also go ahead and create a Nameserver set so that phpIPAM will be able to tell its clients (vRA) what server(s) to use for DNS. Do this at **Administration > IP related management > Nameservers**. I created a new entry called `Lab` and pointed it at my internal DNS server, `192.168.1.5`. +![Designating the nameserver](pDsEh18bx.png) + +Okay, we're finally ready to start entering our subnets at **Administration > IP related management > Subnets**. For each one, I entered the Subnet in CIDR format, gave it a useful description, and associated it with my `Lab` section. I expanded the *VLAN* dropdown and used the *Add new VLAN* option to enter the corresponding VLAN information, and also selected the Nameserver I had just created. +![Entering the first subnet](-PHf9oUyM.png) +I also enabled the options ~~*Mark as pool*~~, *Check hosts status*, *Discover new hosts*, and *Resolve DNS names*. +![Subnet options](SR7oD0jsG.png) + +{{% notice info "Update" %}} +Since releasing this integration, I've learned that phpIPAM intends for the `isPool` field to identify networks where the entire range (including the subnet and broadcast addresses) are available for assignment. As a result, I no longer recommend using that field. Instead, consider [creating a custom field](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/docs/custom_field.md) for tagging networks for vRA availability. +{{% /notice %}} + +I then used the *Scan subnets for new hosts* button to run a discovery scan against the new subnet. +![Scanning for new hosts](4WQ8HWJ2N.png) + +The scan only found a single host, `172.16.20.1`, which is the subnet's gateway address hosted by the Vyos router. 
I used the pencil icon to edit the IP and mark it as the gateway: +![Identifying the gateway](2otDJvqRP.png) + +phpIPAM now knows the network address, mask, gateway, VLAN, and DNS configuration for this subnet - all things that will be useful for clients seeking an address. I then repeated these steps for the remaining subnets. +![More subnets!](09RIXJc12.png) + +Now for the *real* fun! + +### Step 3: Testing the API +Before moving on to developing the integration, it would be good to first get a little bit familiar with the phpIPAM API - and, in the process, validate that everything is set up correctly. First I read through the [API documentation](https://phpipam.net/api/api_documentation/) and some [example API calls](https://phpipam.net/news/api_example_curl/) to get a feel for it. I then started by firing up a `python3` interpreter and defining a few variables as well as importing the `requests` module for interacting with the REST API: +```python +>>> username = 'vra' +>>> password = 'passw0rd' +>>> hostname = 'ipam.lab.bowdre.net' +>>> apiAppId = 'vra' +>>> uri = f'https://{hostname}/api/{apiAppId}/' +>>> auth = (username, password) +>>> import requests +``` +Based on reading the API docs, I'll need to use the username and password for initial authentication which will provide me with a token to use for subsequent calls. So I'll construct the URI used for auth, submit a `POST` to authenticate, verify that the authentication was successful (`status_code == 200`), and take a look at the response to confirm that I got a token. (For testing, I'm calling `requests` with `verify=False`; we'll be able to use certificate verification when these calls are made from vRA.) +```python +>>> auth_uri = f'{uri}/user/' +>>> req = requests.post(auth_uri, auth=auth, verify=False) +/usr/lib/python3/dist-packages/urllib3/connectionpool.py:849: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. 
See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings + InsecureRequestWarning) +>>> req.status_code +200 +>>> req.json() +{'code': 200, 'success': True, 'data': {'token': 'Q66bVm8FTpnmBEJYhl5I4ITp', 'expires': '2021-02-22 00:52:35'}, 'time': 0.01} +``` +Sweet! There's our token! Let's save it to `token` to make it easier to work with: +```python +>>> token = {"token": req.json()['data']['token']} +>>> token +{'token': 'Q66bVm8FTpnmBEJYhl5I4ITp'} +``` +Let's see if we can use our new token against the `subnets` controller to get a list of subnets known to phpIPAM: +```python +subnet_uri = f'{uri}/subnets/' +>>> subnets = requests.get(subnet_uri, headers=token, verify=False) +/usr/lib/python3/dist-packages/urllib3/connectionpool.py:849: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings + InsecureRequestWarning) +>>> req.status_code +200 +>>> subnets.json() +{'code': 200, 'success': True, 'data': [{'id': '7', 'subnet': '192.168.1.0', 'mask': '24', 'sectionId': '1', 'description': 'Home Network', 'linked_subnet': None, 'firewallAddressObject': None, 'vrfId': None, 'masterSubnetId': '0', 'allowRequests': '0', 'vlanId': None, 'showName': '0', 'device': None, 'permissions': [{'group_id': 3, 'permission': '1', 'name': 'Guests', 'desc': 'default Guest group (viewers)', 'members': False}, {'group_id': 2, 'permission': '2', 'name': 'Operators', 'desc': 'default Operator group', 'members': [{'username': 'vra'}]}], 'pingSubnet': '1', 'discoverSubnet': '1', 'resolveDNS': '1', 'DNSrecursive': '0', 'DNSrecords': '0', 'nameserverId': '1', 'scanAgent': '1', 'customer_id': None, 'isFolder': '0', 'isFull': '0', 'isPool': '0', 'tag': '2', 'threshold': '0', 'location': [], 'editDate': '2021-02-21 22:45:01', 'lastScan': '2021-02-21 22:45:01', 'lastDiscovery': '2021-02-21 22:45:01', 'nameservers': {'id': '1', 'name': 
'Google NS', 'namesrv1': '8.8.8.8;8.8.4.4', 'description': 'Google public nameservers', 'permissions': '1;2', 'editDate': None}},...
+```
+Nice! Let's make it a bit more friendly:
+```python
+>>> subnets = subnets.json()['data']
+>>> for subnet in subnets:
+...     print("Found subnet: " + subnet['description'])
+...
+Found subnet: Home Network
+Found subnet: 1610-Management
+Found subnet: 1620-Servers-1
+Found subnet: 1630-Servers-2
+Found subnet: VPN Subnet
+Found subnet: 1640-Servers-3
+Found subnet: 1650-Servers-4
+Found subnet: 1660-Servers-5
+```
+We're in business!
+
+Now that I know how to talk to phpIPAM via its REST API, it's time to figure out how to get vRA to speak that language.
+
+### Step 4: Getting started with the vRA Third-Party IPAM SDK
+I downloaded the SDK from [here](https://code.vmware.com/web/sdk/1.1.0/vmware-vrealize-automation-third-party-ipam-sdk). It's got a pretty good [README](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/README_VMware.md) which describes the requirements (Java 8+, Maven 3, Python3, Docker, internet access) as well as how to build the package. I also consulted [this white paper](https://docs.vmware.com/en/vRealize-Automation/8.2/ipam_integration_contract_reqs.pdf) which describes the inputs provided by vRA and the outputs expected from the IPAM integration.
+
+The README tells you to extract the .zip and make a simple modification to the `pom.xml` file to "brand" the integration:
+```xml
+<properties>
+    <provider.name>phpIPAM</provider.name>
+    <provider.description>phpIPAM integration for vRA</provider.description>
+    <provider.version>1.0.3</provider.version>
+
+    <provider.supportsAddressSpaces>false</provider.supportsAddressSpaces>
+    <provider.supportsUpdateRecord>true</provider.supportsUpdateRecord>
+    <provider.supportsOnDemandNetworks>false</provider.supportsOnDemandNetworks>
+
+    <user.id>1000</user.id>
+</properties>
+```
+You can then kick off the build with `mvn package -PcollectDependencies -Duser.id=${UID}`, which will (eventually) spit out `./target/phpIPAM.zip`. You can then [import the package to vRA](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-410899CA-1B02-4507-96AD-DFE622D2DD47.html) and test it against the `httpbin.org` hostname to validate that the build process works correctly. 
+ +You'll notice that the form includes fields for Username, Password, and Hostname; we'll also need to specify the API app ID. This can be done by editing `./src/main/resources/endpoint-schema.json`. I added an `apiAppId` field: +```json +{ + "layout":{ + "pages":[ + { + "id":"Sample IPAM", + "title":"Sample IPAM endpoint", + "sections":[ + { + "id":"section_1", + "fields":[ + { + "id":"apiAppId", + "display":"textField" + }, + { + "id":"privateKeyId", + "display":"textField" + }, + { + "id":"privateKey", + "display":"passwordField" + }, + { + "id":"hostName", + "display":"textField" + } + ] + } + ] + } + ] + }, + "schema":{ + "apiAppId":{ + "type":{ + "dataType":"string" + }, + "label":"API App ID", + "constraints":{ + "required":true + } + }, + "privateKeyId":{ + "type":{ + "dataType":"string" + }, + "label":"Username", + "constraints":{ + "required":true + } + }, + "privateKey":{ + "label":"Password", + "type":{ + "dataType":"secureString" + }, + "constraints":{ + "required":true + } + }, + "hostName":{ + "type":{ + "dataType":"string" + }, + "label":"Hostname", + "constraints":{ + "required":true + } + } + }, + "options":{ + + } +} +``` +{{% notice info "Update" %}} +Check out the [source on GitHub](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/resources/endpoint-schema.json) to see how I adjusted the schema to support custom field input. +{{% /notice %}} + +We've now got the framework in place so let's move on to the first operation we'll need to write. Each operation has its own subfolder under `./src/main/python/`, and each contains (among other things) a `requirements.txt` file which will tell Maven what modules need to be imported and a `source.py` file which is where the magic happens. + +### Step 5: 'Validate Endpoint' action +We already basically wrote this earlier with the manual tests against the phpIPAM API. 
This operation basically just needs to receive the endpoint details and credentials from vRA, test the connection against the API, and let vRA know whether or not it was able to authenticate successfully. So let's open `./src/main/python/validate_endpoint/source.py` and get to work. + +It's always a good idea to start by reviewing the example payload section so that we'll know what data we have to work with: +```python +''' +Example payload: + +"inputs": { + "authCredentialsLink": "/core/auth/credentials/13c9cbade08950755898c4b89c4a0", + "endpointProperties": { + "hostName": "sampleipam.sof-mbu.eng.vmware.com" + } + } +''' +``` + +The `do_validate_endpoint` function has a handy comment letting us know that's where we'll drop in our code: +```python +def do_validate_endpoint(self, auth_credentials, cert): + # Your implemention goes here + + username = auth_credentials["privateKeyId"] + password = auth_credentials["privateKey"] + + try: + response = requests.get("https://" + self.inputs["endpointProperties"]["hostName"], verify=cert, auth=(username, password)) +``` +The example code gives us a nice start at how we'll get our inputs from vRA. So let's expand that a bit: +```python +def do_validate_endpoint(self, auth_credentials, cert): + # Build variables + username = auth_credentials["privateKeyId"] + password = auth_credentials["privateKey"] + hostname = self.inputs["endpointProperties"]["hostName"] + apiAppId = self.inputs["endpointProperties"]["apiAppId"] +``` +As before, we'll construct the "base" URI by inserting the `hostname` and `apiAppId`, and we'll combine the `username` and `password` into our `auth` variable: +```python +uri = f'https://{hostname}/api/{apiAppId}/' +auth = (username, password) +``` +I realized that I'd be needing to do the same authentication steps for each one of these operations, so I created a new `auth_session()` function to do the heavy lifting. 
Other operations will also need to return the authorization token but for this run we really just need to know whether the authentication was successful, which we can do by checking `req.status_code`. +```python +def auth_session(uri, auth, cert): + auth_uri = f'{uri}/user/' + req = requests.post(auth_uri, auth=auth, verify=cert) + return req +``` +And we'll call that function from `do_validate_endpoint()`: +```python +# Test auth connection +try: + response = auth_session(uri, auth, cert) + + if response.status_code == 200: + return { + "message": "Validated successfully", + "statusCode": "200" + } +``` +You can view the full code [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/validate_endpoint/source.py). + +After completing each operation, run `mvn package -PcollectDependencies -Duser.id=${UID}` to build again, and then import the package to vRA again. This time, you'll see the new "API App ID" field on the form: +![Validating the new IPAM endpoint](bpx8iKUHF.png) + +Confirm that everything worked correctly by hopping over to the **Extensibility** tab, selecting **Action Runs** on the left, and changing the **User Runs** filter to say *Integration Runs*. +![Extensibility action runs](e4PTJxfqH.png) +Select the newest `phpIPAM_ValidateEndpoint` action and make sure it has a happy green *Completed* status. 
You can also review the Inputs to make sure they look like what you expected: +```json +{ + "__metadata": { + "headers": { + "tokenId": "c/FqqI+i9WF47JkCxBsy8uoQxjyq+nlH0exxLYDRzTk=" + }, + "sourceType": "ipam" + }, + "endpointProperties": { + "dcId": "onprem", + "apiAppId": "vra", + "hostName": "ipam.lab.bowdre.net", + "properties": "[{\"prop_key\":\"phpIPAM.IPAM.apiAppId\",\"prop_value\":\"vra\"}]", + "providerId": "301de00f-d267-4be2-8065-fabf48162dc1", +``` +And we can see that the Outputs reflect our successful result: +```json +{ + "message": "Validated successfully", + "statusCode": "200" +} +``` + +That's one operation in the bank! + +### Step 6: 'Get IP Ranges' action +So vRA can authenticate against phpIPAM; next, let's actually query to get a list of available IP ranges. This happens in `./src/main/python/get_ip_ranges/source.py`. We'll start by pulling over our `auth_session()` function and flesh it out a bit more to return the authorization token: +```python +def auth_session(uri, auth, cert): + auth_uri = f'{uri}/user/' + req = requests.post(auth_uri, auth=auth, verify=cert) + if req.status_code != 200: + raise requests.exceptions.RequestException('Authentication Failure!') + token = {"token": req.json()['data']['token']} + return token +``` +We'll then modify `do_get_ip_ranges()` with our needed variables, and then call `auth_session()` to get the necessary token: +```python +def do_get_ip_ranges(self, auth_credentials, cert): + # Build variables + username = auth_credentials["privateKeyId"] + password = auth_credentials["privateKey"] + hostname = self.inputs["endpoint"]["endpointProperties"]["hostName"] + apiAppId = self.inputs["endpoint"]["endpointProperties"]["apiAppId"] + uri = f'https://{hostname}/api/{apiAppId}/' + auth = (username, password) + + # Auth to API + token = auth_session(uri, auth, cert) +``` +We can then query for the list of subnets, just like we did earlier: +```python +# Request list of subnets +subnet_uri = f'{uri}/subnets/' 
+ipRanges = [] +subnets = requests.get(f'{subnet_uri}?filter_by=isPool&filter_value=1', headers=token, verify=cert) +subnets = subnets.json()['data'] + ``` +I decided to add the extra `filter_by=isPool&filter_value=1` argument to the query so that it will only return subnets marked as a pool in phpIPAM. This way I can use phpIPAM for monitoring address usage on a much larger set of subnets while only presenting a handful of those to vRA. + +{{% notice info "Update" %}} +I now filter for networks identified by the designated custom field like so: +```python + # Request list of subnets + subnet_uri = f'{uri}/subnets/' + if enableFilter == "true": + queryFilter = f'filter_by={filterField}&filter_value={filterValue}' + logging.info(f"Searching for subnets matching filter: {queryFilter}") + else: + queryFilter = '' + logging.info(f"Searching for all known subnets") + ipRanges = [] + subnets = requests.get(f'{subnet_uri}?{queryFilter}', headers=token, verify=cert) + subnets = subnets.json()['data'] +``` +{{% /notice %}} + +Now is a good time to consult [that white paper](https://docs.vmware.com/en/VMware-Cloud-services/1.0/ipam_integration_contract_reqs.pdf) to confirm what fields I'll need to return to vRA. That lets me know that I'll need to return `ipRanges` which is a list of `IpRange` objects. `IpRange` requires `id`, `name`, `startIPAddress`, `endIPAddress`, `ipVersion`, and `subnetPrefixLength` properties. It can also accept `description`, `gatewayAddress`, and `dnsServerAddresses` properties, among others. Some of these properties are returned directly by the phpIPAM API, but others will need to be computed on the fly. 
+ +For instance, these are pretty direct matches: +```python +ipRange['id'] = str(subnet['id']) +ipRange['description'] = str(subnet['description']) +ipRange['subnetPrefixLength'] = str(subnet['mask']) +``` +phpIPAM doesn't return a `name` field but I can construct one that will look like `172.16.20.0/24`: +```python +ipRange['name'] = f"{str(subnet['subnet'])}/{str(subnet['mask'])}" +``` + +Working with IP addresses in Python can be greatly simplified by use of the `ipaddress` module, so I added an `import ipaddress` statement near the top of the file. I also added it to `requirements.txt` to make sure it gets picked up by the Maven build. I can then use that to figure out the IP version as well as computing reasonable start and end IP addresses: +```python +network = ipaddress.ip_network(str(subnet['subnet']) + '/' + str(subnet['mask'])) +ipRange['ipVersion'] = 'IPv' + str(network.version) +ipRange['startIPAddress'] = str(network[1]) +ipRange['endIPAddress'] = str(network[-2]) +``` +I'd like to try to get the DNS servers from phpIPAM if they're defined, but I also don't want the whole thing to puke if a subnet doesn't have that defined. 
phpIPAM returns the DNS servers as a semicolon-delineated string; I need them to look like a Python list: +```python +try: + ipRange['dnsServerAddresses'] = [server.strip() for server in str(subnet['nameservers']['namesrv1']).split(';')] +except: + ipRange['dnsServerAddresses'] = [] +``` +I can also nest another API request to find which address is marked as the gateway for a given subnet: +```python +gw_req = requests.get(f"{subnet_uri}/{subnet['id']}/addresses/?filter_by=is_gateway&filter_value=1", headers=token, verify=cert) +if gw_req.status_code == 200: + gateway = gw_req.json()['data'][0]['ip'] + ipRange['gatewayAddress'] = gateway +``` +And then I merge each of these `ipRange` objects into the `ipRanges` list which will be returned to vRA: +```python +ipRanges.append(ipRange) +``` +After rearranging a bit and tossing in some logging, here's what I've got: +```python +for subnet in subnets: + ipRange = {} + ipRange['id'] = str(subnet['id']) + ipRange['name'] = f"{str(subnet['subnet'])}/{str(subnet['mask'])}" + ipRange['description'] = str(subnet['description']) + logging.info(f"Found subnet: {ipRange['name']} - {ipRange['description']}.") + network = ipaddress.ip_network(str(subnet['subnet']) + '/' + str(subnet['mask'])) + ipRange['ipVersion'] = 'IPv' + str(network.version) + ipRange['startIPAddress'] = str(network[1]) + ipRange['endIPAddress'] = str(network[-2]) + ipRange['subnetPrefixLength'] = str(subnet['mask']) + # return empty set if no nameservers are defined in IPAM + try: + ipRange['dnsServerAddresses'] = [server.strip() for server in str(subnet['nameservers']['namesrv1']).split(';')] + except: + ipRange['dnsServerAddresses'] = [] + # try to get the address marked as the gateway in IPAM + gw_req = requests.get(f"{subnet_uri}/{subnet['id']}/addresses/?filter_by=is_gateway&filter_value=1", headers=token, verify=cert) + if gw_req.status_code == 200: + gateway = gw_req.json()['data'][0]['ip'] + ipRange['gatewayAddress'] = gateway + logging.debug(ipRange) 
+ ipRanges.append(ipRange) +# Return results to vRA +result = { + "ipRanges" : ipRanges +} +return result +``` +The full code can be found [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/get_ip_ranges/source.py). You may notice that I removed all the bits which were in the VMware-provided skeleton about paginating the results. I honestly wasn't entirely sure how to implement that, and I also figured that since I'm already limiting the results by the `is_pool` filter I shouldn't have a problem with the IPAM server returning an overwhelming number of IP ranges. That could be an area for future improvement though. + +In any case, it's time to once again use `mvn package -PcollectDependencies -Duser.id=${UID}` to fire off the build, and then import `phpIPAM.zip` into vRA. + +vRA runs the `phpIPAM_GetIPRanges` action about every ten minutes so keep checking back on the **Extensibility > Action Runs** view until it shows up. You can then select the action and review the Log to see which IP ranges got picked up: +```log +[2021-02-21 23:14:04,026] [INFO] - Querying for auth credentials +[2021-02-21 23:14:04,051] [INFO] - Credentials obtained successfully! +[2021-02-21 23:14:04,089] [INFO] - Found subnet: 172.16.10.0/24 - 1610-Management. +[2021-02-21 23:14:04,101] [INFO] - Found subnet: 172.16.20.0/24 - 1620-Servers-1. +[2021-02-21 23:14:04,114] [INFO] - Found subnet: 172.16.30.0/24 - 1630-Servers-2. +[2021-02-21 23:14:04,126] [INFO] - Found subnet: 172.16.40.0/24 - 1640-Servers-3. +[2021-02-21 23:14:04,138] [INFO] - Found subnet: 172.16.50.0/24 - 1650-Servers-4. +[2021-02-21 23:14:04,149] [INFO] - Found subnet: 172.16.60.0/24 - 1660-Servers-5. +``` +Note that it *did not* pick up my "Home Network" range since it wasn't set to be a pool. 
+ +We can also navigate to **Infrastructure > Networks > IP Ranges** to view them in all their glory: +![Reviewing the discovered IP ranges](7_QI-Ti8g.png) + +You can then follow [these instructions](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-410899CA-1B02-4507-96AD-DFE622D2DD47.html) to associate the external IP ranges with networks available for vRA deployments. + +Next, we need to figure out how to allocate an IP. + +### Step 7: 'Allocate IP' action +I think we've got a rhythm going now. So we'll dive in to `./src/main/python/allocate_ip/source.py`, create our `auth_session()` function, and add our variables to the `do_allocate_ip()` function. I also created a new `bundle` object to hold the `uri`, `token`, and `cert` items so that I don't have to keep typing those over and over and over. +```python +def auth_session(uri, auth, cert): + auth_uri = f'{uri}/user/' + req = requests.post(auth_uri, auth=auth, verify=cert) + if req.status_code != 200: + raise requests.exceptions.RequestException('Authentication Failure!') + token = {"token": req.json()['data']['token']} + return token + +def do_allocate_ip(self, auth_credentials, cert): + # Build variables + username = auth_credentials["privateKeyId"] + password = auth_credentials["privateKey"] + hostname = self.inputs["endpoint"]["endpointProperties"]["hostName"] + apiAppId = self.inputs["endpoint"]["endpointProperties"]["apiAppId"] + uri = f'https://{hostname}/api/{apiAppId}/' + auth = (username, password) + + # Auth to API + token = auth_session(uri, auth, cert) + bundle = { + 'uri': uri, + 'token': token, + 'cert': cert + } +``` +I left the remainder of `do_allocate_ip()` intact but modified its calls to other functions so that my new `bundle` would be included: +```python +allocation_result = [] +try: + resource = self.inputs["resourceInfo"] + for allocation in self.inputs["ipAllocations"]: + allocation_result.append(allocate(resource, allocation, self.context, 
self.inputs["endpoint"], bundle)) +except Exception as e: + try: + rollback(allocation_result, bundle) + except Exception as rollback_e: + logging.error(f"Error during rollback of allocation result {str(allocation_result)}") + logging.error(rollback_e) + raise e +``` +I also added `bundle` to the `allocate()` function: +```python +def allocate(resource, allocation, context, endpoint, bundle): + + last_error = None + for range_id in allocation["ipRangeIds"]: + + logging.info(f"Allocating from range {range_id}") + try: + return allocate_in_range(range_id, resource, allocation, context, endpoint, bundle) + except Exception as e: + last_error = e + logging.error(f"Failed to allocate from range {range_id}: {str(e)}") + + logging.error("No more ranges. Raising last error") + raise last_error +``` +The heavy lifting is actually handled in `allocate_in_range()`. Right now, my implementation only supports doing a single allocation so I added an escape in case someone asks to do something crazy like allocate *2* IPs. I then set up my variables: +```python +def allocate_in_range(range_id, resource, allocation, context, endpoint, bundle): + if int(allocation['size']) ==1: + vmName = resource['name'] + owner = resource['owner'] + uri = bundle['uri'] + token = bundle['token'] + cert = bundle['cert'] + else: + # TODO: implement allocation of continuous block of IPs + pass + raise Exception("Not implemented") +``` +I construct a `payload` that will be passed to the phpIPAM API when an IP gets allocated to a VM: +```python +payload = { + 'hostname': vmName, + 'description': f'Reserved by vRA for {owner} at {datetime.now()}' +} +``` +That timestamp will be handy when reviewing the reservations from the phpIPAM side of things. Be sure to add an appropriate `import datetime` statement at the top of this file, and include `datetime` in `requirements.txt`. + +So now we'll construct the URI and post the allocation request to phpIPAM. 
We tell it which `range_id` to use and it will return the first available IP. +```python +allocate_uri = f'{uri}/addresses/first_free/{str(range_id)}/' +allocate_req = requests.post(allocate_uri, data=payload, headers=token, verify=cert) +allocate_req = allocate_req.json() +``` +Per the white paper, we'll need to return `ipAllocationId`, `ipAddresses`, `ipRangeId`, and `ipVersion` to vRA in an `AllocationResult`. Once again, I'll leverage the `ipaddress` module for figuring the version (and, once again, I'll add it as an import and to the `requirements.txt` file). +```python +if allocate_req['success']: + version = ipaddress.ip_address(allocate_req['data']).version + result = { + "ipAllocationId": allocation['id'], + "ipRangeId": range_id, + "ipVersion": "IPv" + str(version), + "ipAddresses": [allocate_req['data']] + } + logging.info(f"Successfully reserved {str(result['ipAddresses'])} for {vmName}.") +else: + raise Exception("Unable to allocate IP!") + +return result +``` +I also implemented a hasty `rollback()` in case something goes wrong and we need to undo the allocation: +```python +def rollback(allocation_result, bundle): + uri = bundle['uri'] + token = bundle['token'] + cert = bundle['cert'] + for allocation in reversed(allocation_result): + logging.info(f"Rolling back allocation {str(allocation)}") + ipAddresses = allocation.get("ipAddresses", None) + for ipAddress in ipAddresses: + rollback_uri = f'{uri}/addresses/{allocation.get("id")}/' + requests.delete(rollback_uri, headers=token, verify=cert) + + return +``` +The full `allocate_ip` code is [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/allocate_ip/source.py). Once more, run `mvn package -PcollectDependencies -Duser.id=${UID}` and import the new `phpIPAM.zip` package into vRA. You can then open a Cloud Assembly Cloud Template associated with one of the specified networks and hit the "Test" button to see if it works. 
You should see a new `phpIPAM_AllocateIP` action run appear on the **Extensibility > Action runs** tab. Check the Log for something like this: +```log +[2021-02-22 01:31:41,729] [INFO] - Querying for auth credentials +[2021-02-22 01:31:41,757] [INFO] - Credentials obtained successfully! +[2021-02-22 01:31:41,773] [INFO] - Allocating from range 12 +[2021-02-22 01:31:41,790] [INFO] - Successfully reserved ['172.16.40.2'] for BOW-VLTST-XXX41. +``` +You can also check for a reserved address in phpIPAM: +![The reserved address in phpIPAM](3BQnEd0bY.png) + +Almost done! + +### Step 8: 'Deallocate IP' action +The last step is to remove the IP allocation when a vRA deployment gets destroyed. It starts just like the `allocate_ip` action with our `auth_session()` function and variable initialization: +```python +def auth_session(uri, auth, cert): + auth_uri = f'{uri}/user/' + req = requests.post(auth_uri, auth=auth, verify=cert) + if req.status_code != 200: + raise requests.exceptions.RequestException('Authentication Failure!') + token = {"token": req.json()['data']['token']} + return token + +def do_deallocate_ip(self, auth_credentials, cert): + # Build variables + username = auth_credentials["privateKeyId"] + password = auth_credentials["privateKey"] + hostname = self.inputs["endpoint"]["endpointProperties"]["hostName"] + apiAppId = self.inputs["endpoint"]["endpointProperties"]["apiAppId"] + uri = f'https://{hostname}/api/{apiAppId}/' + auth = (username, password) + + # Auth to API + token = auth_session(uri, auth, cert) + bundle = { + 'uri': uri, + 'token': token, + 'cert': cert + } + + deallocation_result = [] + for deallocation in self.inputs["ipDeallocations"]: + deallocation_result.append(deallocate(self.inputs["resourceInfo"], deallocation, bundle)) + + assert len(deallocation_result) > 0 + return { + "ipDeallocations": deallocation_result + } +``` +And the `deallocate()` function is basically a prettier version of the `rollback()` function from the `allocate_ip` 
action: +```python +def deallocate(resource, deallocation, bundle): + uri = bundle['uri'] + token = bundle['token'] + cert = bundle['cert'] + ip_range_id = deallocation["ipRangeId"] + ip = deallocation["ipAddress"] + + logging.info(f"Deallocating ip {ip} from range {ip_range_id}") + + deallocate_uri = f'{uri}/addresses/{ip}/{ip_range_id}/' + requests.delete(deallocate_uri, headers=token, verify=cert) + return { + "ipDeallocationId": deallocation["id"], + "message": "Success" + } +``` +You can review the full code [here](https://github.com/jbowdre/phpIPAM-for-vRA8/blob/main/src/main/python/deallocate_ip/source.py). Build the package with Maven, import to vRA, and run another test deployment. The `phpIPAM_DeallocateIP` action should complete successfully. Something like this will be in the log: +```log +[2021-02-22 01:36:29,438] [INFO] - Querying for auth credentials +[2021-02-22 01:36:29,461] [INFO] - Credentials obtained successfully! +[2021-02-22 01:36:29,476] [INFO] - Deallocating ip 172.16.40.3 from range 12 +``` +And the Outputs section of the Details tab will show: +```json +{ + "ipDeallocations": [ + { + "message": "Success", + "ipDeallocationId": "/resources/network-interfaces/8e149a2c-d7aa-4e48-b6c6-153ed288aef3" + } + ] +} +``` + +### Success! +That's it! You can now use phpIPAM for assigning IP addresses to VMs deployed from vRealize Automation 8.x. VMware provides a few additional operations that could be added to this integration in the future (like updating existing records or allocating entire ranges rather than individual IPs) but what I've written so far satisfies the basic requirements, and it works well for my needs. + +And maybe, *just maybe*, the steps I went through developing this integration might help with integrating another IPAM solution. 
diff --git a/content/post/integrating-phpipam-with-vrealize-automation-8/pDsEh18bx.png b/content/post/integrating-phpipam-with-vrealize-automation-8/pDsEh18bx.png new file mode 100644 index 0000000..b0d0223 Binary files /dev/null and b/content/post/integrating-phpipam-with-vrealize-automation-8/pDsEh18bx.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/0ZYcORuiU.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/0ZYcORuiU.png new file mode 100644 index 0000000..8836de5 Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/0ZYcORuiU.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-adding-ad-integration.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-adding-ad-integration.png new file mode 100644 index 0000000..0e24eaf Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-adding-ad-integration.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-adding-project-to-integration.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-adding-project-to-integration.png new file mode 100644 index 0000000..568983f Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-adding-project-to-integration.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-successful-ad_machine.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-successful-ad_machine.png new file mode 100644 index 0000000..22fbdda Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-successful-ad_machine.png differ diff 
--git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-test-deploy-request.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-test-deploy-request.png new file mode 100644 index 0000000..6fecafc Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-test-deploy-request.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-joined-2.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-joined-2.png new file mode 100644 index 0000000..2f8614e Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-joined-2.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-joined.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-joined.png new file mode 100644 index 0000000..3d6713b Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-joined.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-not-joined.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-not-joined.png new file mode 100644 index 0000000..65389d7 Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/20210721-vm-not-joined.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/AzAna5Dda.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/AzAna5Dda.png new file mode 100644 index 0000000..559d4e3 Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/AzAna5Dda.png differ diff --git 
a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/HHiShFlnT.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/HHiShFlnT.png new file mode 100644 index 0000000..e2c1f4d Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/HHiShFlnT.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/Zz0D9wjYr.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/Zz0D9wjYr.png new file mode 100644 index 0000000..620996c Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/Zz0D9wjYr.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/gOTzVawJE.png b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/gOTzVawJE.png new file mode 100644 index 0000000..309e0f3 Binary files /dev/null and b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/gOTzVawJE.png differ diff --git a/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/index.md b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/index.md new file mode 100644 index 0000000..cca5a3b --- /dev/null +++ b/content/post/joining-vms-to-active-directory-in-site-specific-ous-with-vra8/index.md @@ -0,0 +1,232 @@ +--- +series: vRA8 +date: "2021-07-21T00:00:00Z" +thumbnail: 20210721-successful-ad_machine.png +usePageBundles: true +tags: +- vmware +- vra +- abx +- activedirectory +- automation +- windows +title: Joining VMs to Active Directory in site-specific OUs with vRA8 +--- +Connecting a deployed Windows VM to an Active Directory domain is pretty easy; just apply an appropriately-configured [customization spec](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-CAEB6A70-D1CF-446E-BC64-EC42CDB47117.html) and vCenter will take care 
of it for you. Of course, you'll likely then need to move the newly-created computer object to the correct Organizational Unit so that it gets all the right policies and such. + +Fortunately, vRA 8 supports adding an Active Directory integration to handle staging computer objects in a designated OU. And vRA 8.3 even [introduced the ability](https://blogs.vmware.com/management/2021/02/whats-new-with-vrealize-automation-8-3-technical-overview.html#:~:text=New%20Active%20Directory%20Cloud%20Template%20Properties) to let blueprints override the relative DN path. That will be helpful in my case since I'll want the servers to be placed in different OUs depending on which site they get deployed to: + +| **Site** | **OU** | +| --- | --- | +| `BOW` | `lab.bowdre.net/LAB/BOW/Computers/Servers` | +| `DRE` | `lab.bowdre.net/LAB/DRE/Computers/Servers` | + + +I didn't find a lot of documentation on how to make this work, though, so here's how I've implemented it in my lab (now [running vRA 8.4.2](https://twitter.com/johndotbowdre/status/1416037317052178436)). + +### Adding the AD integration +First things first: connecting vRA to AD. I do this by opening the Cloud Assembly interface, navigating to **Infrastructure > Connections > Integrations**, and clicking the **Add Integration** button. I'm then prompted to choose the integration type so I select the **Active Directory** one, and then I fill in the required information: a name (`Lab AD` seems appropriate), my domain controller as the LDAP host (`ldap://win01.lab.bowdre.net:389`), credentials for an account with sufficient privileges to create and delete computer objects (`lab\vra`), and finally the base DN to be used for the LDAP connection (`DC=lab,DC=bowdre,DC=net`). + +![Creating the new AD integration](20210721-adding-ad-integration.png) + +Clicking the **Validate** button quickly confirms that I've entered the information correctly, and then I can click **Add** to save my work. 
+ +I'll then need to associate the integration with a project by opening the new integration, navigating to the **Projects** tab, and clicking **Add Project**. Now I select the project name from the dropdown, enter a valid relative OU (`OU=LAB`), and enable the options to let me override the relative OU and optionally skip AD actions from the cloud template. + +![Project options for the AD integration](20210721-adding-project-to-integration.png) + + +### Customization specs +As mentioned above, I'll leverage the customization specs in vCenter to handle the actual joining of a computer to the domain. I maintain two specs for Windows deployments (one to join the domain and one to stay on the workgroup), and I can let the vRA cloud template decide which should be applied to a given deployment. + +First, the workgroup spec, appropriately called `vra-win-workgroup`: +![Workgroup spec](AzAna5Dda.png) + +It's about as basic as can be, including using DHCP for the network configuration (which doesn't really matter since the VM will eventually get a [static IP assigned from {php}IPAM](integrating-phpipam-with-vrealize-automation-8)). + +`vra-win-domain` is basically the same, with one difference: +![Domain spec](0ZYcORuiU.png) + +Now to reference these specs from a cloud template... + +### Cloud template +I want to make sure that users requesting a deployment are able to pick whether or not a system should be joined to the domain, so I'm going to add that as an input option on the template: + +```yaml +inputs: + [...] + adJoin: + title: Join to AD domain + type: boolean + default: true + [...] +``` + +This new `adJoin` input is a boolean so it will appear on the request form as a checkbox, and it will default to `true`; we'll assume that any Windows deployment should be automatically joined to AD unless this option gets unchecked. 
+ +In the `resources` section of the template, I'll set a new property called `ignoreActiveDirectory` to be the inverse of the `adJoin` input; that will tell the AD integration not to do anything if the box to join the VM to the domain is unchecked. I'll also use `activeDirectory: relativeDN` to insert the appropriate site code into the DN where the computer object will be created. And, finally, I'll reference the `customizationSpec` and use [cloud template conditional syntax](https://docs.vmware.com/en/vRealize-Automation/8.4/Using-and-Managing-Cloud-Assembly/GUID-12F0BC64-6391-4E5F-AA48-C5959024F3EB.html#conditions-4) to apply the correct spec based on whether it's a domain or workgroup deployment. (These conditionals take the pattern `'${conditional-expression ? true-value : false-value}'`). + +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + [...] + ignoreActiveDirectory: '${!input.adJoin}' + activeDirectory: + relativeDN: '${"OU=Servers,OU=Computers,OU=" + input.site + ",OU=LAB"}' + customizationSpec: '${input.adJoin ? "vra-win-domain" : "vra-win-workgroup"}' + [...] 
+``` + +Here's the current cloud template in its entirety: + +```yaml +formatVersion: 1 +inputs: + site: + type: string + title: Site + enum: + - BOW + - DRE + image: + type: string + title: Operating System + oneOf: + - title: Windows Server 2019 + const: ws2019 + default: ws2019 + size: + title: Resource Size + type: string + oneOf: + - title: 'Micro [1vCPU|1GB]' + const: micro + - title: 'Tiny [1vCPU|2GB]' + const: tiny + - title: 'Small [2vCPU|2GB]' + const: small + default: small + network: + title: Network + type: string + adJoin: + title: Join to AD domain + type: boolean + default: true + environment: + type: string + title: Environment + oneOf: + - title: Development + const: D + - title: Testing + const: T + - title: Production + const: P + default: D + function: + type: string + title: Function Code + oneOf: + - title: Application (APP) + const: APP + - title: Desktop (DSK) + const: DSK + - title: Network (NET) + const: NET + - title: Service (SVS) + const: SVS + - title: Testing (TST) + const: TST + default: TST + app: + type: string + title: Application Code + minLength: 3 + maxLength: 3 + default: xxx + description: + type: string + title: Description + description: Server function/purpose + default: Testing and evaluation + poc_name: + type: string + title: Point of Contact Name + default: Jack Shephard + poc_email: + type: string + title: Point of Contact Email + default: jack.shephard@virtuallypotato.com + pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$' + ticket: + type: string + title: Ticket/Request Number + default: 4815162342 +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + site: '${input.site}' + environment: '${input.environment}' + function: '${input.function}' + app: '${input.app}' + ignoreActiveDirectory: '${!input.adJoin}' + activeDirectory: + relativeDN: '${"OU=Servers,OU=Computers,OU=" + input.site + ",OU=LAB"}' + customizationSpec: '${input.adJoin ? 
"vra-win-domain" : "vra-win-workgroup"}' + dnsDomain: lab.bowdre.net + poc: '${input.poc_name + " (" + input.poc_email + ")"}' + ticket: '${input.ticket}' + description: '${input.description}' + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + constraints: + - tag: 'comp:${to_lower(input.site)}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing + constraints: + - tag: 'net:${input.network}' +``` + +The last thing I need to do before leaving the Cloud Assembly interface is smash that **Version** button at the bottom of the cloud template editor so that the changes will be visible to Service Broker: +![New version](gOTzVawJE.png) + +### Service Broker custom form updates +... and the *first* thing to do after entering the Service Broker UI is to navigate to **Content Sources**, click on my Lab content source, and then click **Save & Import** to bring in the new version. I can then go to **Content**, click the little three-dot menu icon next to my `WindowsDemo` cloud template, and pick the **Customize form** option. + +This bit will be pretty quick. I just need to look for the new `Join to AD domain` element on the left: +![New element on left](Zz0D9wjYr.png) + +And drag-and-drop it onto the canvas in the middle. I'll stick it directly after the `Network` field: +![New element on the canvas](HHiShFlnT.png) + +I don't need to do anything else here since I'm not trying to do any fancy logic or anything, so I'll just hit **Save** and move on to... + +### Testing +Now to submit the request through Service Broker to see if this actually works: +![Submitting the request](20210721-test-deploy-request.png) + +After a few minutes, I can go into Cloud Assembly and navigate to **Extensibility > Activity > Actions Runs** and look at the **Integration Runs** to see if the `ad_machine` action has completed yet. +![Successful ad_machine action](20210721-successful-ad_machine.png) + +Looking good! 
And once the deployment completes, I can look at the VM in vCenter to see that it has registered a fully-qualified DNS name since it was automatically joined to the domain: +![Domain-joined VM](20210721-vm-joined.png) + +I can also repeat the test for a VM deployed to the `DRE` site just to confirm that it gets correctly placed in that site's OU: +![Another domain-joined VM](20210721-vm-joined-2.png) + +And I'll fire off another deployment with the `adJoin` box *unchecked* to test that I can also skip the AD configuration completely: +![VM not joined to the domain](20210721-vm-not-joined.png) + +### Conclusion +Confession time: I had actually started writing this post weeks ago. At that point, my efforts to bend the built-in AD integration to my will had been fairly unsuccessful, so I was instead working on a custom vRO workflow to accomplish the same basic thing. I circled back to try the AD integration again after upgrading the vRA environment to the latest 8.4.2 release, and found that it actually works quite well now. So I happily scrapped my ~50 lines of messy vRO JavaScript in favor of *just three lines* of YAML in the cloud template. + +I love it when things work out! \ No newline at end of file diff --git a/content/post/k8s-on-vsphere-node-template-with-packer/index.md b/content/post/k8s-on-vsphere-node-template-with-packer/index.md new file mode 100644 index 0000000..9a3eb50 --- /dev/null +++ b/content/post/k8s-on-vsphere-node-template-with-packer/index.md @@ -0,0 +1,1314 @@ +--- +title: "K8s on vSphere: Building a Kubernetes Node Template With Packer" # Title of the blog post. +date: 2022-12-10T17:00:00-06:00 # Date of post creation. +# lastmod: 2022-12-03T10:41:17-08:00 # Date when last modified +description: "Using HashiCorp Packer to automatically build Kubernetes node templates on vSphere." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. 
+draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "thumbnail.jpg" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: K8s on vSphere +tags: + - vmware + - linux + - shell + - automation + - kubernetes + - containers + - infrastructure-as-code + - packer +comment: true # Disable comment if false. +--- +I've been leveraging the open-source Tanzu Community Edition Kubernetes distribution for a little while now, both [in my home lab](/tanzu-community-edition-k8s-homelab) and at work, so I was disappointed to learn that VMware was [abandoning the project](https://github.com/vmware-tanzu/community-edition). TCE had been a pretty good fit for my needs, and now I needed to search for a replacement. VMware is offering a free version of Tanzu Kubernetes Grid as a replacement, but it comes with a license solely for non-commercial use so I wouldn't be able to use it at work. And I'd really like to use the same solution in both environments to make development and testing easier on me. + +There are a bunch of great projects for running Kubernetes in development/lab environments, and others optimized for much larger enterprise environments, but I struggled to find a product that felt like a good fit for both in the way TCE was. 
My workloads are few and pretty simple so most enterprise K8s variants (Tanzu included) would feel like overkill, but I do need to ensure everything remains highly-available in the data centers at work. + +Plus, I thought it would be a fun learning experience to roll my own Kubernetes on vSphere! + +In the next couple of posts, I'll share the details of how I'm using Terraform to provision production-ready vanilla Kubernetes clusters on vSphere (complete with the vSphere Container Storage Interface plugin!) in a consistent and repeatable way. I also plan to document one of the ways I'm leveraging these clusters, which is using them as a part of a Gitlab CI/CD pipeline to churn out weekly VM template builds so I never again have to worry about my templates being out of date. + +I have definitely learned a ton in the process (and still have a lot more to learn), but today I'll start by describing how I'm leveraging Packer to create a single VM template ready to enter service as a Kubernetes compute node. + +## What's Packer, and why? +[HashiCorp Packer](https://www.packer.io/) is a free open-source tool designed to create consistent, repeatable machine images. It's pretty killer as a part of a CI/CD pipeline to kick off new builds based on a schedule or code commits, but also works great for creating builds on-demand. Packer uses the [HashiCorp Configuration Language (HCL)](https://developer.hashicorp.com/packer/docs/templates/hcl_templates) to describe all of the properties of a VM build in a concise and readable format. + +You might ask why I would bother with using a powerful tool like Packer if I'm just going to be building a single template. Surely I could just do that by hand, right? And of course, you'd be right - but using an Infrastructure as Code tool even for one-off builds has some pretty big advantages. 
+ +- **It's fast.** Packer is able to build a complete VM (including pulling in all available OS and software updates) in just a few minutes, much faster than I could click through an installer on my own. +- **It's consistent.** Packer will follow the exact same steps for every build, removing the small variations (and typos!) that would surely show up if I did the builds manually. +- **It's great for testing changes.** Since Packer builds are so fast and consistent, it makes it incredibly easy to test changes as I go. I can be confident that the *only* changes between two builds will be the changes I deliberately introduced. +- **It's self-documenting.** The entire VM (and its guest OS) is described completely within the Packer HCL file(s), which I can review to remember which packages were installed, which user account(s) were created, what partition scheme was used, and anything else I might need to know. +- **It supports change tracking.** A Packer build is just a set of HCL files so it's easy to sync them with a version control system like Git to track (and revert) changes as needed. + +Packer is also extremely versatile, and a broad set of [external plugins](https://developer.hashicorp.com/packer/plugins) expand its capabilities to support creating machines for basically any environment. For my needs, I'll be utilizing the [vsphere-iso](https://developer.hashicorp.com/packer/plugins/builders/vsphere/vsphere-iso) builder which uses the vSphere API to remotely build VMs directly on the hypervisors. + +Sounds pretty cool, right? I'm not going to go too deep into "how to Packer" in this post, but HashiCorp does provide some [pretty good tutorials](https://developer.hashicorp.com/packer/tutorials) to help you get started. + +## Prerequisites +### Install Packer +Before being able to *use* Packer, you have to install it. 
On Debian/Ubuntu Linux, this process consists of adding the HashiCorp GPG key and software repository, and then simply installing the package: +```shell +curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - +sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +sudo apt-get update && sudo apt-get install packer +``` + +You can learn how to install Packer on other systems by following [this tutorial from HashiCorp](https://developer.hashicorp.com/packer/tutorials/docker-get-started/get-started-install-cli). + +### Configure privileges +Packer will need a user account with sufficient privileges in the vSphere environment to be able to create and manage a VM. I'd recommend using an account dedicated to automation tasks, and assigning it the required privileges listed in [the `vsphere-iso` documentation](https://developer.hashicorp.com/packer/plugins/builders/vsphere/vsphere-iso#required-vsphere-privileges). + +### Gather installation media +My Kubernetes node template will use Ubuntu 20.04 LTS as the OS so I'll go ahead and download the [server installer ISO](https://releases.ubuntu.com/20.04.5/) and upload it to a vSphere datastore to make it available to Packer. + +## Template build +After the OS is installed and minimally configured, I'll need to add in Kubernetes components like `containerd`, `kubectl`, `kubelet`, and `kubeadm`, and then apply a few additional tweaks to get it fully ready. + +You can see the entirety of my Packer configuration [on GitHub](https://github.com/jbowdre/vsphere-k8s/tree/main/packer), but I'll talk through each file as we go along. + +### File/folder layout +After quite a bit of experimentation, I've settled on a preferred way to organize my Packer build files. I've found that this structure makes the builds modular enough that it's easy to reuse components in other builds, but still consolidated enough to be easily manageable. 
This layout is, of course, largely subjective - it's just what works well *for me*: +``` +. +├── certs +│   ├── ca.cer +├── data +│   ├── meta-data +│   └── user-data.pkrtpl.hcl +├── scripts +│   ├── cleanup-cloud-init.sh +│   ├── cleanup-subiquity.sh +│   ├── configure-sshd.sh +│   ├── disable-multipathd.sh +│   ├── disable-release-upgrade-motd.sh +│   ├── enable-vmware-customization.sh +│   ├── generalize.sh +│   ├── install-ca-certs.sh +│   ├── install-k8s.sh +│   ├── persist-cloud-init-net.sh +│   ├── update-packages.sh +│   ├── wait-for-cloud-init.sh +│   └── zero-disk.sh +├── ubuntu-k8s.auto.pkrvars.hcl +├── ubuntu-k8s.pkr.hcl +└── variables.pkr.hcl +``` + +- The `certs` folder holds the Base64-encoded PEM-formatted certificate of my [internal Certificate Authority](/ldaps-authentication-tanzu-community-edition/#prequisite) which will be automatically installed in the provisioned VM's trusted certificate store. +- The `data` folder stores files for [generating the `cloud-init` configuration](#user-datapkrtplhcl) that will automate the OS installation and configuration. +- The `scripts` directory holds a [collection of scripts](#post_install_scripts) used for post-install configuration tasks. Sure, I could just use a single large script, but using a bunch of smaller ones helps keep things modular and easy to reuse elsewhere. +- `variables.pkr.hcl` declares [all of the variables](#variablespkrhcl) which will be used in the Packer build, and sets the default values for some of them. +- `ubuntu-k8s.auto.pkrvars.hcl` [assigns values](#ubuntu-k8sautopkrvarshcl) to those variables. This is where most of the user-facing options will be configured, such as usernames, passwords, and environment settings. +- `ubuntu-k8s.pkr.hcl` is where the [build process](#ubuntu-k8spkrhcl) is actually described. + +Let's quickly run through that build process, and then I'll back up and examine some other components in detail. 
+ +### `ubuntu-k8s.pkr.hcl` +#### `packer` block +The first block in the file tells Packer about the minimum version requirements for Packer as well as the external plugins used for the build: +``` +// BLOCK: packer +// The Packer configuration. +packer { + required_version = ">= 1.8.2" + required_plugins { + vsphere = { + version = ">= 1.0.8" + source = "github.com/hashicorp/vsphere" + } + sshkey = { + version = ">= 1.0.3" + source = "github.com/ivoronin/sshkey" + } + } +} +``` +As I mentioned above, I'll be using the official [`vsphere` plugin](https://github.com/hashicorp/packer-plugin-vsphere) to handle the provisioning on my vSphere environment. I'll also make use of the [`sshkey` plugin](https://github.com/ivoronin/packer-plugin-sshkey) to dynamically generate SSH keys for the build process. + +#### `data` block +This section would be used for loading information from various data sources, but I'm only using it for the `sshkey` plugin (as mentioned above). +```text +// BLOCK: data +// Defines data sources. +data "sshkey" "install" { + type = "ed25519" + name = "packer_key" +} +``` + +This will generate an Ed25519 keypair, and the public key will include the identifier `packer_key` to make it easier to manage later on. Using this plugin to generate keys means that I don't have to worry about storing a private key somewhere in the build directory. + +#### `locals` block +Locals are a type of Packer variable which aren't explicitly declared in the `variables.pkr.hcl` file. They only exist within the context of a single build (hence the "local" name). Typical Packer variables are static and don't support string manipulation; locals, however, do support expressions that can be used to change their value on the fly. This makes them very useful when you need to combine variables into a single string or concatenate lists of SSH public keys (such as in the highlighted lines): +```text {hl_lines=[10,17]} +// BLOCK: locals +// Defines local variables. 
+locals { + ssh_public_key = data.sshkey.install.public_key + ssh_private_key_file = data.sshkey.install.private_key_path + build_tool = "HashiCorp Packer ${packer.version}" + build_date = formatdate("YYYY-MM-DD hh:mm ZZZ", timestamp()) + build_description = "Kubernetes Ubuntu 20.04 Node template\nBuild date: ${local.build_date}\nBuild tool: ${local.build_tool}" + iso_paths = ["[${var.common_iso_datastore}] ${var.iso_path}/${var.iso_file}"] + iso_checksum = "${var.iso_checksum_type}:${var.iso_checksum_value}" + data_source_content = { + "/meta-data" = file("data/meta-data") + "/user-data" = templatefile("data/user-data.pkrtpl.hcl", { + build_username = var.build_username + build_password = bcrypt(var.build_password) + ssh_keys = concat([local.ssh_public_key], var.ssh_keys) + vm_guest_os_language = var.vm_guest_os_language + vm_guest_os_keyboard = var.vm_guest_os_keyboard + vm_guest_os_timezone = var.vm_guest_os_timezone + vm_guest_os_hostname = var.vm_name + apt_mirror = var.cloud_init_apt_mirror + apt_packages = var.cloud_init_apt_packages + }) + } +} +``` + +This block also makes use of the built-in [`templatefile()` function](https://developer.hashicorp.com/packer/docs/templates/hcl_templates/functions/file/templatefile) to insert build-specific variables into the `user-data` file for [`cloud-init`](https://cloud-init.io/) (more on that in a bit). + +#### `source` block +The `source` block tells the `vsphere-iso` builder how to connect to vSphere, what hardware specs to set on the VM, and what to do with the VM once the build has finished (convert it to template, export it to OVF, and so on). + +You'll notice that most of this is just mapping user-defined variables (with the `var.` prefix) to properties used by `vsphere-iso`: + +```text +// BLOCK: source +// Defines the builder configuration blocks. 
+source "vsphere-iso" "ubuntu-k8s" { + + // vCenter Server Endpoint Settings and Credentials + vcenter_server = var.vsphere_endpoint + username = var.vsphere_username + password = var.vsphere_password + insecure_connection = var.vsphere_insecure_connection + + // vSphere Settings + datacenter = var.vsphere_datacenter + cluster = var.vsphere_cluster + datastore = var.vsphere_datastore + folder = var.vsphere_folder + + // Virtual Machine Settings + vm_name = var.vm_name + vm_version = var.common_vm_version + guest_os_type = var.vm_guest_os_type + firmware = var.vm_firmware + CPUs = var.vm_cpu_count + cpu_cores = var.vm_cpu_cores + CPU_hot_plug = var.vm_cpu_hot_add + RAM = var.vm_mem_size + RAM_hot_plug = var.vm_mem_hot_add + cdrom_type = var.vm_cdrom_type + remove_cdrom = var.common_remove_cdrom + disk_controller_type = var.vm_disk_controller_type + storage { + disk_size = var.vm_disk_size + disk_thin_provisioned = var.vm_disk_thin_provisioned + } + network_adapters { + network = var.vsphere_network + network_card = var.vm_network_card + } + tools_upgrade_policy = var.common_tools_upgrade_policy + notes = local.build_description + configuration_parameters = { + "devices.hotplug" = "FALSE" + } + + // Removable Media Settings + iso_url = var.iso_url + iso_paths = local.iso_paths + iso_checksum = local.iso_checksum + cd_content = local.data_source_content + cd_label = var.cd_label + + // Boot and Provisioning Settings + boot_order = var.vm_boot_order + boot_wait = var.vm_boot_wait + boot_command = var.vm_boot_command + ip_wait_timeout = var.common_ip_wait_timeout + shutdown_command = var.vm_shutdown_command + shutdown_timeout = var.common_shutdown_timeout + + // Communicator Settings and Credentials + communicator = "ssh" + ssh_username = var.build_username + ssh_private_key_file = local.ssh_private_key_file + ssh_clear_authorized_keys = var.build_remove_keys + ssh_port = var.communicator_port + ssh_timeout = var.communicator_timeout + + // Snapshot Settings + 
create_snapshot = var.common_snapshot_creation + snapshot_name = var.common_snapshot_name + + // Template and Content Library Settings + convert_to_template = var.common_template_conversion + dynamic "content_library_destination" { + for_each = var.common_content_library_name != null ? [1] : [] + content { + library = var.common_content_library_name + description = local.build_description + ovf = var.common_content_library_ovf + destroy = var.common_content_library_destroy + skip_import = var.common_content_library_skip_export + } + } + + // OVF Export Settings + dynamic "export" { + for_each = var.common_ovf_export_enabled == true ? [1] : [] + content { + name = var.vm_name + force = var.common_ovf_export_overwrite + options = [ + "extraconfig" + ] + output_directory = "${var.common_ovf_export_path}/${var.vm_name}" + } + } +} +``` + +#### `build` block +This block brings everything together and executes the build. It calls the `source.vsphere-iso.ubuntu-k8s` block defined above, and also ties in a `file` and a few `shell` provisioners. `file` provisioners are used to copy files (like SSL CA certificates) into the VM, while the `shell` provisioners run commands and execute scripts. Those will be handy for the post-deployment configuration tasks, like updating and installing packages. + +```text +// BLOCK: build +// Defines the builders to run, provisioners, and post-processors. 
+build { + sources = [ + "source.vsphere-iso.ubuntu-k8s" + ] + + provisioner "file" { + source = "certs" + destination = "/tmp" + } + + provisioner "shell" { + execute_command = "export KUBEVERSION=${var.k8s_version}; bash {{ .Path }}" + expect_disconnect = true + environment_vars = [ + "KUBEVERSION=${var.k8s_version}" + ] + scripts = var.post_install_scripts + } + + provisioner "shell" { + execute_command = "bash {{ .Path }}" + expect_disconnect = true + scripts = var.pre_final_scripts + } +} +``` + +So you can see that the `ubuntu-k8s.pkr.hcl` file primarily focuses on the structure and form of the build, and it's written in such a way that it can be fairly easily adapted for building other types of VMs. Very few things in this file would have to be changed since so many of the properties are derived from the variables. + +You can view the full file [here](https://github.com/jbowdre/vsphere-k8s/blob/main/packer/ubuntu-k8s.pkr.hcl). + +### `variables.pkr.hcl` +Before looking at the build-specific variable definitions, let's take a quick look at the variables I've told Packer that I intend to use. After all, Packer requires that variables be declared before they can be used. + +Most of these carry descriptions with them so I won't restate them outside of the code block here: + +```text +/* + DESCRIPTION: + Ubuntu Server 20.04 LTS variables using the Packer Builder for VMware vSphere (vsphere-iso). +*/ + +// BLOCK: variable +// Defines the input variables. + +// vSphere Credentials +variable "vsphere_endpoint" { + type = string + description = "The fully qualified domain name or IP address of the vCenter Server instance. ('vcenter.lab.local')" +} + +variable "vsphere_username" { + type = string + description = "The username to login to the vCenter Server instance. ('packer')" + sensitive = true +} + +variable "vsphere_password" { + type = string + description = "The password for the login to the vCenter Server instance." 
+ sensitive = true +} + +variable "vsphere_insecure_connection" { + type = bool + description = "Do not validate vCenter Server TLS certificate." + default = true +} + +// vSphere Settings +variable "vsphere_datacenter" { + type = string + description = "The name of the target vSphere datacenter. ('Lab Datacenter')" +} + +variable "vsphere_cluster" { + type = string + description = "The name of the target vSphere cluster. ('cluster-01')" +} + +variable "vsphere_datastore" { + type = string + description = "The name of the target vSphere datastore. ('datastore-01')" +} + +variable "vsphere_network" { + type = string + description = "The name of the target vSphere network. ('network-192.168.1.0')" +} + +variable "vsphere_folder" { + type = string + description = "The name of the target vSphere folder. ('_Templates')" +} + +// Virtual Machine Settings +variable "vm_name" { + type = string + description = "Name of the new VM to create." +} + +variable "vm_guest_os_language" { + type = string + description = "The guest operating system lanugage." + default = "en_US" +} + +variable "vm_guest_os_keyboard" { + type = string + description = "The guest operating system keyboard input." + default = "us" +} + +variable "vm_guest_os_timezone" { + type = string + description = "The guest operating system timezone." + default = "UTC" +} + +variable "vm_guest_os_type" { + type = string + description = "The guest operating system type. ('ubuntu64Guest')" +} + +variable "vm_firmware" { + type = string + description = "The virtual machine firmware. ('efi-secure'. 'efi', or 'bios')" + default = "efi-secure" +} + +variable "vm_cdrom_type" { + type = string + description = "The virtual machine CD-ROM type. ('sata', or 'ide')" + default = "sata" +} + +variable "vm_cpu_count" { + type = number + description = "The number of virtual CPUs. ('2')" +} + +variable "vm_cpu_cores" { + type = number + description = "The number of virtual CPUs cores per socket. 
('1')" +} + +variable "vm_cpu_hot_add" { + type = bool + description = "Enable hot add CPU." + default = true +} + +variable "vm_mem_size" { + type = number + description = "The size for the virtual memory in MB. ('2048')" +} + +variable "vm_mem_hot_add" { + type = bool + description = "Enable hot add memory." + default = true +} + +variable "vm_disk_size" { + type = number + description = "The size for the virtual disk in MB. ('61440' = 60GB)" + default = 61440 +} + +variable "vm_disk_controller_type" { + type = list(string) + description = "The virtual disk controller types in sequence. ('pvscsi')" + default = ["pvscsi"] +} + +variable "vm_disk_thin_provisioned" { + type = bool + description = "Thin provision the virtual disk." + default = true +} + +variable "vm_disk_eagerly_scrub" { + type = bool + description = "Enable VMDK eager scrubbing for VM." + default = false +} + +variable "vm_network_card" { + type = string + description = "The virtual network card type. ('vmxnet3' or 'e1000e')" + default = "vmxnet3" +} + +variable "common_vm_version" { + type = number + description = "The vSphere virtual hardware version. (e.g. '19')" +} + +variable "common_tools_upgrade_policy" { + type = bool + description = "Upgrade VMware Tools on reboot." + default = true +} + +variable "common_remove_cdrom" { + type = bool + description = "Remove the virtual CD-ROM(s)." + default = true +} + +// Template and Content Library Settings +variable "common_template_conversion" { + type = bool + description = "Convert the virtual machine to template. Must be 'false' for content library." + default = false +} + +variable "common_content_library_name" { + type = string + description = "The name of the target vSphere content library, if used. ('Lab-CL')" + default = null +} + +variable "common_content_library_ovf" { + type = bool + description = "Export to content library as an OVF template." 
+ default = false +} + +variable "common_content_library_destroy" { + type = bool + description = "Delete the virtual machine after exporting to the content library." + default = true +} + +variable "common_content_library_skip_export" { + type = bool + description = "Skip exporting the virtual machine to the content library. Option allows for testing / debugging without saving the machine image." + default = false +} + +// Snapshot Settings +variable "common_snapshot_creation" { + type = bool + description = "Create a snapshot for Linked Clones." + default = false +} + +variable "common_snapshot_name" { + type = string + description = "Name of the snapshot to be created if create_snapshot is true." + default = "Created By Packer" +} + +// OVF Export Settings +variable "common_ovf_export_enabled" { + type = bool + description = "Enable OVF artifact export." + default = false +} + +variable "common_ovf_export_overwrite" { + type = bool + description = "Overwrite existing OVF artifact." + default = true +} + +variable "common_ovf_export_path" { + type = string + description = "Folder path for the OVF export." +} + +// Removable Media Settings +variable "common_iso_datastore" { + type = string + description = "The name of the source vSphere datastore for ISO images. ('datastore-iso-01')" +} + +variable "iso_url" { + type = string + description = "The URL source of the ISO image. ('https://releases.ubuntu.com/20.04.5/ubuntu-20.04.5-live-server-amd64.iso')" +} + +variable "iso_path" { + type = string + description = "The path on the source vSphere datastore for ISO image. ('ISOs/Linux')" +} + +variable "iso_file" { + type = string + description = "The file name of the ISO image used by the vendor. ('ubuntu-20.04.5-live-server-amd64.iso')" +} + +variable "iso_checksum_type" { + type = string + description = "The checksum algorithm used by the vendor. 
('sha256')" +} + +variable "iso_checksum_value" { + type = string + description = "The checksum value provided by the vendor." +} + +variable "cd_label" { + type = string + description = "CD Label" + default = "cidata" +} + +// Boot Settings +variable "vm_boot_order" { + type = string + description = "The boot order for virtual machines devices. ('disk,cdrom')" + default = "disk,cdrom" +} + +variable "vm_boot_wait" { + type = string + description = "The time to wait before boot." +} + +variable "vm_boot_command" { + type = list(string) + description = "The virtual machine boot command." + default = [] +} + +variable "vm_shutdown_command" { + type = string + description = "Command(s) for guest operating system shutdown." + default = null +} + +variable "common_ip_wait_timeout" { + type = string + description = "Time to wait for guest operating system IP address response." +} + +variable "common_shutdown_timeout" { + type = string + description = "Time to wait for guest operating system shutdown." +} + +// Communicator Settings and Credentials +variable "build_username" { + type = string + description = "The username to login to the guest operating system. ('admin')" +} + +variable "build_password" { + type = string + description = "The password to login to the guest operating system." + sensitive = true +} + +variable "build_password_encrypted" { + type = string + description = "The encrypted password to login the guest operating system." + sensitive = true + default = null +} + +variable "ssh_keys" { + type = list(string) + description = "List of public keys to be added to ~/.ssh/authorized_keys." + sensitive = true + default = [] +} + +variable "build_remove_keys" { + type = bool + description = "If true, Packer will attempt to remove its temporary key from ~/.ssh/authorized_keys and /root/.ssh/authorized_keys" + default = true +} + +// Communicator Settings +variable "communicator_port" { + type = string + description = "The port for the communicator protocol." 
+} + +variable "communicator_timeout" { + type = string + description = "The timeout for the communicator protocol." +} + +variable "communicator_insecure" { + type = bool + description = "If true, do not check server certificate chain and host name" + default = true +} + +variable "communicator_ssl" { + type = bool + description = "If true, use SSL" + default = true +} + +// Provisioner Settings +variable "cloud_init_apt_packages" { + type = list(string) + description = "A list of apt packages to install during the subiquity cloud-init installer." + default = [] +} + +variable "cloud_init_apt_mirror" { + type = string + description = "Sets the default apt mirror during the subiquity cloud-init installer." + default = "" +} + +variable "post_install_scripts" { + type = list(string) + description = "A list of scripts and their relative paths to transfer and run after OS install." + default = [] +} + +variable "pre_final_scripts" { + type = list(string) + description = "A list of scripts and their relative paths to transfer and run before finalization." + default = [] +} + +// Kubernetes Settings +variable "k8s_version" { + type = string + description = "Kubernetes version to be installed. Latest stable is listed at https://dl.k8s.io/release/stable.txt" + default = "1.25.3" +} +``` + +The full `variables.pkr.hcl` can be viewed [here](https://github.com/jbowdre/vsphere-k8s/blob/main/packer/variables.pkr.hcl). + +### `ubuntu-k8s.auto.pkrvars.hcl` +Packer automatically knows to load variables defined in files ending in `*.auto.pkrvars.hcl`. Storing the variable values separately from the declarations in `variables.pkr.hcl` makes it easier to protect sensitive values. + +So I'll start by telling Packer what credentials to use for connecting to vSphere, and what vSphere resources to deploy to: +```text +/* + DESCRIPTION: + Ubuntu Server 20.04 LTS Kubernetes node variables used by the Packer Plugin for VMware vSphere (vsphere-iso). 
+*/ + +// vSphere Credentials +vsphere_endpoint = "vcsa.lab.bowdre.net" +vsphere_username = "packer" +vsphere_password = "VMware1!" +vsphere_insecure_connection = true + +// vSphere Settings +vsphere_datacenter = "NUC Site" +vsphere_cluster = "nuc-cluster" +vsphere_datastore = "nuchost-local" +vsphere_network = "MGT-Home 192.168.1.0" +vsphere_folder = "_Templates" +``` + +I'll then describe the properties of the VM itself: +```text +// Guest Operating System Settings +vm_guest_os_language = "en_US" +vm_guest_os_keyboard = "us" +vm_guest_os_timezone = "America/Chicago" +vm_guest_os_type = "ubuntu64Guest" + +// Virtual Machine Hardware Settings +vm_name = "k8s-u2004" +vm_firmware = "efi-secure" +vm_cdrom_type = "sata" +vm_cpu_count = 2 +vm_cpu_cores = 1 +vm_cpu_hot_add = true +vm_mem_size = 2048 +vm_mem_hot_add = true +vm_disk_size = 30720 +vm_disk_controller_type = ["pvscsi"] +vm_disk_thin_provisioned = true +vm_network_card = "vmxnet3" +common_vm_version = 19 +common_tools_upgrade_policy = true +common_remove_cdrom = true +``` + +Then I'll configure Packer to convert the VM to a template once the build is finished: +```text +// Template and Content Library Settings +common_template_conversion = true +common_content_library_name = null +common_content_library_ovf = false +common_content_library_destroy = true +common_content_library_skip_export = true + +// OVF Export Settings +common_ovf_export_enabled = false +common_ovf_export_overwrite = true +common_ovf_export_path = "" +``` + +Next, I'll tell it where to find the Ubuntu 20.04 ISO I downloaded and placed on a datastore, along with the SHA256 checksum to confirm its integrity: +```text +// Removable Media Settings +common_iso_datastore = "nuchost-local" +iso_url = null +iso_path = "_ISO" +iso_file = "ubuntu-20.04.5-live-server-amd64.iso" +iso_checksum_type = "sha256" +iso_checksum_value = "5035be37a7e9abbdc09f0d257f3e33416c1a0fb322ba860d42d74aa75c3468d4" +``` + +And then I'll specify the VM's boot device order, 
as well as the boot command that will be used for loading the `cloud-init` configuration into the Ubuntu installer:
+```text +// Provisioner Settings +post_install_scripts = [ + "scripts/wait-for-cloud-init.sh", + "scripts/cleanup-subiquity.sh", + "scripts/install-ca-certs.sh", + "scripts/disable-multipathd.sh", + "scripts/disable-release-upgrade-motd.sh", + "scripts/persist-cloud-init-net.sh", + "scripts/configure-sshd.sh", + "scripts/install-k8s.sh", + "scripts/update-packages.sh" +] + +pre_final_scripts = [ + "scripts/cleanup-cloud-init.sh", + "scripts/enable-vmware-customization.sh", + "scripts/zero-disk.sh", + "scripts/generalize.sh" +] + +// Kubernetes Settings +k8s_version = "1.25.3" +``` + +You can find an full example of this file [here](https://github.com/jbowdre/vsphere-k8s/blob/main/packer/ubuntu-k8s.example.pkrvars.hcl). + +### `user-data.pkrtpl.hcl` +Okay, so we've covered the Packer framework that creates the VM; now let's take a quick look at the `cloud-init` configuration that will allow the OS installation to proceed unattended. + +See the bits that look `${ like_this }`? Those place-holders will take input from the [`locals` block of `ubuntu-k8s.pkr.hcl`](#locals-block) mentioned above. So that's how all the OS properties will get set, including the hostname, locale, LVM partition layout, username, password, and SSH keys. 
+ +```yaml +#cloud-config +autoinstall: + version: 1 + early-commands: + - sudo systemctl stop ssh + locale: ${ vm_guest_os_language } + keyboard: + layout: ${ vm_guest_os_keyboard } + network: + network: + version: 2 + ethernets: + mainif: + match: + name: e* + critical: true + dhcp4: true + dhcp-identifier: mac + ssh: + install-server: true + allow-pw: true +%{ if length( apt_mirror ) > 0 ~} + apt: + primary: + - arches: [default] + uri: "${ apt_mirror }" +%{ endif ~} +%{ if length( apt_packages ) > 0 ~} + packages: +%{ for package in apt_packages ~} + - ${ package } +%{ endfor ~} +%{ endif ~} + storage: + config: + - ptable: gpt + path: /dev/sda + wipe: superblock + type: disk + id: disk-sda + - device: disk-sda + size: 1024M + wipe: superblock + flag: boot + number: 1 + grub_device: true + type: partition + id: partition-0 + - fstype: fat32 + volume: partition-0 + label: EFIFS + type: format + id: format-efi + - device: disk-sda + size: 1024M + wipe: superblock + number: 2 + type: partition + id: partition-1 + - fstype: xfs + volume: partition-1 + label: BOOTFS + type: format + id: format-boot + - device: disk-sda + size: -1 + wipe: superblock + number: 3 + type: partition + id: partition-2 + - name: sysvg + devices: + - partition-2 + type: lvm_volgroup + id: lvm_volgroup-0 + - name: home + volgroup: lvm_volgroup-0 + size: 4096M + wipe: superblock + type: lvm_partition + id: lvm_partition-home + - fstype: xfs + volume: lvm_partition-home + type: format + label: HOMEFS + id: format-home + - name: tmp + volgroup: lvm_volgroup-0 + size: 3072M + wipe: superblock + type: lvm_partition + id: lvm_partition-tmp + - fstype: xfs + volume: lvm_partition-tmp + type: format + label: TMPFS + id: format-tmp + - name: var + volgroup: lvm_volgroup-0 + size: 4096M + wipe: superblock + type: lvm_partition + id: lvm_partition-var + - fstype: xfs + volume: lvm_partition-var + type: format + label: VARFS + id: format-var + - name: log + volgroup: lvm_volgroup-0 + size: 4096M + wipe: 
superblock + type: lvm_partition + id: lvm_partition-log + - fstype: xfs + volume: lvm_partition-log + type: format + label: LOGFS + id: format-log + - name: audit + volgroup: lvm_volgroup-0 + size: 4096M + wipe: superblock + type: lvm_partition + id: lvm_partition-audit + - fstype: xfs + volume: lvm_partition-audit + type: format + label: AUDITFS + id: format-audit + - name: root + volgroup: lvm_volgroup-0 + size: -1 + wipe: superblock + type: lvm_partition + id: lvm_partition-root + - fstype: xfs + volume: lvm_partition-root + type: format + label: ROOTFS + id: format-root + - path: / + device: format-root + type: mount + id: mount-root + - path: /boot + device: format-boot + type: mount + id: mount-boot + - path: /boot/efi + device: format-efi + type: mount + id: mount-efi + - path: /home + device: format-home + type: mount + id: mount-home + - path: /tmp + device: format-tmp + type: mount + id: mount-tmp + - path: /var + device: format-var + type: mount + id: mount-var + - path: /var/log + device: format-log + type: mount + id: mount-log + - path: /var/log/audit + device: format-audit + type: mount + id: mount-audit + user-data: + package_upgrade: true + disable_root: true + timezone: ${ vm_guest_os_timezone } + hostname: ${ vm_guest_os_hostname } + users: + - name: ${ build_username } + passwd: "${ build_password }" + groups: [adm, cdrom, dip, plugdev, lxd, sudo] + lock-passwd: false + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash +%{ if length( ssh_keys ) > 0 ~} + ssh_authorized_keys: +%{ for ssh_key in ssh_keys ~} + - ${ ssh_key } +%{ endfor ~} +%{ endif ~} +``` + +View the full file [here](https://github.com/jbowdre/vsphere-k8s/blob/main/packer/data/user-data.pkrtpl.hcl). (The `meta-data` file is [empty](https://github.com/jbowdre/vsphere-k8s/blob/main/packer/data/meta-data), by the way.) + + +### `post_install_scripts` +After the OS install is completed, the `shell` provisioner will connect to the VM through SSH and run through some tasks. 
Remember how I keep talking about this build being modular? That goes down to the scripts, too, so I can use individual pieces in other builds without needing to do a lot of tweaking. + +You can find all of the scripts [here](https://github.com/jbowdre/vsphere-k8s/tree/main/packer/scripts). + +#### `wait-for-cloud-init.sh` +This simply holds up the process until the `/var/lib/cloud//instance/boot-finished` file has been created, signifying the completion of the `cloud-init` process: +```shell +#!/bin/bash -eu +echo '>> Waiting for cloud-init...' +while [ ! -f /var/lib/cloud/instance/boot-finished ]; do + sleep 1 +done +``` + +#### `cleanup-subiquity.sh` +Next I clean up any network configs that may have been created during the install process: +```shell +#!/bin/bash -eu +if [ -f /etc/cloud/cloud.cfg.d/99-installer.cfg ]; then + sudo rm /etc/cloud/cloud.cfg.d/99-installer.cfg + echo 'Deleting subiquity cloud-init config' +fi + +if [ -f /etc/cloud/cloud.cfg.d/subiquity-disable-cloudinit-networking.cfg ]; then + sudo rm /etc/cloud/cloud.cfg.d/subiquity-disable-cloudinit-networking.cfg + echo 'Deleting subiquity cloud-init network config' +fi +``` + +#### `install-ca-certs.sh` +The [`file` provisioner](#build-block) mentioned above helpfully copied my custom CA certs to the `/tmp/certs/` folder on the VM; this script will install them into the certificate store: +```shell +#!/bin/bash -eu +echo '>> Installing custom certificates...' 
+sudo cp /tmp/certs/* /usr/local/share/ca-certificates/ +cd /usr/local/share/ca-certificates/ +for file in *.cer; do + sudo mv -- "$file" "${file%.cer}.crt" +done +sudo /usr/sbin/update-ca-certificates +``` + +#### `disable-multipathd.sh` +This disables `multipathd`: +```shell +#!/bin/bash -eu +sudo systemctl disable multipathd +echo 'Disabling multipathd' +``` + +#### `disable-release-upgrade-motd.sh` +And this one disable the release upgrade notices that would otherwise be displayed upon each login: +```shell +#!/bin/bash -eu +echo '>> Disabling release update MOTD...' +sudo chmod -x /etc/update-motd.d/91-release-upgrade +``` + +#### `persist-cloud-init-net.sh` +I want to make sure that this VM keeps the same IP address following the reboot that will come in a few minutes, so I 'll set a quick `cloud-init` option to help make sure that happens: +```shell +#!/bin/sh -eu +echo '>> Preserving network settings...' +echo 'manual_cache_clean: True' | sudo tee -a /etc/cloud/cloud.cfg +``` + +#### `configure-sshd.sh` +Then I just set a few options for the `sshd` configuration, like disabling root login: + +```shell +#!/bin/bash -eu +echo '>> Configuring SSH' +sudo sed -i 's/.*PermitRootLogin.*/PermitRootLogin no/' /etc/ssh/sshd_config +sudo sed -i 's/.*PubkeyAuthentication.*/PubkeyAuthentication yes/' /etc/ssh/sshd_config +sudo sed -i 's/.*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config +``` + +#### `install-k8s.sh` +This script is a little longer and takes care of all the Kubernetes-specific settings and packages that will need to be installed on the VM. + +First I enable the required `overlay` and `br_netfilter` modules: +```shell +#!/bin/bash -eu +echo ">> Installing Kubernetes components..." + +# Configure and enable kernel modules +echo ".. 
configure kernel modules" +cat << EOF | sudo tee /etc/modules-load.d/containerd.conf +overlay +br_netfilter +EOF + +sudo modprobe overlay +sudo modprobe br_netfilter +``` + +Then I'll make some networking tweaks to enable forwarding and bridging: +```shell +# Configure networking +echo ".. configure networking" +cat << EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf +net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward = 1 +net.bridge.bridge-nf-call-ip6tables = 1 +EOF + +sudo sysctl --system +``` + +Next, set up `containerd` as the container runtime: +```shell +# Setup containerd +echo ".. setup containerd" +sudo apt-get update && sudo apt-get install -y containerd apt-transport-https jq +sudo mkdir -p /etc/containerd +sudo containerd config default | sudo tee /etc/containerd/config.toml +sudo systemctl restart containerd +``` + +Then disable swap: +```shell +# Disable swap +echo ".. disable swap" +sudo sed -i '/[[:space:]]swap[[:space:]]/ s/^\(.*\)$/#\1/g' /etc/fstab +sudo swapoff -a +``` + +Next I'll install the Kubernetes components and (crucially) `apt-mark hold` them so they won't be automatically upgraded without it being a coordinated change: +```shell +# Install Kubernetes +echo ".. 
install kubernetes version ${KUBEVERSION}" +sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg +echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list +sudo apt-get update && sudo apt-get install -y kubelet="${KUBEVERSION}"-00 kubeadm="${KUBEVERSION}"-00 kubectl="${KUBEVERSION}"-00 +sudo apt-mark hold kubelet kubeadm kubectl +``` + +#### `update-packages.sh` +Lastly, I'll be sure to update all installed packages (excepting the Kubernetes ones, of course), and then perform a reboot to make sure that any new kernel modules get loaded: +```shell +#!/bin/bash -eu +echo '>> Checking for and installing updates...' +sudo apt-get update && sudo apt-get -y upgrade +echo '>> Rebooting!' +sudo reboot +``` + +### `pre_final_scripts` +After the reboot, all that's left are some cleanup tasks to get the VM ready to be converted to a template and subsequently cloned and customized. + +#### `cleanup-cloud-init.sh` +I'll start with cleaning up the `cloud-init` state: +```shell +#!/bin/bash -eu +echo '>> Cleaning up cloud-init state...' +sudo cloud-init clean -l +``` + +#### `enable-vmware-customization.sh` +And then be (re)enable the ability for VMware to be able to customize the guest successfully: +```shell +#!/bin/bash -eu +echo '>> Enabling legacy VMware Guest Customization...' +echo 'disable_vmware_customization: true' | sudo tee -a /etc/cloud/cloud.cfg +sudo vmware-toolbox-cmd config set deployPkg enable-custom-scripts true +``` + +#### `zero-disk.sh` +I'll also execute this handy script to free up unused space on the virtual disk. 
It works by creating a file which completely fills up the disk, and then deleting that file: +```shell +#!/bin/bash -eu +echo '>> Zeroing free space to reduce disk size' +sudo sh -c 'dd if=/dev/zero of=/EMPTY bs=1M || true; sync; sleep 1; sync' +sudo sh -c 'rm -f /EMPTY; sync; sleep 1; sync' +``` + +#### `generalize.sh` +Lastly, let's do a final run of cleaning up logs, temporary files, and unique identifiers that don't need to exist in a template. This script will also remove the SSH key with the `packer_key` identifier since that won't be needed anymore. +```shell +#!/bin/bash -eu +# Prepare a VM to become a template. + +echo '>> Clearing audit logs...' +sudo sh -c 'if [ -f /var/log/audit/audit.log ]; then + cat /dev/null > /var/log/audit/audit.log + fi' +sudo sh -c 'if [ -f /var/log/wtmp ]; then + cat /dev/null > /var/log/wtmp + fi' +sudo sh -c 'if [ -f /var/log/lastlog ]; then + cat /dev/null > /var/log/lastlog + fi' +sudo sh -c 'if [ -f /etc/logrotate.conf ]; then + logrotate -f /etc/logrotate.conf 2>/dev/null + fi' +sudo rm -rf /var/log/journal/* +sudo rm -f /var/lib/dhcp/* +sudo find /var/log -type f -delete + +echo '>> Clearing persistent udev rules...' +sudo sh -c 'if [ -f /etc/udev/rules.d/70-persistent-net.rules ]; then + rm /etc/udev/rules.d/70-persistent-net.rules + fi' + +echo '>> Clearing temp dirs...' +sudo rm -rf /tmp/* +sudo rm -rf /var/tmp/* + +echo '>> Clearing host keys...' +sudo rm -f /etc/ssh/ssh_host_* + +echo '>> Removing Packer SSH key...' +sed -i '/packer_key/d' ~/.ssh/authorized_keys + +echo '>> Clearing machine-id...' +sudo truncate -s 0 /etc/machine-id +if [ -f /var/lib/dbus/machine-id ]; then + sudo rm -f /var/lib/dbus/machine-id + sudo ln -s /etc/machine-id /var/lib/dbus/machine-id +fi + +echo '>> Clearing shell history...' 
+unset HISTFILE +history -cw +echo > ~/.bash_history +sudo rm -f /root/.bash_history +``` + +### Kick out the jams (or at least the build) +Now that all the ducks are nicely lined up, let's give them some marching orders and see what happens. All I have to do is open a terminal session to the folder containing the `.pkr.hcl` files, and then run the Packer build command: + +```shell +packer packer build -on-error=abort -force . +``` + +{{% notice info "Flags" %}} +The `-on-error=abort` option makes sure that the build will abort if any steps in the build fail, and `-force` tells Packer to delete any existing VMs/templates with the same name as the one I'm attempting to build. +{{% /notice %}} + +And off we go! Packer will output details as it goes which makes it easy to troubleshoot if anything goes wrong. +![Packer build session in the terminal](packer_terminal_progress.jpg) + +In this case, though, everything works just fine, and I'm met with a happy "success" message! +![Packer build session complete!](packer_terminal_complete.jpg) + +And I can pop over to vSphere to confirm that everything looks right: +![The new template in vSphere](template_in_vsphere.png) + +## Next steps +My brand new `k8s-u2004` template is ready for use! In the next post, I'll walk through the process of *manually* cloning this template to create my Kubernetes nodes, initializing the cluster, and installing the vSphere integrations. After that process is sorted out nicely, we'll take a look at how to use Terraform to do it all automagically. Stay tuned! 
\ No newline at end of file diff --git a/content/post/k8s-on-vsphere-node-template-with-packer/packer_terminal_complete.jpg b/content/post/k8s-on-vsphere-node-template-with-packer/packer_terminal_complete.jpg new file mode 100644 index 0000000..ea253c9 Binary files /dev/null and b/content/post/k8s-on-vsphere-node-template-with-packer/packer_terminal_complete.jpg differ diff --git a/content/post/k8s-on-vsphere-node-template-with-packer/packer_terminal_progress.jpg b/content/post/k8s-on-vsphere-node-template-with-packer/packer_terminal_progress.jpg new file mode 100644 index 0000000..8df42e9 Binary files /dev/null and b/content/post/k8s-on-vsphere-node-template-with-packer/packer_terminal_progress.jpg differ diff --git a/content/post/k8s-on-vsphere-node-template-with-packer/template_in_vsphere.png b/content/post/k8s-on-vsphere-node-template-with-packer/template_in_vsphere.png new file mode 100644 index 0000000..0cfb33e Binary files /dev/null and b/content/post/k8s-on-vsphere-node-template-with-packer/template_in_vsphere.png differ diff --git a/content/post/k8s-on-vsphere-node-template-with-packer/thumbnail.jpg b/content/post/k8s-on-vsphere-node-template-with-packer/thumbnail.jpg new file mode 100644 index 0000000..519219a Binary files /dev/null and b/content/post/k8s-on-vsphere-node-template-with-packer/thumbnail.jpg differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/cat-working.gif b/content/post/ldaps-authentication-tanzu-community-edition/cat-working.gif new file mode 100644 index 0000000..f1bd80b Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/cat-working.gif differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/coffee.gif b/content/post/ldaps-authentication-tanzu-community-edition/coffee.gif new file mode 100644 index 0000000..554bf3e Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/coffee.gif differ diff --git 
a/content/post/ldaps-authentication-tanzu-community-edition/dex_login_prompt.png b/content/post/ldaps-authentication-tanzu-community-edition/dex_login_prompt.png new file mode 100644 index 0000000..63d5350 Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/dex_login_prompt.png differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/dex_login_success.png b/content/post/ldaps-authentication-tanzu-community-edition/dex_login_success.png new file mode 100644 index 0000000..47de48d Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/dex_login_success.png differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/download_ca_cert.png b/content/post/ldaps-authentication-tanzu-community-edition/download_ca_cert.png new file mode 100644 index 0000000..0b6b1a2 Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/download_ca_cert.png differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/identity_management_1.png b/content/post/ldaps-authentication-tanzu-community-edition/identity_management_1.png new file mode 100644 index 0000000..2e11efd Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/identity_management_1.png differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/identity_management_2.png b/content/post/ldaps-authentication-tanzu-community-edition/identity_management_2.png new file mode 100644 index 0000000..d3c695a Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/identity_management_2.png differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/index.md b/content/post/ldaps-authentication-tanzu-community-edition/index.md new file mode 100644 index 0000000..0020f1d --- /dev/null +++ b/content/post/ldaps-authentication-tanzu-community-edition/index.md @@ -0,0 +1,430 @@ +--- +title: 
"Active Directory authentication in Tanzu Community Edition" # Title of the blog post. +date: 2022-03-06 # Date of post creation. +# lastmod: 2022-03-03T20:29:39-06:00 # Date when last modified +description: "Deploying and configuring a Tanzu Community Edition Kubernetes cluster to support authentication with Active Directory users and groups by using pinniped and dex" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "ldaps_test.png" # Sets thumbnail image appearing inside card on homepage. +shareImage: "ldaps_test.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: K8s on vSphere +tags: + - vmware + - kubernetes + - tanzu + - activedirectory + - certs + - cluster + - containers +comment: true # Disable comment if false. +--- +Not long ago, I [deployed a Tanzu Community Edition Kubernetes cluster in my homelab](/tanzu-community-edition-k8s-homelab/), and then I fumbled through figuring out how to [log into it from a different device](/logging-in-tce-cluster-from-new-device/) than the one I'd used for deploying the cluster from the `tanzu` cli. That setup works great for playing with Kubernetes in my homelab but I'd love to do some Kubernetes with my team at work and I really need the ability to authenticate multiple users with domain credentials for that. 
+ +The TCE team has created a [rather detailed guide](https://tanzucommunityedition.io/docs/latest/vsphere-ldap-config/) for using the [Pinniped](https://pinniped.dev/) and [Dex](https://dexidp.io/docs/kubernetes/) packages[^packages] to provide LDAPS authentication when TCE is connected with the NSX Advanced Load Balancer. This guide got me *most* of the way toward a working solution but I had to make some tweaks along the way (particularly since I'm not using NSX-ALB). I took notes as I worked through it, though, so here I'll share what it actually took to make this work in my environment. + +[^packages]: Per VMware, "Pinniped provides the authentication service, which uses Dex to connect to identity providers such as Active Directory." +### Prequisite +In order to put the "Secure" in LDAPS, I need to make sure my Active Directory domain controller is configured for that, and that means also creating a Certificate Authority for issuing certificates. I followed the steps [here](http://vcloud-lab.com/entries/windows-2016-server-r2/configuring-secure-ldaps-on-domain-controller) to get this set up in my homelab. I can then point my browser to `http://win01.lab.bowdre.net/certsrv/certcarc.asp` to download the base64-encoded CA certificate since I'll need that later. +![Downloading the CA cert](download_ca_cert.png) + +With that sorted, I'm ready to move on to creating a new TCE cluster with an LDAPS identity provider configured. +### Cluster creation +The [cluster deployment steps](/tanzu-community-edition-k8s-homelab/#management-cluster) are very similar to what I did last time so I won't repeat all those instructions here. The only difference is that this time I don't skip past the Identity Management screen; instead, I'll select the LDAPS radio button and get ready to fill out the form. 
+ +#### Identity management configuration +![Identity Management section](identity_management_1.png) + +**LDAPS Identity Management Source** +| Field | Value | Notes | +| --- | --- | ---- | +| LDAPS Endpoint | `win01.lab.bowdre.net:636` | LDAPS interface of my AD DC | +| BIND DN | `CN=LDAP Bind,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net` | DN of an account with LDAP read permissions | +| BIND Password | `*******` | Password for that account | + +**User Search Attributes** +| Field | Value | Notes | +| --- | --- | --- | +| Base DN | `OU=LAB,DC=lab,DC=bowdre,DC=net` | DN for the top-level OU containing my users | +| Filter | `objectClass=(person)` | | +| Username | `sAMAccountName` | I want to auth as `john` rather than `john@lab.bowdre.net` (`userPrincipalName`) | + +**Group Search Attributes** +| Field | Value | Notes | +| --- | --- | --- | +| Base DN | `OU=LAB,DC=lab,DC=bowdre,DC=net` | DN for OU containing my users | +| Filter | `(objectClass=group)` | | +| Name Attribute | `cn` | Common Name | +| User Attribute | `DN` | Distinguished Name (capitalization matters!) | +| Group Attribute | `member:1.2.840.113556.1.4.1941:` | Used to enumerate which groups a user is a member of[^member] | + +And I'll copy the contents of the base64-encoded CA certificate I downloaded earlier and paste them into the Root CA Certificate field. + +![Completed Identity Management section](identity_management_2.png) + +Before moving on, I can use the **Verify LDAP Configuration** button to quickly confirm that the connection is set up correctly. (I discovered that it doesn't honor the attribute selections made on the previous screen so I have to search for my Common Name (`John Bowdre`) instead of my username (`john`), but this at least lets me verify that the connection and certificate are working correctly.) +![LDAPS test](ldaps_test.png) + +I can then click through the rest of the wizard but (as before) I'll stop on the final review page. 
Despite entering everything correctly in the wizard I'll actually need to make a small edit to the deployment configuration YAML so I make a note of its location and copy it to a file called `tce-mgmt-deploy.yaml` in my working directory so that I can take a look. +![Reviewing the cluster configuration file](review_cluster_configuration.png) + +#### Editing the cluster spec +Remember that awkward `member:1.2.840.113556.1.4.1941:` attribute from earlier? Here's how it looks within the TCE cluster-defining YAML: +```yaml +LDAP_GROUP_SEARCH_BASE_DN: OU=LAB,DC=lab,DC=bowdre,DC=net +LDAP_GROUP_SEARCH_FILTER: (objectClass=group) +LDAP_GROUP_SEARCH_GROUP_ATTRIBUTE: 'member:1.2.840.113556.1.4.1941:' +LDAP_GROUP_SEARCH_NAME_ATTRIBUTE: cn +LDAP_GROUP_SEARCH_USER_ATTRIBUTE: DN +``` + +That `:` at the end of the line will cause problems down the road - specifically when the deployment process creates the `dex` app which handles the actual LDAPS authentication piece. Cumulative hours of [troubleshooting](#troubleshooting-notes) (and learning!) eventually revealed to me that something along the way had choked on that trailing colon and inserted this into the `dex` configuration: +```yaml +userMatchers: +- userAttr: DN + groupAttr: + member:1.2.840.113556.1.4.1941: null +``` + +It *should* look like this instead: +```yaml +userMatchers: +- userAttr: DN + groupAttr: 'member:1.2.840.113556.1.4.1941:' +``` + +That error prevents `dex` from starting correctly so the authentication would never work. 
I eventually figured out that using the `|` character to define the attribute as a [literal scalar](https://yaml.org/spec/1.2.2/#812-literal-style) would help to get around this issue so I changed the cluster YAML to look like this: +```yaml +LDAP_GROUP_SEARCH_BASE_DN: OU=LAB,DC=lab,DC=bowdre,DC=net +LDAP_GROUP_SEARCH_FILTER: (objectClass=group) +LDAP_GROUP_SEARCH_GROUP_ATTRIBUTE: | + 'member:1.2.840.113556.1.4.1941:' +LDAP_GROUP_SEARCH_NAME_ATTRIBUTE: cn +LDAP_GROUP_SEARCH_USER_ATTRIBUTE: DN +``` + +[^member]: Setting this to just `member` would work, but it wouldn't return any nested group memberships. I struggled with this for a long while until I came across a [post from Brian Ragazzi](https://brianragazzi.wordpress.com/2020/05/12/configure-tanzu-kubernetes-grid-to-use-active-directory/) which mentioned using `member:1.2.840.113556.1.4.1941:` instead. This leverages a special [LDAP matching rule OID](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) to expand the nested groups. + +#### Deploying the cluster +That's the only thing I need to manually edit so now I can go ahead and create the cluster with: +``` +tanzu management-cluster create tce-mgmt -f tce-mgmt-deploy.yaml +``` + +This will probably take 10-15 minutes to deploy so it's a great time to go top off my coffee. + +![Coffee break!](coffee.gif) + + +And we're back - and with a friendly success message in the console: +``` +You can now access the management cluster tce-mgmt by running 'kubectl config use-context tce-mgmt-admin@tce-mgmt' +Management cluster created! + +You can now create your first workload cluster by running the following: + tanzu cluster create [name] -f [file] + +Some addons might be getting installed! 
Check their status by running the following: + + kubectl get apps -A +``` + +I obediently follow the instructions to switch to the correct context and verify that the addons are all running: +```bash +❯ kubectl config use-context tce-mgmt-admin@tce-mgmt +Switched to context "tce-mgmt-admin@tce-mgmt". + +❯ kubectl get apps -A +NAMESPACE NAME DESCRIPTION SINCE-DEPLOY AGE +tkg-system antrea Reconcile succeeded 5m2s 11m +tkg-system metrics-server Reconcile succeeded 39s 11m +tkg-system pinniped Reconcile succeeded 4m55s 11m +tkg-system secretgen-controller Reconcile succeeded 65s 11m +tkg-system tanzu-addons-manager Reconcile succeeded 70s 11m +tkg-system vsphere-cpi Reconcile succeeded 32s 11m +tkg-system vsphere-csi Reconcile succeeded 66s 11m +``` + +### Post-deployment tasks + +I've got a TCE cluster now but it's not quite ready for me to authenticate with my AD credentials just yet. + +#### Load Balancer deployment +The [guide I'm following from the TCE site](https://tanzucommunityedition.io/docs/latest/vsphere-ldap-config/) assumes that I'm using NSX-ALB in my environment, but I'm not. So, [as before](/tanzu-community-edition-k8s-homelab/#deploying-kube-vip-as-a-load-balancer), I'll need to deploy [Scott Rosenberg's `kube-vip` Carvel package](https://github.com/vrabbi/tkgm-customizations): + +```bash +git clone https://github.com/vrabbi/tkgm-customizations.git +cd tkgm-customizations/carvel-packages/kube-vip-package +kubectl apply -n tanzu-package-repo-global -f metadata.yml +kubectl apply -n tanzu-package-repo-global -f package.yaml +cat << EOF > values.yaml +vip_range: 192.168.1.64-192.168.1.70 +EOF +tanzu package install kubevip -p kubevip.terasky.com -v 0.3.9 -f values.yaml +``` + +#### Modifying services to use the Load Balancer +With the load balancer in place, I can follow the TCE instruction to modify the Pinniped and Dex services to switch from the `NodePort` type to the `LoadBalancer` type so they can be easily accessed from outside of the cluster. 
This process starts by creating a file called `pinniped-supervisor-svc-overlay.yaml` and pasting in the following overlay manifest: + +```yaml +#@ load("@ytt:overlay", "overlay") +#@overlay/match by=overlay.subset({"kind": "Service", "metadata": {"name": "pinniped-supervisor", "namespace": "pinniped-supervisor"}}) +--- +#@overlay/replace +spec: + type: LoadBalancer + selector: + app: pinniped-supervisor + ports: + - name: https + protocol: TCP + port: 443 + targetPort: 8443 + +#@ load("@ytt:overlay", "overlay") +#@overlay/match by=overlay.subset({"kind": "Service", "metadata": {"name": "dexsvc", "namespace": "tanzu-system-auth"}}), missing_ok=True +--- +#@overlay/replace +spec: + type: LoadBalancer + selector: + app: dex + ports: + - name: dex + protocol: TCP + port: 443 + targetPort: https +``` + +This overlay will need to be inserted into the `pinniped-addon` secret which means that the contents need to be converted to a base64-encoded string: +```bash +❯ base64 -w 0 pinniped-supervisor-svc-overlay.yaml +I0AgbG9hZCgi[...]== +``` +{{% notice info "Avoid newlines" %}} +The `-w 0` / `--wrap=0` argument tells `base64` to *not* wrap the encoded lines after a certain number of characters. If you leave this off, the string will get a newline inserted every 76 characters, and those linebreaks would make the string a bit more tricky to work with. Avoid having to clean up the output afterwards by being more specific with the request up front! 
+{{% /notice %}} + +I'll copy the resulting base64 string (which is much longer than the truncated form I'm using here), and paste it into the following command to patch the secret (which will be named after the management cluster name so replace the `tce-mgmt` part as appropriate): +```bash +❯ kubectl -n tkg-system patch secret tce-mgmt-pinniped-addon -p '{"data": {"overlays.yaml": "I0AgbG9hZCgi[...]=="}}' +secret/tce-mgmt-pinniped-addon patched +``` + +I can watch as the `pinniped-supervisor` and `dexsvc` services get updated with the new service type: +```bash +❯ kubectl get svc -A -w +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) +pinniped-supervisor pinniped-supervisor NodePort 100.65.185.82 443:31234/TCP +tanzu-system-auth dexsvc NodePort 100.70.238.106 5556:30167/TCP +tkg-system packaging-api ClusterIP 100.65.185.94 443/TCP +tanzu-system-auth dexsvc LoadBalancer 100.70.238.106 443:30167/TCP +pinniped-supervisor pinniped-supervisor LoadBalancer 100.65.185.82 443:31234/TCP +pinniped-supervisor pinniped-supervisor LoadBalancer 100.65.185.82 192.168.1.70 443:31234/TCP +tanzu-system-auth dexsvc LoadBalancer 100.70.238.106 192.168.1.64 443:30167/TCP +``` + +I'll also need to restart the `pinniped-post-deploy-job` job to account for the changes I just made; that's accomplished by simply deleting the existing job. After a few minutes a new job will be spawned automagically. 
I'll just watch for the new job to be created: +```bash +❯ kubectl -n pinniped-supervisor delete jobs pinniped-post-deploy-job +job.batch "pinniped-post-deploy-job" deleted + +❯ kubectl get jobs -A -w +NAMESPACE NAME COMPLETIONS DURATION AGE +pinniped-supervisor pinniped-post-deploy-job 0/1 0s +pinniped-supervisor pinniped-post-deploy-job 0/1 0s +pinniped-supervisor pinniped-post-deploy-job 0/1 0s 0s +pinniped-supervisor pinniped-post-deploy-job 1/1 9s 9s +``` + +### Authenticating +Right now, I've got all the necessary components to support LDAPS authentication with my TCE management cluster but I haven't done anything yet to actually define who should have what level of access. To do that, I'll create a `ClusterRoleBinding`. + +I'll toss this into a file I'll call `tanzu-admins-crb.yaml`: +```yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tanzu-admins +subjects: + - kind: Group + name: Tanzu-Admins + apiGroup: +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +``` + +I have a group in Active Directory called `Tanzu-Admins` which contains a group called `vRA-Admins`, and that group contains my user account (`john`). It's a roundabout way of granting access for a single user in this case but it should help to confirm that nested group memberships are being enumerated properly. + +Once applied, users within that group will be granted the `cluster-admin` role[^roles]. + +Let's do it: +```bash +❯ kubectl apply -f tanzu-admins-crb.yaml +clusterrolebinding.rbac.authorization.k8s.io/tanzu-admins created +``` + +Thus far, I've been using the default administrator context to interact with the cluster. 
Now it's time to switch to the non-admin context: +```bash +❯ tanzu management-cluster kubeconfig get +You can now access the cluster by running 'kubectl config use-context tanzu-cli-tce-mgmt@tce-mgmt' + +❯ kubectl config use-context tanzu-cli-tce-mgmt@tce-mgmt +Switched to context "tanzu-cli-tce-mgmt@tce-mgmt". +``` + +After assuming the non-admin context, the next time I try to interact with the cluster it should kick off the LDAPS authentication process. It won't look like anything is happening in the terminal: +```bash +❯ kubectl get nodes + +``` + +But it will shortly spawn a browser page prompting me to log in: +![Dex login prompt](dex_login_prompt.png) + +Doing so successfully will yield: +![Dex login success!](dex_login_success.png) + +And the `kubectl` command will return the expected details: +```bash +❯ kubectl get nodes +NAME STATUS ROLES AGE VERSION +tce-mgmt-control-plane-v8l8r Ready control-plane,master 29h v1.21.5+vmware.1 +tce-mgmt-md-0-847db9ddc-5bwjs Ready 28h v1.21.5+vmware.1 +``` + +![It's working!!! Holy crap, I can't believe it.](its-working.gif) + +So I've now successfully logged in to the management cluster as a non-admin user with my Active Directory credentials. Excellent! + +[^roles]: You can read up on some other default user-facing roles [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). + +### Sharing access +To allow other users to log in this way, I'd need to give them a copy of the non-admin `kubeconfig`, which I can get by running `tanzu management-cluster config get --export-file tce-mgmt-config` to export it into a file named `tce-mgmt-config`. They could use [whatever method they like](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) to merge this in with their existing `kubeconfig`. 
+ +{{% notice tip "Tanzu CLI required" %}} +Other users hoping to work with a Tanzu Community Edition cluster will also need to install the Tanzu CLI tool in order to authenticate in this way. Installation instructions can be found [here](https://tanzucommunityedition.io/docs/latest/cli-installation/). +{{% /notice %}} + +### Deploying a workload cluster +At this point, I've only configured authentication for the management cluster - not the workload cluster. The TCE community docs cover what's needed to make this configuration available in the workload cluster as well [here](https://tanzucommunityedition.io/docs/latest/vsphere-ldap-config/#configuration-steps-on-the-workload-cluster). [As before](/tanzu-community-edition-k8s-homelab/#workload-cluster), I created the deployment YAML for the workload cluster by copying the management cluster's deployment YAML and changing the `CLUSTER_NAME` and `VSPHERE_CONTROL_PLANE_ENDPOINT` values accordingly. This time I also deleted all of the `LDAP_*` and `OIDC_*` lines, but made sure to preserve the `IDENTITY_MANAGEMENT_TYPE: ldap` one. + +I was then able to deploy the workload cluster with: +```bash +❯ tanzu cluster create --file tce-work-deploy.yaml +Validating configuration... +Creating workload cluster 'tce-work'... +Waiting for cluster to be initialized... +cluster control plane is still being initialized: WaitingForControlPlane +cluster control plane is still being initialized: ScalingUp +Waiting for cluster nodes to be available... +Waiting for addons installation... +Waiting for packages to be up and running... + +Workload cluster 'tce-work' created +``` + +Access the admin context: +```bash +❯ tanzu cluster kubeconfig get --admin tce-work +Credentials of cluster 'tce-work' have been saved +You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work' + +❯ kubectl config use-context tce-work-admin@tce-work +Switched to context "tce-work-admin@tce-work". 
+``` + +Apply the same ClusterRoleBinding from before[^crb]: +```bash +❯ kubectl apply -f tanzu-admins-crb.yaml +clusterrolebinding.rbac.authorization.k8s.io/tanzu-admins created +``` + +And finally switch to the non-admin context and log in with my AD account: +```bash +❯ tanzu cluster kubeconfig get tce-work +ℹ You can now access the cluster by running 'kubectl config use-context tanzu-cli-tce-work@tce-work' + +❯ kubectl config use-context tanzu-cli-tce-work@tce-work +Switched to context "tanzu-cli-tce-work@tce-work". + +❯ kubectl get nodes +NAME STATUS ROLES AGE VERSION +tce-work-control-plane-zts6r Ready control-plane,master 12m v1.21.5+vmware.1 +tce-work-md-0-bcfdc4d79-vn9xb Ready 11m v1.21.5+vmware.1 +``` + +Now I can *Do Work*! +![Back to the grind](cat-working.gif) + +{{% notice note "Create DHCP reservations for control plane nodes" %}} +VMware [points out](https://tanzucommunityedition.io/docs/latest/verify-deployment/#configure-dhcp-reservations-for-the-control-plane-nodes-vsphere-only) that it's important to create DHCP reservations for the IP addresses which were dynamically assigned to the control plane nodes in both the management and workload clusters so be sure to take care of that before getting too involved in "Work". +{{% /notice %}} + +[^crb]: Or a different one. In reality, you probably don't want the same users having the same levels of access on both the management and workload clusters. But I stuck with just the one here for now. +### Troubleshooting notes +It took me quite a bit of trial and error to get this far and (being a k8s novice) even more time figuring out how to even troubleshoot the problems I was encountering. So here are a few tips that helped me out. 
+ +#### Checking and modifying `dex` configuration +I had a lot of trouble figuring out how to correctly format the `member:1.2.840.113556.1.4.1941:` attribute in the LDAPS config so that it wouldn't get split into multiple attributes due to the trailing colon - and it took me forever to discover that was even the issue. What eventually did the trick for me was learning that I could look at (and modify!) the configuration for the `dex` app with: + +```bash +❯ kubectl -n tanzu-system-auth edit configmaps dex +[...] + groupSearch: + baseDN: OU=LAB,DC=lab,DC=bowdre,DC=net + filter: (objectClass=group) + nameAttr: cn + scope: sub + userMatchers: + - userAttr: DN + groupAttr: 'member:1.2.840.113556.1.4.1941:' + host: win01.lab.bowdre.net:636 +[...] +``` + +This let me make changes on the fly until I got a working configuration and then work backwards from there to format the initial input correctly. + +#### Reviewing `dex` logs +Authentication attempts (at least on the LDAPS side of things) will show up in the logs for the `dex` pod running in the `tanzu-system-auth` namespace. 
This is a great place to look to see if the user isn't being found, credentials are invalid, or the groups aren't being enumerated correctly: + +```bash +❯ kubectl -n tanzu-system-auth get pods +NAME READY STATUS RESTARTS AGE +dex-7bf4f5d4d9-k4jfl 1/1 Running 0 40h + +❯ kubectl -n tanzu-system-auth logs dex-7bf4f5d4d9-k4jfl +# no such user +{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=person)(sAMAccountName=johnny))","time":"2022-03-06T22:29:57Z"} +{"level":"error","msg":"ldap: no results returned for filter: \"(\u0026(objectClass=person)(sAMAccountName=johnny))\"","time":"2022-03-06T22:29:57Z"} +#invalid password +{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=person)(sAMAccountName=john))","time":"2022-03-06T22:30:45Z"} +{"level":"info","msg":"username \"john\" mapped to entry CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net","time":"2022-03-06T22:30:45Z"} +{"level":"error","msg":"ldap: invalid password for user \"CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net\"","time":"2022-03-06T22:30:45Z"} +# successful login +{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=person)(sAMAccountName=john))","time":"2022-03-06T22:31:21Z"} +{"level":"info","msg":"username \"john\" mapped to entry CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net","time":"2022-03-06T22:31:21Z"} +{"level":"info","msg":"performing ldap search OU=LAB,DC=lab,DC=bowdre,DC=net sub (\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:=CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net))","time":"2022-03-06T22:31:21Z"} +{"level":"info","msg":"login successful: connector \"ldap\", username=\"john\", preferred_username=\"\", email=\"CN=John Bowdre,OU=Users,OU=BOW,OU=LAB,DC=lab,DC=bowdre,DC=net\", groups=[\"vRA-Admins\" \"Tanzu-Admins\"]","time":"2022-03-06T22:31:21Z"} +``` + +#### Clearing 
pinniped sessions
+I couldn't figure out an elegant way to log out so that I could try authenticating as a different user, but I did discover that information about authenticated sessions gets stored in `~/.config/tanzu/pinniped/sessions.yaml`. The sessions expire after a while, but until that happens I'm able to keep on interacting with `kubectl` - and not given an option to re-authenticate even if I wanted to.
+
+So in lieu of a handy logout option, I was able to remove the cached sessions by deleting the file:
+```bash
+rm ~/.config/tanzu/pinniped/sessions.yaml
+```
+
+That let me use `kubectl get nodes` to trigger the authentication prompt again.
+
+### Conclusion
+So this is a pretty basic walkthrough of how I set up my Tanzu Community Edition Kubernetes clusters for Active Directory authentication in my homelab. I feel like I've learned a lot more about TCE specifically and Kubernetes in general through this process, and I'm sure I'll learn more in the future as I keep experimenting with the setup. 
\ No newline at end of file diff --git a/content/post/ldaps-authentication-tanzu-community-edition/its-working.gif b/content/post/ldaps-authentication-tanzu-community-edition/its-working.gif new file mode 100644 index 0000000..900c4be Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/its-working.gif differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/ldaps_test.png b/content/post/ldaps-authentication-tanzu-community-edition/ldaps_test.png new file mode 100644 index 0000000..e452e92 Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/ldaps_test.png differ diff --git a/content/post/ldaps-authentication-tanzu-community-edition/review_cluster_configuration.png b/content/post/ldaps-authentication-tanzu-community-edition/review_cluster_configuration.png new file mode 100644 index 0000000..ebb7c65 Binary files /dev/null and b/content/post/ldaps-authentication-tanzu-community-edition/review_cluster_configuration.png differ diff --git a/content/post/logging-in-tce-cluster-from-new-device/index.md b/content/post/logging-in-tce-cluster-from-new-device/index.md new file mode 100644 index 0000000..b75728c --- /dev/null +++ b/content/post/logging-in-tce-cluster-from-new-device/index.md @@ -0,0 +1,116 @@ +--- +title: "Logging in to a Tanzu Community Edition Kubernetes Cluster from a new device" # Title of the blog post. +date: 2022-02-01T22:07:18-06:00 # Date of post creation. +# lastmod: 2022-02-01T10:58:57-06:00 # Date when last modified +description: "The Tanzu Community Edition documentation does a great job of explaining how to authenticate to a newly-deployed cluster at the tail end of the installation steps, but how do you log in from another system once it's set up?" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. 
Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +featureImage: "tanzu.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "tanzu.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips +tags: + - vmware + - kubernetes + - tanzu +comment: true # Disable comment if false. +--- +When I [set up my Tanzu Community Edition environment](/tanzu-community-edition-k8s-homelab/), I did so from a Linux VM since the containerized Linux environment on my Chromebook doesn't support the `kind` bootstrap cluster used for the deployment. But now that the Kubernetes cluster is up and running, I'd like to be able to connect to it directly without the aid of a jumpbox. How do I get the appropriate cluster configuration over to my Chromebook? + +The Tanzu CLI actually makes that pretty easy - once I figured out the appropriate incantation. I just needed to use the `tanzu management-cluster kubeconfig get` command on my Linux VM to export the `kubeconfig` of my management (`tce-mgmt`) cluster to a file: +```shell +tanzu management-cluster kubeconfig get --admin --export-file tce-mgmt-kubeconfig.yaml +``` + +I then used `scp` to pull the file from the VM into my local Linux environment, and proceeded to [install `kubectl`](/tanzu-community-edition-k8s-homelab/#kubectl-binary) and the [`tanzu` CLI](/tanzu-community-edition-k8s-homelab/#tanzu-cli) (making sure to also [enable shell auto-completion](/enable-tanzu-cli-auto-completion-bash-zsh/) along the way!). 
+ +Now I'm ready to import the configuration locally with `tanzu login` on my Chromebook: + +```shell +❯ tanzu login --kubeconfig ~/projects/tanzu-homelab/tanzu-setup/tce-mgmt-kubeconfig.yaml --context tce-mgmt-admin@tce-mgmt --name tce-mgmt +✔ successfully logged in to management cluster using the kubeconfig tce-mgmt +``` + +{{% notice tip "Use the absolute path" %}} +Pass in the full path to the exported kubeconfig file. This will help the Tanzu CLI to load the correct config across future terminal sessions. +{{% /notice %}} + +Even though that's just importing the management cluster it actually grants access to both the management and workload clusters: +```shell +❯ tanzu cluster list + NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES PLAN + tce-work default running 1/1 1/1 v1.21.2+vmware.1 dev + +❯ tanzu cluster get tce-work + NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES + tce-work default running 1/1 1/1 v1.21.2+vmware.1 +ℹ + +Details: + +NAME READY SEVERITY REASON SINCE MESSAGE +/tce-work True 24h +├─ClusterInfrastructure - VSphereCluster/tce-work True 24h +├─ControlPlane - KubeadmControlPlane/tce-work-control-plane True 24h +│ └─Machine/tce-work-control-plane-vc2pb True 24h +└─Workers + └─MachineDeployment/tce-work-md-0 + └─Machine/tce-work-md-0-687444b744-crc9q True 24h + +❯ tanzu management-cluster get + NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES + tce-mgmt tkg-system running 1/1 1/1 v1.21.2+vmware.1 management + + +Details: + +NAME READY SEVERITY REASON SINCE MESSAGE +/tce-mgmt True 23h +├─ClusterInfrastructure - VSphereCluster/tce-mgmt True 23h +├─ControlPlane - KubeadmControlPlane/tce-mgmt-control-plane True 23h +│ └─Machine/tce-mgmt-control-plane-7pwz7 True 23h +└─Workers + └─MachineDeployment/tce-mgmt-md-0 + └─Machine/tce-mgmt-md-0-745b858d44-5llk5 True 23h + + +Providers: + + NAMESPACE NAME TYPE PROVIDERNAME VERSION WATCHNAMESPACE + capi-kubeadm-bootstrap-system bootstrap-kubeadm BootstrapProvider kubeadm 
v0.3.23 + capi-kubeadm-control-plane-system control-plane-kubeadm ControlPlaneProvider kubeadm v0.3.23 + capi-system cluster-api CoreProvider cluster-api v0.3.23 + capv-system infrastructure-vsphere InfrastructureProvider vsphere v0.7.10 +``` + +And I can then tell `kubectl` about the two clusters: +```shell +❯ tanzu management-cluster kubeconfig get tce-mgmt --admin +Credentials of cluster 'tce-mgmt' have been saved +You can now access the cluster by running 'kubectl config use-context tce-mgmt-admin@tce-mgmt' + +❯ tanzu cluster kubeconfig get tce-work --admin +Credentials of cluster 'tce-work' have been saved +You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work' +``` + +And sure enough, there are my contexts: +```shell +❯ kubectl config get-contexts +CURRENT NAME CLUSTER AUTHINFO NAMESPACE + tce-mgmt-admin@tce-mgmt tce-mgmt tce-mgmt-admin +* tce-work-admin@tce-work tce-work tce-work-admin + +❯ kubectl get nodes -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +tce-work-control-plane-vc2pb Ready control-plane,master 23h v1.21.2+vmware.1 192.168.1.132 192.168.1.132 VMware Photon OS/Linux 4.19.198-1.ph3 containerd://1.4.6 +tce-work-md-0-687444b744-crc9q Ready 23h v1.21.2+vmware.1 192.168.1.133 192.168.1.133 VMware Photon OS/Linux 4.19.198-1.ph3 containerd://1.4.6 +``` + +Perfect, now I can get back to Tanzuing from my Chromebook without having to jump through a VM. (And, [thanks to Tailscale](/secure-networking-made-simple-with-tailscale/), I can even access my TCE resources remotely!) 
diff --git a/content/post/logging-in-tce-cluster-from-new-device/tanzu.png b/content/post/logging-in-tce-cluster-from-new-device/tanzu.png new file mode 100644 index 0000000..dd2724d Binary files /dev/null and b/content/post/logging-in-tce-cluster-from-new-device/tanzu.png differ diff --git a/content/post/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/LJOcy2oqc.png b/content/post/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/LJOcy2oqc.png new file mode 100644 index 0000000..4fae45e Binary files /dev/null and b/content/post/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/LJOcy2oqc.png differ diff --git a/content/post/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/index.md b/content/post/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/index.md new file mode 100644 index 0000000..7a233a2 --- /dev/null +++ b/content/post/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/index.md @@ -0,0 +1,59 @@ +--- +series: Scripts +date: "2020-09-16T08:34:30Z" +thumbnail: LJOcy2oqc.png +usePageBundles: true +tags: +- vmware +- powercli +title: Logging in to Multiple vCenter Servers at Once with PowerCLI +--- + +I manage a large VMware environment spanning several individual vCenters, and I often need to run [PowerCLI](https://code.vmware.com/web/tool/12.0.0/vmware-powercli) queries across the entire environment. I waste valuable seconds running `Connect-ViServer` and logging in for each and every vCenter I need to talk to. Wouldn't it be great if I could just log into all of them at once? + +I can, and here's how I do it. + +![Logging in to multiple vCenters](LJOcy2oqc.png) + +### The Script +The following Powershell script will let you define a list of vCenters to be accessed, securely store your credentials for each vCenter, log in to every vCenter with a single command, and also close the connections when they're no longer needed. 
It's also a great starting point for any other custom functions you'd like to incorporate into your PowerCLI sessions. +```powershell +# PowerCLI_Custom_Functions.ps1 +# Usage: +# 0) Edit $vCenterList to reference the vCenters in your environment. +# 1) Call 'Update-Credentials' to create/update a ViCredentialStoreItem to securely store your username and password. +# 2) Call 'Connect-vCenters' to open simultaneously connections to all the vCenters in your environment. +# 3) Do PowerCLI things. +# 4) Call 'Disconnect-vCenters' to cleanly close all ViServer connections because housekeeping. +Import-Module VMware.PowerCLI + +$vCenterList = @("vcenter1", "vcenter2", "vcenter3", "vcenter4", "vcenter5") + +function Update-Credentials { + $newCredential = Get-Credential + ForEach ($vCenter in $vCenterList) { + New-ViCredentialStoreItem -Host $vCenter -User $newCredential.UserName -Password $newCredential.GetNetworkCredential().password + } +} + +function Connect-vCenters { + ForEach ($vCenter in $vCenterList) { + Connect-ViServer -Server $vCenter + } +} + +function Disconnect-vCenters { + Disconnect-ViServer -Server * -Force -Confirm:$false +} +``` +### The Setup +Edit whatever shortcut you use for launching PowerCLI (I use a tab in [Windows Terminal](https://github.com/microsoft/terminal) - I'll do another post on that setup later) to reference the custom init script. Here's the commandline I use: +```powershell +powershell.exe -NoExit -Command ". C:\Scripts\PowerCLI_Custom_Functions.ps1" +``` +### The Usage +Now just use that shortcut to open up PowerCLI when you wish to do things. The custom functions will be loaded and waiting for you. +1. Start by running `Update-Credentials`. It will prompt you for the username+password needed to log into each vCenter listed in `$vCenterList`. These can be the same or different accounts, but you will need to enter the credentials for each vCenter since they get stored in a separate `ViCredentialStoreItem`. 
You'll also run this function again if you need to change the password(s) in the future. +2. Log in to all the things by running `Connect-vCenters`. +3. Do your work. +4. When you're finished, be sure to call `Disconnect-vCenters` so you don't leave sessions open in the background. diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/index.md b/content/post/nessus-essentials-on-tanzu-community-edition/index.md new file mode 100644 index 0000000..f863530 --- /dev/null +++ b/content/post/nessus-essentials-on-tanzu-community-edition/index.md @@ -0,0 +1,145 @@ +--- +title: "Nessus Essentials on Tanzu Community Edition" # Title of the blog post. +date: 2022-02-07T09:18:07-06:00 # Date of post creation. +# lastmod: 2022-02-07T09:18:07-06:00 # Date when last modified +description: "Deploying the free Nessus Essentials security scanner to a Tanzu Community Edition Kubernetes cluster in my homelab." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "nessus_login.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips # Projects, Scripts, vRA8 +tags: + - vmware + - kubernetes + - tanzu + - containers + - security +comment: true # Disable comment if false. 
+--- +Now that VMware [has released](https://blogs.vmware.com/vsphere/2022/01/announcing-availability-of-vsphere-7-update-3c.html) [vCenter 7.0U3c](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-vcenter-server-70u3c-release-notes.html) to resolve the Log4Shell vulnerabilities I thought it might be fun to run a security scan against the upgraded VCSA in my homelab to see how it looks. Of course, I don't actually have a security scanner in that environment so I'll need to deploy one. + +I start off by heading to [tenable.com/products/nessus/nessus-essentials](https://www.tenable.com/products/nessus/nessus-essentials) to register for a (free!) license key which will let me scan up to 16 hosts. I'll receive the key and download link in an email, but I'm not actually going to use that link to download the Nessus binary. I've got this shiny-and-new [Tanzu Community Edition Kubernetes cluster](/tanzu-community-edition-k8s-homelab/) that could use some more real workloads so I'll instead opt for the [Docker version](https://hub.docker.com/r/tenableofficial/nessus). 
+ +Tenable provides an [example `docker-compose.yml`](https://community.tenable.com/s/article/Deploy-Nessus-docker-image-with-docker-compose) to make it easy to get started: +```yaml +version: '3.1' + +services: + + nessus: + image: tenableofficial/nessus + restart: always + container_name: nessus + environment: + USERNAME: + PASSWORD: + ACTIVATION_CODE: + ports: + - 8834:8834 +``` + +I can use that knowledge to craft something I can deploy on Kubernetes: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: nessus + labels: + app: nessus +spec: + type: LoadBalancer + ports: + - name: nessus-web + port: 443 + protocol: TCP + targetPort: 8834 + selector: + app: nessus +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nessus +spec: + selector: + matchLabels: + app: nessus + replicas: 1 + template: + metadata: + labels: + app: nessus + spec: + containers: + - name: nessus + image: tenableofficial/nessus + env: + - name: ACTIVATION_CODE + value: "ABCD-1234-EFGH-5678-IJKL" + - name: USERNAME + value: "admin" + - name: PASSWORD + value: "VMware1!" + ports: + - name: nessus-web + containerPort: 8834 +``` + +Note that I'm configuring the `LoadBalancer` to listen on port `443` and route traffic to the pod on port `8834` so that I don't have to remember to enter an oddball port number when I want to connect to the web interface. 
+ +And now I can just apply the file: +```bash +❯ kubectl apply -f nessus.yaml +service/nessus created +deployment.apps/nessus created +``` + +I'll give it a moment or two to deploy and then check on the service to figure out what IP I need to use to connect: +```bash +❯ kubectl get svc/nessus +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +nessus LoadBalancer 100.67.16.51 192.168.1.79 443:31260/TCP 57s +``` + +I point my browser to `https://192.168.1.79` and see that it's a great time for a quick coffee break since it will take a few minutes for Nessus to initialize itself: +![Nessus Initialization](nessus_init.png) + +Eventually that gets replaced with a login screen, where I can authenticate using the username and password specified earlier in the YAML. +![Nessus login screen](nessus_login.png) + +After logging in, I get prompted to run a discovery scan to identify hosts on the network. There's a note that hosts revealed by the discovery scan will *not* count against my 16-host limit unless/until I select individual hosts for more detailed scans. That's good to know for future efforts, but for now I'm focused on just scanning my one vCenter server so I dismiss the prompt. + + What I *am* interested in is scanning my vCenter for the Log4Shell vulnerability so I'll hit the friendly blue **New Scan** button at the top of the *Scans* page to create my scan. That shows me a list of *Scan Templates*: +![Scan templates](scan_templates.png) + +I'll scroll down a bit and pick the first *Log4Shell* template: +![Log4Shell templates](log4shell_templates.png) + +I plug in a name for the scan and enter my vCenter IP (`192.168.1.12`) as the lone scan target: +![Naming the scan and selecting the target](scan_setup_page_1.png) + +There's a note there that I'll also need to include credentials so that the Nessus scanner can log in to the target in order to conduct the scan, so I pop over to the aptly-named *Credentials* tab to add some SSH credentials. 
This is just my lab environment so I'll give it the `root` credentials, but if I were running Nessus in a real environment I'd probably want to use a dedicated user account just for scans. +![Giving credentials for the scan](scan_setup_page2.png) + +Now I can scroll to the bottom of the page, click the down-arrow next to the *Save* button and select the **Launch** option to kick off the scan: +![Go for launch](launch.png) + +That drops me back to the *My Scans* view where I can see the status of my scan. I'll grab another coffee while I stare at the little green spinny thing. +![My scans](my_scans.gif) + +Okay, break's over - and so is the scan! Now I can click on the name of the scan to view the results: +![Results summary](scan_results_summary.png) + +And I can drill down into the vulnerability details: +![Log4j-related vulnerabilities](scan_results_log4j.png) + +This reveals a handful of findings related to old 1.x versions of Log4j (which went EOL in 2015 - yikes!) as well as the [CVE-2021-44832](https://nvd.nist.gov/vuln/detail/CVE-2021-44832) Remote Code Execution vulnerability (which is resolved in Log4j 2.17.1), but the inclusion of Log4j 2.17.0 in vCenter 7.0U3c *was* sufficient to close the highly-publicized [CVE-2021-44228](https://nvd.nist.gov/vuln/detail/CVE-2021-44228) Log4Shell vulnerability. Hopefully VMware can get these other Log4j vulnerabilities taken care of in another upcoming vCenter release. + +So there's that curiosity satisfied, and now I've got a handy new tool to play with in my lab. 
\ No newline at end of file diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/launch.png b/content/post/nessus-essentials-on-tanzu-community-edition/launch.png new file mode 100644 index 0000000..6d88ac4 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/launch.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/log4shell_templates.png b/content/post/nessus-essentials-on-tanzu-community-edition/log4shell_templates.png new file mode 100644 index 0000000..2976463 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/log4shell_templates.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/my_scans.gif b/content/post/nessus-essentials-on-tanzu-community-edition/my_scans.gif new file mode 100644 index 0000000..546060c Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/my_scans.gif differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/nessus_init.png b/content/post/nessus-essentials-on-tanzu-community-edition/nessus_init.png new file mode 100644 index 0000000..8beab2f Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/nessus_init.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/nessus_login.png b/content/post/nessus-essentials-on-tanzu-community-edition/nessus_login.png new file mode 100644 index 0000000..da84f67 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/nessus_login.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/scan_results_log4j.png b/content/post/nessus-essentials-on-tanzu-community-edition/scan_results_log4j.png new file mode 100644 index 0000000..5b50c34 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/scan_results_log4j.png differ diff --git 
a/content/post/nessus-essentials-on-tanzu-community-edition/scan_results_summary.png b/content/post/nessus-essentials-on-tanzu-community-edition/scan_results_summary.png new file mode 100644 index 0000000..ba69884 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/scan_results_summary.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/scan_setup_page2.png b/content/post/nessus-essentials-on-tanzu-community-edition/scan_setup_page2.png new file mode 100644 index 0000000..395800c Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/scan_setup_page2.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/scan_setup_page_1.png b/content/post/nessus-essentials-on-tanzu-community-edition/scan_setup_page_1.png new file mode 100644 index 0000000..31a0b04 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/scan_setup_page_1.png differ diff --git a/content/post/nessus-essentials-on-tanzu-community-edition/scan_templates.png b/content/post/nessus-essentials-on-tanzu-community-edition/scan_templates.png new file mode 100644 index 0000000..d688ea6 Binary files /dev/null and b/content/post/nessus-essentials-on-tanzu-community-edition/scan_templates.png differ diff --git a/content/post/notes-on-vra-ha-with-nsx-alb/index.md b/content/post/notes-on-vra-ha-with-nsx-alb/index.md new file mode 100644 index 0000000..e65a418 --- /dev/null +++ b/content/post/notes-on-vra-ha-with-nsx-alb/index.md @@ -0,0 +1,99 @@ +--- +series: vRA8 +date: "2021-08-25T00:00:00Z" +usePageBundles: true +tags: +- nsx +- cluster +- vra +- availability +- networking +title: Notes on vRA HA with NSX-ALB +--- +This is going to be a pretty quick recap of the steps I recently took to convert a single-node instance of vRealize Automation 8.4.2 into a 3-node High-Availability vRA cluster behind a standalone NSX Advanced Load Balancer (without NSX being 
deployed in the environment). No screenshots or specific details since I ran through this in the lab at work and didn't capture anything along the way, and my poor NUC homelab struggles enough to run a single instance of memory-hogging vRA. + +### Getting started with NSX-ALB +I found a lot of information on how to use NSX-ALB as a component of a broader NSX-equipped environment, but not a lot of detail on how to use the ALB *without* NSX - until I found [Rudi Martinsen's blog on the subject](https://rudimartinsen.com/2021/06/25/load-balancing-with-nsx-alb/). That turned out to be a great reference for the ALB configuration so be sure to check it out if you need more details than what I provide in this section. + +#### Download +NSX-ALB is/was formerly known as the Avi Vantage Controller, and downloads are available [here](https://portal.avipulse.vmware.com/software/vantage). You'll need to log in with your VMware Customer Connect account to access the download, and then grab the latest VMware Controller OVA. Be sure to make a note of the default password listed on the right-hand side since you'll need that to log in post-deployment. + +#### Deploy +It's an OVA, so deploy it like an OVA. When you get to the "Customize template" stage, drop in valid data for the **Management Interface IP Address**, **Management Interface Subnet Mask**, and **Default Gateway** fields but leave everything else blank. Click on through to the end and watch the thing deploy. Once the deployment completes, power on the new VM and wait a little bit for it to configure itself and get ready for operation. + +### Configure NSX-ALB +Point a browser to the NSX-ALB appliance's IP and log in as `admin` using the password you copied from the download page (I told you it would come in handy!). Once you're in, you'll be prompted to establish a passphrase (for backups and such) and provide DNS Resolver(s) and the DNS search domain. 
Set the SMTP option to "None" and leave the other options as the defaults. + +I'd then recommend clicking the little Avi icon at the top right of the screen and using the **My Account** button to change the admin password to something that's not posted out on the internet. You know, for reasons. + +#### Cloud +Go to **Infrastructure > Clouds** and click the pencil icon for *Default-Cloud*, then set the *Cloud Infrastructure Type* to "VMware". Input the credentials needed for connecting to your vCenter, and make sure the account has "Write" access so it can create the Service Engine VMs and whatnot. + +Click over to the *Data Center* tab and point it to the virtual data center used in your vCenter. On the *Network* tab, select the network that will be used for management traffic. Also configure a subnet (in CIDR notation) and gateway, and add a small static IP address pool that can be assigned to the Service Engine nodes (I used something like `192.168.1.120-192.168.1.126`). + +#### Networks +Once that's sorted, navigate to **Infrastructure > Cloud Resources > Networks**. You should already see the networks which were imported from vCenter; find the one you'll use for servers (like your pending vRA cluster) and click the pencil icon to edit it. Then click the **Add Subnet** button, define the subnet in CIDR format, and add a static IP pool as well. Also go ahead and select the *Default-Group* as the **Template Service Engine Group**. + +Back on the Networks list, you should now see both your management and server network defined with IP pools for each. + +#### IPAM profile +Now go to **Templates > Profiles > IPAM/DNS Profiles**, click the **Create** button at the top right, and select **IPAM Profile**. Give it a name, set **Type** to `Avi Vantage IPAM`, pick the appropriate Cloud, and then also select the Networks for which you created the IP pools earlier. + +Then go back to **Infrastructure > Clouds**, edit the Cloud, and select the IPAM Profile you just created. 
+ +#### Service Engine Group +Navigate to **Infrastructure > Cloud Resources > Service Engine Group** and edit the *Default-Group*. I left everything on the *Basic Settings* tab at the defaults. On the *Advanced* tab, I specified which vSphere cluster the Service Engines should be deployed to. And I left everything else with the default settings. + +#### SSL Certificate +Hop over to **Templates > Security > SSL/TLS Certificates** and click **Create > Application Certificate**. Give the new cert a name and change the **Type** to `CSR` to generate a new signing request. Enter the **Common Name** you're going to want to use for the load balancer VIP (something like `vra`, perhaps?) and all the usual cert fields. Use the **Subject Alternate Name (SAN)** section at the bottom to add all the other components, like the individual vRA cluster members by both hostname and FQDN. I went ahead and included those IPs as well for good measure. + +| Name | +|----------------------| +| `vra.domain.local` | +| `vra01.domain.local` | +| `vra01` | +| `192.168.1.41` | +| `vra02.domain.local` | +| `vra02` | +| `192.168.1.42` | +| `vra03.domain.local` | +| `vra03` | +| `192.168.1.43` | + +Click **Save**. + +Click **Create** again, but this time select **Root/Intermediate CA Certificate** and upload/paste your CA's cert so it can be trusted. Save your work. + +Back at the cert list, find your new application cert and click the pencil icon to edit it. Copy the **Certificate Signing Request** field and go get it signed by your CA. Be sure to grab the certificate chain (base64-encoded) as well if you can. Come back and paste in / upload your shiny new CA-signed certificate file. + +#### Virtual Service +Now it's finally time to create the Virtual Service that will function as the load balancer front-end. Pop over to **Applications > Virtual Services** and click **Create Virtual Service > Basic Setup**. 
Give it a name and set the **Application Type** to `HTTPS`, which will automatically set the port and bind a default self-signed certificate. + +Click on the **Certificate** field and select the new cert you created above. Be sure to remove the default cert. + +Tick the box to auto-allocate the IP(s), and select the appropriate network and subnet. + +Add your vRA servers (current and future) by their IP addresses (`192.168.1.41`, `192.168.1.42`, `192.168.1.43`), and then click **Save**. + +Now that the Virtual Service is created, make a note of the IP address assigned to the service and go add that to your DNS so that the name will resolve. + +### Now do vRA +Log into LifeCycle Manager in a new browser tab/window. Make sure that you've mapped an *Install* product binary for your current version of vRA; the upgrade binary that you probably used to do your last update won't cut it. It's probably also a good idea to go make a snapshot of your vRA and IDM instances just in case. + +#### Adding new certificate +In LCM, go to **Locker > Certificates** and select the option to **Import**. Switch back to the NSX-ALB tab and go to **Templates > Security > SSL/TLS Certificates**. Click the little down-arrow-in-a-circle "Export" icon next to the application certificate you created earlier. Copy the key section and paste that into LCM. Then open the file containing the certificate chain you got from your CA, copy its contents, and paste it into LCM as well. Do *not* try to upload a certificate file directly to LCM; that will fail unless the file includes both the cert and the private key and that's silly. + +Once the cert is successfully imported, go to the **Lifecycle Operations** component of LCM and navigate to the environment containing your vRA instance. Select the vRA product, hit the three-dot menu, and use the **Replace Certificate** option to replace the old and busted cert with the new HA-ready one. It will take a little bit for this to get applied. 
Don't move on until vRA services are back up. + +#### Scale out vRA +Still on the vRA product page, click on the **+ Add Components** button. + +On the **Infrastructure** page, tell LCM where to put the new VRA VMs. + +On the **Network** page, tell it which network configuration to use. + +On the **Components** page, scroll down a bit and click on **(+) > vRealize Automation Secondary Node** - twice. That will reveal a new section labeled **Cluster Virtual IP**. Put in the FQDN you configured for the Virtual Service, and tick the box to terminate SSL at the load balancer. Then scroll on down and enter the details for the additional vRA nodes, making sure that the IP addresses match the servers you added to the Virtual Service configuration and that the FQDNs match what's in the SSL cert. + +Click on through to do the precheck and ultimately kick off the deployment. It'll take a while, but you'll eventually be able to connect to the NSX-ALB at `vra.domain.local` and get passed along to one of your three cluster nodes. + +Have fun! diff --git a/content/post/powercli-list-linux-vms-and-datacenter-locations/PowerCLI.png b/content/post/powercli-list-linux-vms-and-datacenter-locations/PowerCLI.png new file mode 100644 index 0000000..0e46251 Binary files /dev/null and b/content/post/powercli-list-linux-vms-and-datacenter-locations/PowerCLI.png differ diff --git a/content/post/powercli-list-linux-vms-and-datacenter-locations/index.md b/content/post/powercli-list-linux-vms-and-datacenter-locations/index.md new file mode 100644 index 0000000..1d10204 --- /dev/null +++ b/content/post/powercli-list-linux-vms-and-datacenter-locations/index.md @@ -0,0 +1,45 @@ +--- +title: "Using PowerCLI to list Linux VMs and Datacenter Locations" # Title of the blog post. +date: 2022-01-13T13:53:08-06:00 # Date of post creation. 
+# lastmod: 2022-01-13T13:53:08-06:00 # Date when last modified +description: "A quick bit of PowerCLI to generate a report showing Linux VMs and their datacenter locations." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "PowerCLI.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Scripts +tags: + - vmware + - powercli + - powershell +comment: true # Disable comment if false. +--- + +I recently needed to export a list of all the Linux VMs in a rather large vSphere environment spanning multiple vCenters (and the entire globe), and I wanted to include information about which virtual datacenter each VM lived in to make it easier to map VMs to their physical location. + +I've got a [`Connect-vCenters` function](/logging-in-to-multiple-vcenter-servers-at-once-with-powercli/) that I use to quickly log into multiple vCenters at once. That then enables me to run a single query across the entire landscape - but what query? There isn't really a direct way to get datacenter information out of the results generated by `Get-VM`; I could run an additional `Get-Datacenter` query against each returned VM object but that doesn't sound very efficient. 
+ +What I came up with is using `Get-Datacenter` to enumerate each virtual datacenter, and then list the VMs matching my query within: + +```powershell +$linuxVms = foreach( $datacenter in ( Get-Datacenter )) { + Get-Datacenter $datacenter | Get-VM | Where { $_.ExtensionData.Config.GuestFullName -notmatch "win" -and $_.Name -notmatch "vcls" } | ` + Select @{ N="Datacenter";E={ $datacenter.Name }}, + Name, + Notes, + @{ N="Configured OS";E={ $_.ExtensionData.Config.GuestFullName }}, # OS based on the .vmx configuration + @{ N="Running OS";E={ $_.Guest.OsFullName }}, # OS as reported by VMware Tools + @{ N="Powered On";E={ $_.PowerState -eq "PoweredOn" }}, + @{ N="IP Address";E={ $_.ExtensionData.Guest.IpAddress }} +} +$linuxVms | Export-Csv -Path ./linuxVms.csv -NoTypeInformation -UseCulture +``` + +This gave me a CSV export with exactly the data I needed. diff --git a/content/post/powershell-download-web-folder-contents/index.md b/content/post/powershell-download-web-folder-contents/index.md new file mode 100644 index 0000000..73c9f3e --- /dev/null +++ b/content/post/powershell-download-web-folder-contents/index.md @@ -0,0 +1,46 @@ +--- +title: "Download Web Folder Contents with Powershell (`wget -r` replacement)" # Title of the blog post. +date: 2022-04-19T09:18:04-05:00 # Date of post creation. +# lastmod: 2022-04-19T09:18:04-05:00 # Date when last modified +description: "Using PowerShell to retrieve the files stored in a web directory when `wget` isn't an option." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. 
+# featureImageCap: 'This is the featured image.' # Caption (optional). +# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Scripts +tags: + - powershell + - windows +comment: true # Disable comment if false. +--- +We've been working lately to use [HashiCorp Packer](https://www.packer.io/) to standardize and automate our VM template builds, and we found a need to pull in all of the contents of a specific directory on an internal web server. This would be pretty simple for Linux systems using `wget -r`, but we needed to find another solution for our Windows builds. + +A coworker and I cobbled together a quick PowerShell solution which will download the files within a specified web URL to a designated directory (without recreating the nested folder structure): +```powershell +$outputdir = 'C:\Scripts\Download\' +$url = 'https://win01.lab.bowdre.net/stuff/files/' + +# enable TLS 1.2 and TLS 1.1 protocols +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12, [Net.SecurityProtocolType]::Tls11 + +$WebResponse = Invoke-WebRequest -Uri $url +# get the list of links, skip the first one ("[To Parent Directory]") and download the files +$WebResponse.Links | Select-Object -ExpandProperty href -Skip 1 | ForEach-Object { + $fileName = $_.ToString().Split('/')[-1] # 'filename.ext' + $filePath = Join-Path -Path $outputdir -ChildPath $fileName # 'C:\Scripts\Download\filename.ext' + $baseUrl = $url.split('/') # ['https', '', 'win01.lab.bowdre.net', 'stuff', 'files'] + $baseUrl = $baseUrl[0,2] -join '//' # 'https://win01.lab.bowdre.net' + $fileUrl = '{0}{1}' -f $baseUrl.TrimEnd('/'), $_ # 'https://win01.lab.bowdre.net/stuff/files/filename.ext' + Invoke-WebRequest -Uri $fileUrl -OutFile $filePath +} +``` + +The latest version of this 
script will be found on [GitHub](https://github.com/jbowdre/misc-scripts/blob/main/PowerShell/Download-WebFolder.ps1). + diff --git a/content/post/psa-halt-replication-before-snapshotting-linked-vcenters/XTaU9VDy8.png b/content/post/psa-halt-replication-before-snapshotting-linked-vcenters/XTaU9VDy8.png new file mode 100644 index 0000000..0509468 Binary files /dev/null and b/content/post/psa-halt-replication-before-snapshotting-linked-vcenters/XTaU9VDy8.png differ diff --git a/content/post/psa-halt-replication-before-snapshotting-linked-vcenters/index.md b/content/post/psa-halt-replication-before-snapshotting-linked-vcenters/index.md new file mode 100644 index 0000000..457af1c --- /dev/null +++ b/content/post/psa-halt-replication-before-snapshotting-linked-vcenters/index.md @@ -0,0 +1,71 @@ +--- +series: Tips +date: "2021-01-30T08:34:30Z" +thumbnail: XTaU9VDy8.png +usePageBundles: true +tags: +- vmware +title: 'PSA: halt replication before snapshotting linked vCenters' +toc: false +--- + +It's a good idea to take a snapshot of your virtual appliances before applying any updates, just in case. When you have multiple vCenter appliances operating in Enhanced Link Mode, though, it's important to make sure that the snapshots are in a consistent state. The vCenter `vmdird` service is responsible for continuously syncing data between the vCenters within a vSphere Single Sign-On (SSO) domain. Reverting to a snapshot where `vmdird`'s knowledge of the environment dramatically differed from that of the other vCenters could cause significant problems down the road or even result in having to rebuild a vCenter from scratch. + +*(Yes, that's a lesson I learned the hard way - and warnings about that are tragically hard to come by from what I've seen. 
So I'm sharing my notes so that you can avoid making the same mistake.)* + +![Viewing replication status of linked vCenters](XTaU9VDy8.png) + +Take these steps when you need to snapshot linked vCenters to avoid breaking replication: + +1. Open an SSH session to *all* the vCenters within the SSO domain. +2. Log in and enter `shell` to access the shell on each vCenter. +3. Verify that replication is healthy by running `/usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w [SSO_ADMIN_PASSWORD]` on each vCenter. You want to ensure that each host shows as available to all other hosts, and the message that `Partner is 0 changes behind.`: + + ```shell + root@vcsa [ ~ ]# /usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w $ssoPass + Partner: vcsa2.lab.bowdre.net + Host available: Yes + Status available: Yes + My last change number: 9346 + Partner has seen my change number: 9346 + Partner is 0 changes behind. + + root@vcsa2 [ ~ ]# /usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w $ssoPass + Partner: vcsa.lab.bowdre.net + Host available: Yes + Status available: Yes + My last change number: 9518 + Partner has seen my change number: 9518 + Partner is 0 changes behind. + ``` +4. Stop `vmdird` on each vCenter by running `/bin/service-control --stop vmdird`: + + ```shell + root@vcsa [ ~ ]# /bin/service-control --stop vmdird + Operation not cancellable. Please wait for it to finish... + Performing stop operation on service vmdird... + Successfully stopped service vmdird + + root@vcsa2 [ ~ ]# /bin/service-control --stop vmdird + Operation not cancellable. Please wait for it to finish... + Performing stop operation on service vmdird... + Successfully stopped service vmdird + ``` +5. Snapshot the vCenter appliance VMs. +6. 
Start replication on each server again with `/bin/service-control --start vmdird`: + + ```shell + root@vcsa [ ~ ]# /bin/service-control --start vmdird + Operation not cancellable. Please wait for it to finish... + Performing start operation on service vmdird... + Successfully started service vmdird + + root@vcsa2 [ ~ ]# /bin/service-control --start vmdird + Operation not cancellable. Please wait for it to finish... + Performing start operation on service vmdird... + Successfully started service vmdird + ``` +7. Check the replication status with `/usr/lib/vmware-vmdir/bin/vdcrepadmin -f showpartnerstatus -h localhost -u administrator -w [SSO_ADMIN_PASSWORD]` again just to be sure. Don't proceed with whatever else you were planning to do until you've confirmed that the vCenters are in sync. + +You can learn more about the `vdcrepadmin` utility here: +https://kb.vmware.com/s/article/2127057 \ No newline at end of file diff --git a/content/post/psa-microsoft-kb5022842-breaks-ws2022-secure-boot/index.md b/content/post/psa-microsoft-kb5022842-breaks-ws2022-secure-boot/index.md new file mode 100644 index 0000000..f87dd47 --- /dev/null +++ b/content/post/psa-microsoft-kb5022842-breaks-ws2022-secure-boot/index.md @@ -0,0 +1,53 @@ +--- +title: "PSA: Microsoft's KB5022842 breaks Windows Server 2022 VMs with Secure Boot" # Title of the blog post. +date: 2023-02-17T12:24:48-06:00 # Date of post creation. +lastmod: 2023-02-21 +description: "Quick warning about a problematic patch from Microsoft, and a PowerCLI script to expose the potential impact in your vSphere environment." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. 
+usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips # Projects, Scripts, vRA8, K8s on vSphere +tags: + - vmware + - powershell + - windows + - powercli +comment: true # Disable comment if false. +--- +{{% notice info "Fix available" %}} +VMware has released a fix for this problem in the form of [ESXi 7.0 Update 3k](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-esxi-70u3k-release-notes.html#resolvedissues): +> If you already face the issue, after patching the host to ESXi 7.0 Update 3k, just power on the affected Windows Server 2022 VMs. After you patch a host to ESXi 7.0 Update 3k, you can migrate a running Windows Server 2022 VM from a host of version earlier than ESXi 7.0 Update 3k, install KB5022842, and the VM boots properly without any additional steps required. +{{% /notice %}} + +Microsoft released [a patch](https://msrc.microsoft.com/update-guide/releaseNote/2023-Feb) this week for Windows Server 2022 that might cause some [big problems](https://support.microsoft.com/en-gb/topic/february-14-2023-kb5022842-os-build-20348-1547-be155955-29f7-47c4-855c-34bd43895940#known-issues-in-this-update:~:text=Known%20issues%20in%20this%20update) in VMware environments. Per [VMware's KB90947](https://kb.vmware.com/s/article/90947): +> After installing Windows Server 2022 update KB5022842 (OS Build 20348.1547), guest OS can not boot up when virtual machine(s) configured with secure boot enabled running on vSphere ESXi 6.7 U2/U3 or vSphere ESXi 7.0.x. 
+> +> Currently there is no resolution for virtual machines running on vSphere ESXi 6.7 U2/U3 and vSphere ESXi 7.0.x. However the issue doesn't exist with virtual machines running on vSphere ESXi 8.0.x. + +So yeah. That's, uh, *not great.* + +If you've got any **Windows Server 2022** VMs with **[Secure Boot](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-898217D4-689D-4EB5-866C-888353FE241C.html)** enabled on **ESXi 6.7/7.x**, you'll want to make sure they *do not* get **KB5022842** until this problem is resolved. + +I put together a quick PowerCLI query to help identify impacted VMs in my environment: +```powershell +$secureBoot2022VMs = foreach($datacenter in (Get-Datacenter)) { + $datacenter | Get-VM | + Where-Object {$_.Guest.OsFullName -Match 'Microsoft Windows Server 2022' -And $_.ExtensionData.Config.BootOptions.EfiSecureBootEnabled} | + Select-Object @{N="Datacenter";E={$datacenter.Name}}, + Name, + @{N="Running OS";E={$_.Guest.OsFullName}}, + @{N="Secure Boot";E={$_.ExtensionData.Config.BootOptions.EfiSecureBootEnabled}}, + PowerState +} +$secureBoot2022VMs | Export-Csv -NoTypeInformation -Path ./secureBoot2022VMs.csv +``` + +Be careful out there! 
diff --git a/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-new-series-link.png b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-new-series-link.png new file mode 100644 index 0000000..240abde Binary files /dev/null and b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-new-series-link.png differ diff --git a/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-old-category-link.png b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-old-category-link.png new file mode 100644 index 0000000..2b7794d Binary files /dev/null and b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-old-category-link.png differ diff --git a/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-posts-by-category.png b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-posts-by-category.png new file mode 100644 index 0000000..e66d998 Binary files /dev/null and b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-posts-by-category.png differ diff --git a/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-series-navigation.png b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-series-navigation.png new file mode 100644 index 0000000..254b5b5 Binary files /dev/null and b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-series-navigation.png differ diff --git a/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-vra8-series.png b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-vra8-series.png new file mode 100644 index 0000000..e1d1f12 Binary files /dev/null and 
b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/20210724-vra8-series.png differ diff --git a/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/index.md b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/index.md new file mode 100644 index 0000000..77d2dff --- /dev/null +++ b/content/post/recreating-hashnode-series-categories-in-jekyll-on-github-pages/index.md @@ -0,0 +1,286 @@ +--- +series: Tips +date: "2021-07-24T16:46:00Z" +thumbnail: 20210724-series-navigation.png +usePageBundles: true +tags: +- meta +- jekyll +title: Recreating Hashnode Series (Categories) in Jekyll on GitHub Pages +--- + +I recently [migrated this site](/virtually-potato-migrated-to-github-pages) from Hashnode to GitHub Pages, and I'm really getting into the flexibility and control that managing the content through Jekyll provides. So, naturally, after finalizing the move I got to work recreating Hashnode's "Series" feature, which lets you group posts together and highlight them as a collection. One of the things I liked about the Series setup was that I could control the order of the collected posts: my posts about [building out the vRA environment in my homelab](/series/vra8) are probably best consumed in chronological order (oldest to newest) since the newer posts build upon the groundwork laid by the older ones, while posts about my [other one-off projects](/series/projects) could really be enjoyed in any order. + +I quickly realized that if I were hosting this pretty much anywhere *other* than GitHub Pages I could simply leverage the [`jekyll-archives`](https://github.com/jekyll/jekyll-archives) plugin to manage this for me - but, alas, that's not one of the [plugins supported by the platform](https://pages.github.com/versions/). I needed to come up with my own solution, and being still quite new to Jekyll (and this whole website design thing in general) it took me a bit of fumbling to get it right. 
+ +### Reviewing the theme-provided option +The Jekyll theme I'm using ([Minimal Mistakes](https://github.com/mmistakes/minimal-mistakes)) comes with [built-in support](https://mmistakes.github.io/mm-github-pages-starter/categories/) for a [category archive page](/series), which (like the [tags page](/tags)) displays all the categorized posts on a single page. Links at the top will let you jump to an appropriate anchor to start viewing the selected category, but it's not really an elegant way to display a single category. +![Posts by category](20210724-posts-by-category.png) + +It's a start, though, so I took a few minutes to check out how it's being generated. The category archive page lives at [`_pages/category-archive.md`](https://raw.githubusercontent.com/mmistakes/mm-github-pages-starter/master/_pages/category-archive.md): +```markdown +--- +title: "Posts by Category" +layout: categories +permalink: /categories/ +author_profile: true +--- +``` + +The `title` indicates what's going to be written in bold text at the top of the page, the `permalink` says that it will be accessible at `http://localhost/categories/`, and the nice little `author_profile` sidebar will appear on the left. + +This page then calls the `categories` layout, which is defined in [`_layouts/categories.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_layouts/categories.html): +```liquid +{% raw %}--- +layout: archive +--- + +{{ content }} + +{% assign categories_max = 0 %} +{% for category in site.categories %} + {% if category[1].size > categories_max %} + {% assign categories_max = category[1].size %} + {% endif %} +{% endfor %} + +
    + {% for i in (1..categories_max) reversed %} + {% for category in site.categories %} + {% if category[1].size == i %} +
  • + + {{ category[0] }} {{ i }} + +
  • + {% endif %} + {% endfor %} + {% endfor %} +
+ +{% assign entries_layout = page.entries_layout | default: 'list' %} +{% for i in (1..categories_max) reversed %} + {% for category in site.categories %} + {% if category[1].size == i %} +
+
+ <h2 class="archive__subtitle">{{ category[0] }}</h2>
+
+ {% for post in category.last %} + {% include archive-single.html type=entries_layout %} + {% endfor %} +
+ <a href="#page-title" class="back-to-top">{{ site.data.ui-text[site.locale].back_to_top | default: 'Back to Top' }} &uarr;</a>
+ {% endif %} + {% endfor %} +{% endfor %}{% endraw %} +``` + +I wanted my solution to preserve the formatting that's used by the theme elsewhere on this site so this bit is going to be my base. The big change I'll make is that instead of enumerating all of the categories on one page, I'll have to create a new static page for each of the categories I'll want to feature. And each of those pages will refer to a new layout to determine what will actually appear on the page. + +### Defining a new layout +I create a new file called `_layouts/series.html` which will define how these new series pages get rendered. It starts out just like the default `categories.html` one: + +```liquid +{% raw %}--- +layout: archive +--- + +{{ content }}{% endraw %} +``` + +That `{{ content }}` block will let me define text to appear above the list of articles - very handy. Much of the original `categories.html` code has to do with iterating through the list of categories. I won't need that, though, so I'll jump straight to setting what layout the entries on this page will use: +```liquid +{% assign entries_layout = page.entries_layout | default: 'list' %} +``` + +I'll be including two custom variables in the [Front Matter](https://jekyllrb.com/docs/front-matter/) for my category pages: `tag` to specify what category to filter on, and `sort_order` which will be set to `reverse` if I want the older posts up top. I'll be able to access these in the layout as `page.tag` and `page.sort_order`, respectively. So I'll go ahead and grab all the posts which are categorized with `page.tag`, and then decide whether the posts will get sorted normally or in reverse: +```liquid +{% raw %}{% assign posts = site.categories[page.tag] %} +{% if page.sort_order == 'reverse' %} + {% assign posts = posts | reverse %} +{% endif %}{% endraw %} +``` + +And then I'll loop through each post (in either normal or reverse order) and insert them into the rendered page: +```liquid +{% raw %}
<div class="entries-{{ entries_layout }}"> + {% for post in posts %} + {% include archive-single.html type=entries_layout %} + {% endfor %} + </div>
{% endraw %} +``` + +Putting it all together now, here's my new `_layouts/series.html` file: +```liquid +{% raw %}--- +layout: archive +--- + +{{ content }} + +{% assign entries_layout = page.entries_layout | default: 'list' %} +{% assign posts = site.categories[page.tag] %} +{% if page.sort_order == 'reverse' %} + {% assign posts = posts | reverse %} +{% endif %} +
+<div class="entries-{{ entries_layout }}"> + {% for post in posts %} + {% include archive-single.html type=entries_layout %} + {% endfor %} + </div>
{% endraw %} +``` + +### Series pages +Since I can't use a plugin to automatically generate pages for each series, I'll have to do it manually. Fortunately this is pretty easy, and I've got a limited number of categories/series to worry about. I started by making a new `_pages/series-vra8.md` and setting it up thusly: +```markdown +{% raw %}--- +title: "Adventures in vRealize Automation 8" +layout: series +permalink: "/series/vra8" +tag: vRA8 +sort_order: reverse +author_profile: true +header: + teaser: assets/images/posts-2020/RtMljqM9x.png +--- + +*Follow along as I create a flexible VMware vRealize Automation 8 environment for provisioning virtual machines - all from the comfort of my Intel NUC homelab.*{% endraw %} +``` + +You can see that this page is referencing the series layout I just created, and it's going to live at `http://localhost/series/vra8` - precisely where this series was on Hashnode. I've tagged it with the category I want to feature on this page, and specified that the posts will be sorted in reverse order so that anyone reading through the series will start at the beginning (I hear it's a very good place to start). I also added a teaser image which will be displayed when I link to the series from elsewhere. And I included a quick little italicized blurb to tell readers what the series is about. + +Check it out [here](/series/vra8): +![vRA8 series](20210724-vra8-series.png) + +The other series pages will be basically the same, just without the reverse sort directive. 
Here's `_pages/series-tips.md`: +```markdown +{% raw %}--- +title: "Tips & Tricks" +layout: series +permalink: "/series/tips" +tag: Tips +author_profile: true +header: + teaser: assets/images/posts-2020/kJ_l7gPD2.png +--- + +*Useful tips and tricks I've stumbled upon.*{% endraw %} +``` + +### Changing the category permalink +Just in case someone wants to look at all the post series in one place, I'll be keeping the existing category archive page around, but I'll want it to be found at `/series/` instead of `/categories/`. I'll start with going into the `_config.yml` file and changing the `category_archive` path: + +```yaml +category_archive: + type: liquid + # path: /categories/ + path: /series/ +tag_archive: + type: liquid + path: /tags/ +``` + +I'll also rename `_pages/category-archive.md` to `_pages/series-archive.md` and update its title and permalink: +```markdown +{% raw %}--- +title: "Posts by Series" +layout: categories +permalink: /series/ +author_profile: true +---{% endraw %} +``` + +### Fixing category links in posts +The bottom of each post has a section which lists the tags and categories to which it belongs. Right now, those are still pointing to the category archive page (`/series/#vra8`) instead of the series feature pages I created (`/series/vra8`). +![Old category link](20210724-old-category-link.png) + +That *works* but I'd rather it reference the fancy new pages I created. Tracking down where to make that change was a bit of a journey. + +I started with the [`_layouts/single.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_layouts/single.html) file which is the layout I'm using for individual posts. This bit near the end gave me the clue I needed: +```liquid +{% raw %}
+ {% if site.data.ui-text[site.locale].meta_label %} +

+ <h4 class="page__meta-title">{{ site.data.ui-text[site.locale].meta_label }}</h4>
{% endraw %} +``` + +It looks like [`page__taxonomy.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_includes/page__taxonomy.html) is being used to display the tags and categories, so I then went to that file in the `_include` directory: +```liquid +{% raw %}{% if site.tag_archive.type and page.tags[0] %} + {% include tag-list.html %} +{% endif %} + +{% if site.category_archive.type and page.categories[0] %} + {% include category-list.html %} +{% endif %}{% endraw %} +``` + +Okay, it looks like [`_include/category-list.html`](https://github.com/mmistakes/minimal-mistakes/blob/master/_includes/category-list.html) is what I actually want. Here's that file: +```liquid +{% raw %}{% case site.category_archive.type %} + {% when "liquid" %} + {% assign path_type = "#" %} + {% when "jekyll-archives" %} + {% assign path_type = nil %} +{% endcase %} + +{% if site.category_archive.path %} + {% assign categories_sorted = page.categories | sort_natural %} + +

+ {{ site.data.ui-text[site.locale].categories_label | default: "categories:" }} + + {% for category_word in categories_sorted %} + <a href="{{ category_word | slugify | prepend: path_type | prepend: site.category_archive.path | relative_url }}" class="page__taxonomy-item p-category" rel="tag">{{ category_word }}</a>{% unless forloop.last %}<span class="sep">, </span>{% endunless %} + {% endfor %} + +

+{% endif %}{% endraw %} +``` + +I'm using the `liquid` archive approach since I can't use the `jekyll-archives` plugin, so I can see that it's setting the `path_type` to `"#"`. And near the bottom of the file, I can see that it's assembling the category link by slugifying the `category_word`, sticking the `path_type` in front of it, and then putting the `site.category_archive.path` (which I edited earlier in `_config.yml`) in front of that. So that's why my category links look like `/series/#category`. I can just edit the top of this file to statically set `path_type = nil` and that should clear this up in a jiffy: +```liquid +{% raw %}{% assign path_type = nil %} +{% if site.category_archive.path %} + {% assign categories_sorted = page.categories | sort_natural %} + [...]{% endraw %} +``` + +To sell the series illusion even further, I can pop into [`_data/ui-text.yml`](https://github.com/mmistakes/minimal-mistakes/blob/master/_data/ui-text.yml) to update the string used for `categories_label`: +```yaml + meta_label : + tags_label : "Tags:" + categories_label : "Series:" + date_label : "Updated:" + comments_label : "Leave a comment" +``` +![Updated series link](20210724-new-series-link.png) + +Much better! + +### Updating the navigation header +And, finally, I'll want to update the navigation links at the top of each page to help visitors find my new featured series pages. For that, I can just edit `_data/navigation.yml` with links to my new pages: +```yaml +main: + - title: "vRealize Automation 8" + url: /series/vra8 + - title: "Projects" + url: /series/projects + - title: "Scripts" + url: /series/scripts + - title: "Tips & Tricks" + url: /series/tips + - title: "Tags" + url: /tags/ + - title: "All Posts" + url: /posts/ +``` + +### All done! +![Slick series navigation!](20210724-series-navigation.png) + +I set out to recreate the series setup that I had over at Hashnode, and I think I've accomplished that. 
More importantly, I've learned quite a bit more about how Jekyll works, and I'm already plotting further tweaks. For now, though, I think this is ready for a `git push`! \ No newline at end of file diff --git a/content/post/removing-recreating-vcls-vms/add-advanced-setting.png b/content/post/removing-recreating-vcls-vms/add-advanced-setting.png new file mode 100644 index 0000000..db49401 Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/add-advanced-setting.png differ diff --git a/content/post/removing-recreating-vcls-vms/basic-architecture.png b/content/post/removing-recreating-vcls-vms/basic-architecture.png new file mode 100644 index 0000000..0284725 Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/basic-architecture.png differ diff --git a/content/post/removing-recreating-vcls-vms/cluster-domain-id.png b/content/post/removing-recreating-vcls-vms/cluster-domain-id.png new file mode 100644 index 0000000..005679e Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/cluster-domain-id.png differ diff --git a/content/post/removing-recreating-vcls-vms/index.md b/content/post/removing-recreating-vcls-vms/index.md new file mode 100644 index 0000000..856689a --- /dev/null +++ b/content/post/removing-recreating-vcls-vms/index.md @@ -0,0 +1,78 @@ +--- +title: "Removing and Recreating vCLS VMs" # Title of the blog post. +date: 2022-07-24 +lastmod: 2022-07-25 # Date when last modified +description: "How to remove and (optionally) recreate the vSphere Clustering Services VMs" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +featureImage: "basic-architecture.png" # Sets featured image on blog post. 
+# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "basic-architecture.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips # Projects, Scripts, vRA8 +tags: + - vmware + - vsphere + - homelab +comment: true # Disable comment if false. +--- + +Way back in 2020, VMware released vSphere 7 Update 1 and introduced the new [vSphere Clustering Services (vCLS)](https://core.vmware.com/resource/introduction-vsphere-clustering-service-vcls) to improve how cluster services like the Distributed Resource Scheduler (DRS) operate. vCLS deploys lightweight agent VMs directly on the cluster being managed, and those VMs provide a decoupled and distributed control plane to offload some of the management responsibilities from the vCenter server. + +![vCLS VM](vcls-vm.png) + +That's very cool, particularly in large continent-spanning environments or those which reach into multiple clouds, but it may not make sense to add those additional workloads in resource-constrained homelabs[^esxi-arm]. And while the vCLS VMs are supposed to be automagically self-managed, sometimes things go a little wonky and that management fails to function correctly, which can negatively impact DRS. Recovering from such a scenario is complicated by the complete inability to manage the vCLS VMs through the vSphere UI. + +[^esxi-arm]: Or when [running the ESXi-ARM Fling](/esxi-arm-on-quartz64/), where the vCLS VMs aren't able to be created and will just [fill up the Tasks list with failures](https://flings.vmware.com/esxi-arm-edition/bugs/1099). + +Fortunately there's a somewhat-hidden way to disable (and re-enable) vCLS on a per-cluster basis, and it's easy to do once you know the trick. 
This can help if you want to permanently disable vCLS (like in a lab environment) or if you just need to turn it off and on again[^off-and-on] to clean up and redeploy uncooperative agent VMs. + +{{% notice warning "Proceed at your own risk" %}} +Disabling vCLS will break DRS, and could have other unintended side effects. Don't do this in prod if you can avoid it. +{{% /notice %}} + +[^off-and-on]: ![](off-and-on.gif) + +### Find the cluster's domain ID +It starts with determining the affected cluster's domain ID, which is very easy to do once you know where to look. Simply browse to the cluster object in the vSphere inventory, and look at the URL: +![Cluster domain ID](cluster-domain-id.png) + +That `ClusterComputeResource:domain-c13` portion tells me exactly what I need to know: the ID for the `NUC Cluster` is `domain-c13`. + +### Disable vCLS for a cluster +With that information gathered, you're ready to do the deed. Select the vCenter object in your vSphere inventory, head to the **Configure** tab, and open the **Advanced Settings** item. + +![vCenter Advanced Settings](vcenter-advanced-settings.png) + +Now click the **Edit Settings** button to open the editor panel. You'll need to create a new advanced setting so scroll to the bottom of the panel and enter: + +| Setting Name | Value | +|:--- |:--- | +| `config.vcls.clusters.domain-[id].enabled` | `false` | + +![Adding the advanced setting](add-advanced-setting.png) + +Then click **Add** and **Save** to apply the change. 
+ +Within moments, the vCLS VM(s) will be powered off and deleted: +![Be gone, vCLS!](vcls-deleted.png) + +### Re-enable vCLS +If you need to bring back vCLS (such as when troubleshooting a problematic cluster), that's as simple as changing the advanced setting again: + +| Setting Name | Value | +|:--- |:--- | +| `config.vcls.clusters.domain-[id].enabled` | `true` | + +![Re-enabling vCLS](vcls-enabled.png) + +And the VM(s) will be automatically recreated as needed: +![Recreated vCLS VM](vcls-vm-recreated.png) + + + diff --git a/content/post/removing-recreating-vcls-vms/off-and-on.gif b/content/post/removing-recreating-vcls-vms/off-and-on.gif new file mode 100644 index 0000000..70fc84a Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/off-and-on.gif differ diff --git a/content/post/removing-recreating-vcls-vms/vcenter-advanced-settings.png b/content/post/removing-recreating-vcls-vms/vcenter-advanced-settings.png new file mode 100644 index 0000000..3c124ec Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/vcenter-advanced-settings.png differ diff --git a/content/post/removing-recreating-vcls-vms/vcls-deleted.png b/content/post/removing-recreating-vcls-vms/vcls-deleted.png new file mode 100644 index 0000000..397beb1 Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/vcls-deleted.png differ diff --git a/content/post/removing-recreating-vcls-vms/vcls-enabled.png b/content/post/removing-recreating-vcls-vms/vcls-enabled.png new file mode 100644 index 0000000..296cae7 Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/vcls-enabled.png differ diff --git a/content/post/removing-recreating-vcls-vms/vcls-vm-recreated.png b/content/post/removing-recreating-vcls-vms/vcls-vm-recreated.png new file mode 100644 index 0000000..d38fcc5 Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/vcls-vm-recreated.png differ diff --git a/content/post/removing-recreating-vcls-vms/vcls-vm.png 
b/content/post/removing-recreating-vcls-vms/vcls-vm.png new file mode 100644 index 0000000..d08f5df Binary files /dev/null and b/content/post/removing-recreating-vcls-vms/vcls-vm.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_administrators_placement.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_administrators_placement.png new file mode 100644 index 0000000..2412dad Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_administrators_placement.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_administrators_visibility.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_administrators_visibility.png new file mode 100644 index 0000000..887f442 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_administrators_visibility.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_cloud_assembly_new_version.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_cloud_assembly_new_version.png new file mode 100644 index 0000000..f6671c1 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_cloud_assembly_new_version.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_label.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_label.png new file mode 100644 index 0000000..2568fbe Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_label.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_placement.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_placement.png new file mode 100644 index 0000000..80b8089 Binary files 
/dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_placement.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_step.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_step.png new file mode 100644 index 0000000..9c60938 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210831_system_drive_size_step.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_action_constants.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_action_constants.png new file mode 100644 index 0000000..0a26f0c Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_action_constants.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_action_select_language.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_action_select_language.png new file mode 100644 index 0000000..3b02a50 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_action_select_language.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_create_action.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_create_action.png new file mode 100644 index 0000000..463fd28 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_create_action.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_create_action_constant.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_create_action_constant.png new file mode 100644 index 0000000..cb5d197 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_create_action_constant.png differ diff --git 
a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_map_constants_to_action.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_map_constants_to_action.png new file mode 100644 index 0000000..359a5e7 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210901_map_constants_to_action.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_action_run_success.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_action_run_success.png new file mode 100644 index 0000000..bd3d33a Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_action_run_success.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_extensibility_subscriptions.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_extensibility_subscriptions.png new file mode 100644 index 0000000..dd6a145 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_extensibility_subscriptions.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_new_subscription_1.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_new_subscription_1.png new file mode 100644 index 0000000..776fb2d Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_new_subscription_1.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_new_subscription_2.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_new_subscription_2.png new file mode 100644 index 0000000..09bcd42 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_new_subscription_2.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_old_subscription_blocking.png 
b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_old_subscription_blocking.png new file mode 100644 index 0000000..f4acc60 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_old_subscription_blocking.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_request.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_request.png new file mode 100644 index 0000000..382eb96 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_request.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_verify_disk_size.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_verify_disk_size.png new file mode 100644 index 0000000..ba37920 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_verify_disk_size.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_verify_local_admins.png b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_verify_local_admins.png new file mode 100644 index 0000000..d3d73b9 Binary files /dev/null and b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/20210903_verify_local_admins.png differ diff --git a/content/post/run-scripts-in-guest-os-with-vra-abx-actions/index.md b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/index.md new file mode 100644 index 0000000..026ee26 --- /dev/null +++ b/content/post/run-scripts-in-guest-os-with-vra-abx-actions/index.md @@ -0,0 +1,558 @@ +--- +series: vRA8 +date: "2021-09-03T00:00:00Z" +thumbnail: 20210903_action_run_success.png +usePageBundles: true +lastmod: "2021-09-20" +tags: +- vra +- abx +- powershell +- vmware +title: Run scripts in guest OS with vRA ABX Actions +--- +Thus far in my [vRealize Automation project](/series/vra8), I've primarily been handing the payload over to vRealize 
Orchestrator to do the heavy lifting on the back end. This approach works really well for complex multi-part workflows (like when [generating unique hostnames](/vra8-custom-provisioning-part-two#the-vro-workflow)), but it may be overkill for more linear tasks (such as just running some simple commands inside of a deployed guest OS). In this post, I'll explore how I use [vRA Action Based eXtensibility (ABX)](https://blogs.vmware.com/management/2020/09/vra-abx-flow.html) to do just that. + +### The Goal +My ABX action is going to use PowerCLI to perform a few steps inside a deployed guest OS (Windows-only for this demonstration): +1. Auto-update VM tools (if needed). +2. Add specified domain users/groups to the local Administrators group. +3. Extend the C: volume to fill the VMDK. +4. Set up Windows Firewall to enable remote access. +5. Create a scheduled task to attempt to automatically apply any available Windows updates. + +### Template Changes +#### Cloud Assembly +I'll need to start by updating the cloud template so that the requester can input an (optional) list of admin accounts to be added to the VM, and to enable specifying a disk size to override the default from the source VM template. + +I will also add some properties to tell PowerCLI (and the `Invoke-VmScript` cmdlet in particular) how to connect to the VM. + +##### Inputs section +I'll kick this off by going into Cloud Assembly and editing the `WindowsDemo` template I've been working on for the past few eons. I'll add a `diskSize` input: +```yaml +formatVersion: 1 +inputs: + site: [...] + image: [...] + size: [...] + diskSize: + title: 'System drive size' + default: 60 + type: integer + minimum: 60 + maximum: 200 + network: [...] + adJoin: [...] +[...] +``` + +The default value is set to 60GB to match the VMDK attached to the source template; that's also the minimum value since shrinking disks gets messy. + +I'll also drop in an `adminsList` input at the bottom of the section: +```yaml +[...] 
+ poc_email: [...] + ticket: [...] + adminsList: + type: string + title: Administrators + description: Comma-separated list of domain accounts/groups which need admin access to this server. + default: '' +resources: + Cloud_vSphere_Machine_1: +[...] +``` + +##### Resources section +In the Resources section of the cloud template, I'm going to add a few properties that will tell the ABX script how to connect to the appropriate vCenter and then the VM. +- `vCenter`: The vCenter server where the VM will be deployed, and thus the server which PowerCLI will authenticate against. In this case, I've only got one vCenter, but a larger environment might have multiples. Defining this in the cloud template makes it easy to select automagically if needed. (For instance, if I had a `bow-vcsa` and a `dre-vcsa` for my different sites, I could do something like `vCenter: '${input.site}-vcsa.lab.bowdre.net'` here.) +- `vCenterUser`: The username with rights to the VM in vCenter. Again, this doesn't have to be a static assignment. +- `templateUser`: This is the account that will be used by `Invoke-VmScript` to log in to the guest OS. My template will use the default `Administrator` account for non-domain systems, but the `lab\vra` service account on domain-joined systems (using the `adJoin` input I [set up earlier](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template)). + +I'll also include the `adminsList` input from earlier so that can get passed to ABX as well. And I'm going to add in an `adJoin` property (mapped to the [existing `input.adJoin`](/joining-vms-to-active-directory-in-site-specific-ous-with-vra8#cloud-template)) so that I'll have that to work with later. + +```yaml +[...] +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + site: '${input.site}' + vCenter: vcsa.lab.bowdre.net + vCenterUser: vra@lab.bowdre.net + templateUser: '${input.adJoin ? 
"vra@lab" : "Administrator"}' + adminsList: '${input.adminsList}' + environment: '${input.environment}' + function: '${input.function}' + app: '${input.app}' + adJoin: '${input.adJoin}' + ignoreActiveDirectory: '${!input.adJoin}' +[...] +``` + +And I will add in a `storage` property as well which will automatically adjust the deployed VMDK size to match the specified input: +```yaml +[...] + description: '${input.description}' + networks: [...] + constraints: [...] + storage: + bootDiskCapacityInGB: '${input.diskSize}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: [...] +[...] +``` + +##### Complete template +Okay, all together now: +```yaml +formatVersion: 1 +inputs: + site: + type: string + title: Site + enum: + - BOW + - DRE + image: + type: string + title: Operating System + oneOf: + - title: Windows Server 2019 + const: ws2019 + default: ws2019 + size: + title: Resource Size + type: string + oneOf: + - title: 'Micro [1vCPU|1GB]' + const: micro + - title: 'Tiny [1vCPU|2GB]' + const: tiny + - title: 'Small [2vCPU|2GB]' + const: small + default: small + diskSize: + title: 'System drive size' + default: 60 + type: integer + minimum: 60 + maximum: 200 + network: + title: Network + type: string + adJoin: + title: Join to AD domain + type: boolean + default: true + staticDns: + title: Create static DNS record + type: boolean + default: false + environment: + type: string + title: Environment + oneOf: + - title: Development + const: D + - title: Testing + const: T + - title: Production + const: P + default: D + function: + type: string + title: Function Code + oneOf: + - title: Application (APP) + const: APP + - title: Desktop (DSK) + const: DSK + - title: Network (NET) + const: NET + - title: Service (SVS) + const: SVS + - title: Testing (TST) + const: TST + default: TST + app: + type: string + title: Application Code + minLength: 3 + maxLength: 3 + default: xxx + description: + type: string + title: Description + description: Server 
function/purpose + default: Testing and evaluation + poc_name: + type: string + title: Point of Contact Name + default: Jack Shephard + poc_email: + type: string + title: Point of Contact Email + default: jack.shephard@virtuallypotato.com + pattern: '^[^\s@]+@[^\s@]+\.[^\s@]+$' + ticket: + type: string + title: Ticket/Request Number + default: 4815162342 + adminsList: + type: string + title: Administrators + description: Comma-separated list of domain accounts/groups which need admin access to this server. + default: '' +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + site: '${input.site}' + vCenter: vcsa.lab.bowdre.net + vCenterUser: vra@lab.bowdre.net + templateUser: '${input.adJoin ? "vra@lab" : "Administrator"}' + adminsList: '${input.adminsList}' + environment: '${input.environment}' + function: '${input.function}' + app: '${input.app}' + adJoin: '${input.adJoin}' + ignoreActiveDirectory: '${!input.adJoin}' + activeDirectory: + relativeDN: '${"OU=Servers,OU=Computers,OU=" + input.site + ",OU=LAB"}' + customizationSpec: '${input.adJoin ? 
"vra-win-domain" : "vra-win-workgroup"}' + staticDns: '${input.staticDns}' + dnsDomain: lab.bowdre.net + poc: '${input.poc_name + " (" + input.poc_email + ")"}' + ticket: '${input.ticket}' + description: '${input.description}' + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + constraints: + - tag: 'comp:${to_lower(input.site)}' + storage: + bootDiskCapacityInGB: '${input.diskSize}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing + constraints: + - tag: 'net:${input.network}' +``` + +With the template sorted, I need to assign it a new version and release it to the catalog so that the changes will be visible to Service Broker: +![Releasing a new version of a Cloud Assembly template](20210831_cloud_assembly_new_version.png) + +#### Service Broker custom form +I now need to also make some updates to the custom form configuration in Service Broker so that the new fields will appear on the request form. First things first, though: after switching to the Service Broker UI, I go to **Content & Policies > Content Sources**, open the linked content source, and click the **Save & Import** button to force Service Broker to pull in the latest versions from Cloud Assembly. + +I can then go to **Content**, click the three-dot menu next to my `WindowsDemo` item, and select the **Customize Form** option. I drag-and-drop the `System drive size` from the *Schema Elements* section onto the canvas, placing it directly below the existing `Resource Size` field. +![Placing the system drive size field on the canvas](20210831_system_drive_size_placement.png) + +With the field selected, I use the **Properties** section to edit the label with a unit so that users will better understand what they're requesting. 
+![System drive size label](20210831_system_drive_size_label.png) + +On the **Values** tab, I change the *Step* option to `5` so that we won't wind up with users requesting a disk size of `62.357 GB` or anything crazy like that. +![System drive size step](20210831_system_drive_size_step.png) + +I'll drag-and-drop the `Administrators` field to the canvas, and put it right below the VM description: +![Administrators field placement](20210831_administrators_placement.png) + +I only want this field to be visible if the VM is going to be joined to the AD domain, so I'll set the *Visibility* accordingly: +![Administrators field visibility](20210831_administrators_visibility.png) + +That should be everything I need to add to the custom form so I'll be sure to hit that big **Save** button before moving on. + +### Extensibility +Okay, now it's time to actually make the stuff work on the back end. But before I get to writing the actual script, there's something else I'll need to do first. Remember how I added properties to store the usernames for vCenter and the VM template in the cloud template? My ABX action will also need to know the passwords for those accounts. I didn't add those to the cloud template since anything added as a property there (even if flagged as a secret!) would be visible in plain text to any external handlers (like vRO). Instead, I'll store those passwords as encrypted Action Constants. + +#### Action Constants +From the vRA Cloud Assembly interface, I'll navigate to **Extensibility > Library > Actions** and then click the **Action Constants** button up top. I can then click **New Action Constant** and start creating the ones I need: +- `vCenterPassword`: for logging into vCenter. +- `templatePassWinWorkgroup`: for logging into non-domain VMs. +- `templatePassWinDomain`: for logging into VMs with the designated domain credentials. + +I'll make sure to enable the *Encrypt the action constant value* toggle for each so they'll be protected. 
+![Creating an action constant](20210901_create_action_constant.png) + +![Created action constants](20210901_action_constants.png) + +Once all those constants are created I can move on to the meat of this little project: + +#### ABX Action +I'll click back to **Extensibility > Library > Actions** and then **+ New Action**. I give the new action a clever title and description: +![Create a new action](20210901_create_action.png) + +I then hit the language dropdown near the top left and select to use `powershell` so that I can use those sweet, sweet PowerCLI cmdlets. +![Language selection](20210901_action_select_language.png) + +And I'll pop over to the right side to map the Action Constants I created earlier so that I can reference them in the script I'm about to write: +![Mapping constants in action](20210901_map_constants_to_action.png) + +Now for The Script: +```powershell +<# vRA 8.x ABX action to perform certain in-guest actions post-deploy: + Windows: + - auto-update VM tools + - add specified domain users/groups to local Administrators group + - extend C: volume to fill disk + - set up remote access + - create a scheduled task to (attempt to) apply Windows updates + + ## Action Secrets: + templatePassWinDomain # password for domain account with admin rights to the template (domain-joined deployments) + templatePassWinWorkgroup # password for local account with admin rights to the template (standalone deployments) + vCenterPassword # password for vCenter account passed from the cloud template + + ## Action Inputs: + ## Inputs from deployment: + resourceNames[0] # VM name [BOW-DVRT-XXX003] + customProperties.vCenterUser # user for connecting to vCenter [lab\vra] + customProperties.vCenter # vCenter instance to connect to [vcsa.lab.bowdre.net] + customProperties.dnsDomain # long-form domain name [lab.bowdre.net] + customProperties.adminsList # list of domain users/groups to be added as local admins [john, lab\vra, vRA-Admins] + customProperties.adJoin # boolean
to determine if the system will be joined to AD (true) or not (false) + customProperties.templateUser # username used for connecting to the VM through vmtools [Administrator] / [root] +#> + +function handler($context, $inputs) { + # Initialize global variables + $vcUser = $inputs.customProperties.vCenterUser + $vcPassword = $context.getSecret($inputs."vCenterPassword") + $vCenter = $inputs.customProperties.vCenter + + # Create vmtools connection to the VM + $vmName = $inputs.resourceNames[0] + Connect-ViServer -Server $vCenter -User $vcUser -Password $vcPassword -Force + $vm = Get-VM -Name $vmName + Write-Host "Waiting for VM Tools to start..." + if (-not (Wait-Tools -VM $vm -TimeoutSeconds 180)) { + Write-Error "Unable to establish connection with VM tools" -ErrorAction Stop + } + + # Detect OS type + $count = 0 + While (!$osType) { + Try { + $osType = ($vm | Get-View).Guest.GuestFamily.ToString() + $toolsStatus = ($vm | Get-View).Guest.ToolsStatus.ToString() + } Catch { + # 60s timeout + if ($count -ge 12) { + Write-Error "Timeout exceeded while waiting for tools." -ErrorAction Stop + break + } + Write-Host "Waiting for tools..." + $count++ + Sleep 5 + } + } + Write-Host "$vmName is a $osType and its tools status is $toolsStatus." + + # Update tools on Windows if out of date + if ($osType.Equals("windowsGuest") -And $toolsStatus.Equals("toolsOld")) { + Write-Host "Updating VM Tools..." + Update-Tools $vm + Write-Host "Waiting for VM Tools to start..." + if (-not (Wait-Tools -VM $vm -TimeoutSeconds 180)) { + Write-Error "Unable to establish connection with VM tools" -ErrorAction Stop + } + } + + # Run OS-specific tasks + if ($osType.Equals("windowsGuest")) { + # Initialize Windows variables + $domainLong = $inputs.customProperties.dnsDomain + $adminsList = $inputs.customProperties.adminsList + $adJoin = $inputs.customProperties.adJoin + $templateUser = $inputs.customProperties.templateUser + $templatePassword = $adJoin.Equals("true") ? 
$context.getSecret($inputs."templatePassWinDomain") : $context.getSecret($inputs."templatePassWinWorkgroup") + + # Add domain accounts to local administrators group + if ($adminsList.Length -gt 0 -And $adJoin.Equals("true")) { + # Standardize users entered without domain as DOMAIN\username + if ($adminsList.Length -gt 0) { + $domainShort = $domainLong.split('.')[0] + $adminsArray = @(($adminsList -Split ',').Trim()) + For ($i=0; $i -lt $adminsArray.Length; $i++) { + If ($adminsArray[$i] -notmatch "$domainShort.*\\" -And $adminsArray[$i] -notmatch "@$domainShort") { + $adminsArray[$i] = $domainShort + "\" + $adminsArray[$i] + } + } + $admins = '"{0}"' -f ($adminsArray -join '","') + Write-Host "Administrators: $admins" + } + $adminScript = "Add-LocalGroupMember -Group Administrators -Member $admins" + Start-Sleep -s 10 + Write-Host "Attempting to add administrator accounts..." + $runAdminScript = Invoke-VMScript -VM $vm -ScriptText $adminScript -GuestUser $templateUser -GuestPassword $templatePassword + if ($runAdminScript.ScriptOutput.Length -eq 0) { + Write-Host "Successfully added [$admins] to Administrators group." + } else { + Write-Host "Attempt to add [$admins] to Administrators group completed with warnings:`n" $runAdminScript.ScriptOutput "`n" + } + } else { + Write-Host "No admins to add..." + } + # Extend C: volume to fill system drive + $partitionScript = "`$Partition = Get-Volume -DriveLetter C | Get-Partition; `$Partition | Resize-Partition -Size (`$Partition | Get-PartitionSupportedSize).sizeMax" + Start-Sleep -s 10 + Write-Host "Attempting to extend system volume..." + $runPartitionScript = Invoke-VMScript -VM $vm -ScriptText $partitionScript -GuestUser $templateUser -GuestPassword $templatePassword + if ($runPartitionScript.ScriptOutput.Length -eq 0) { + Write-Host "Successfully extended system partition." 
+ } else { + Write-Host "Attempt to extend system volume completed with warnings:`n" $runPartitionScript.ScriptOutput "`n" + } + # Set up remote access + $remoteScript = "Enable-NetFirewallRule -DisplayGroup `"Remote Desktop`" + Enable-NetFirewallRule -DisplayGroup `"Windows Management Instrumentation (WMI)`" + Enable-NetFirewallRule -DisplayGroup `"File and Printer Sharing`" + Enable-PsRemoting + Set-ItemProperty -Path 'HKLM:\System\CurrentControlSet\Control\Terminal Server' -name `"fDenyTSConnections`" -Value 0" + Start-Sleep -s 10 + Write-Host "Attempting to enable remote access (RDP, WMI, File and Printer Sharing, PSRemoting)..." + $runRemoteScript = Invoke-VMScript -VM $vm -ScriptText $remoteScript -GuestUser $templateUser -GuestPassword $templatePassword + if ($runRemoteScript.ScriptOutput.Length -eq 0) { + Write-Host "Successfully enabled remote access." + } else { + Write-Host "Attempt to enable remote access completed with warnings:`n" $runRemoteScript.ScriptOutput "`n" + } + # Create scheduled task to apply updates + $updateScript = "`$action = New-ScheduledTaskAction -Execute 'C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe' -Argument '-NoProfile -WindowStyle Hidden -Command `"& {Install-WUUpdates -Updates (Start-WUScan)}`"' + `$trigger = New-ScheduledTaskTrigger -Once -At ([DateTime]::Now.AddMinutes(1)) + `$settings = New-ScheduledTaskSettingsSet -Compatibility Win8 -Hidden + Register-ScheduledTask -Action `$action -Trigger `$trigger -Settings `$settings -TaskName `"Initial_Updates`" -User `"NT AUTHORITY\SYSTEM`" -RunLevel Highest + `$task = Get-ScheduledTask -TaskName `"Initial_Updates`" + `$task.Triggers[0].StartBoundary = [DateTime]::Now.AddMinutes(1).ToString(`"yyyy-MM-dd'T'HH:mm:ss`") + `$task.Triggers[0].EndBoundary = [DateTime]::Now.AddHours(3).ToString(`"yyyy-MM-dd'T'HH:mm:ss`") + `$task.Settings.AllowHardTerminate = `$True + `$task.Settings.DeleteExpiredTaskAfter = 'PT0S' + `$task.Settings.ExecutionTimeLimit = 'PT2H' + 
`$task.Settings.Volatile = `$False + `$task | Set-ScheduledTask" + Start-Sleep -s 10 + Write-Host "Creating a scheduled task to apply updates..." + $runUpdateScript = Invoke-VMScript -VM $vm -ScriptText $updateScript -GuestUser $templateUser -GuestPassword $templatePassword + Write-Host "Created task:`n" $runUpdateScript.ScriptOutput "`n" + } elseif ($osType.Equals("linuxGuest")) { + #TODO + Write-Host "Linux systems not supported by this action... yet" + } + # Cleanup connection + Disconnect-ViServer -Server $vCenter -Force -Confirm:$false + +} +``` + +I like to think that it's fairly well documented (but I've also been staring at / tweaking this for a while); here's the gist of what it's doing: +1. Capture vCenter login credentials from the Action Constants and the `customProperties` of the deployment (from the cloud template). +2. Use those creds to `Connect-ViServer` to the vCenter instance. +3. Find the VM object which matches the `resourceName` from the vRA deployment. +4. Wait for VM tools to be running and accessible on that VM. +5. Determine the OS type of the VM (Windows/Linux). +6. If it's Windows and the tools are out of date, update them and wait for the reboot to complete. +7. If it's Windows, move on: +8. If it needs to add accounts to the Administrators group, assemble the needed script and run it in the guest via `Invoke-VmScript`. +9. Assemble a script to expand the C: volume to fill whatever size VMDK is attached as HDD1, and run it in the guest via `Invoke-VmScript`. +10. Assemble a script to set common firewall exceptions for remote access, and run it in the guest via `Invoke-VmScript`. +11. Assemble a script to schedule a task to (attempt to) apply Windows updates, and run it in the guest via `Invoke-VmScript`. 
+ +It wouldn't be hard to customize the script to perform different actions (or even run against Linux systems - just set `$whateverScript = "apt update && apt upgrade"` (or whatever) and call it with `$runWhateverScript = Invoke-VMScript -VM $vm -ScriptText $whateverScript -GuestUser $templateUser -GuestPassword $templatePassword`), but this is as far as I'm going to take it for this demo. + +#### Event subscription +Before I can test the new action, I'll need to first add an extensibility subscription so that the ABX action will get called during the deployment. So I head to **Extensibility > Subscriptions** and click the **New Subscription** button. +![Extensibility subscriptions](20210903_extensibility_subscriptions.png) + +I'll be using this to call my new `configureGuest` action - so I'll name the subscription `Configure Guest`. I tie it to the `Compute Post Provision` event, and bind my action: +![Creating the new subscription](20210903_new_subscription_1.png) + +I do have another subscription on that event already, [`VM Post-Provisioning`](/adding-vm-notes-and-custom-attributes-with-vra8#extensibility-subscription) which is used to modify the VM object with notes and custom attributes. I'd like to make sure that my work inside the guest happens after that other subscription is completed, so I'll enable blocking and give it a priority of `2`: +![Adding blocking to Configure Guest](20210903_new_subscription_2.png) + +After hitting the **Save** button, I go back to that other `VM Post-Provisioning` subscription, set it to enable blocking, and give it a priority of `1`: +![Blocking VM Post-Provisioning](20210903_old_subscription_blocking.png) + +This will ensure that the new subscription fires after the older one completes, and that should avoid any conflicts between the two. + +### Testing +Alright, now let's see if it worked.
I head into Service Broker to submit the deployment request: +![Submitting the test deployment](20210903_request.png) + +Note that I've set the disk size to 65GB (up from the default of 60), and I'm adding `lab\testy` as a local admin on the deployed system. + +Once the deployment finishes, I can switch back to Cloud Assembly and check **Extensibility > Activity > Action Runs** and then click on the `configureGuest` run to see how it did. +![Successful action run](20210903_action_run_success.png) + +It worked! + +The Log tab lets me see the progress as the execution progresses: + +``` +Logging in to server. +logged in to server vcsa.lab.bowdre.net:443 +Read-only file system +09/03/2021 19:08:27 Get-VM Finished execution +09/03/2021 19:08:27 Get-VM +Waiting for VM Tools to start... +09/03/2021 19:08:29 Wait-Tools 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:08:29 Wait-Tools Finished execution +09/03/2021 19:08:29 Wait-Tools +09/03/2021 19:08:29 Get-View Finished execution +09/03/2021 19:08:29 Get-View +09/03/2021 19:08:29 Get-View Finished execution +09/03/2021 19:08:29 Get-View +BOW-PSVS-XXX001 is a windowsGuest and its tools status is toolsOld. +Updating VM Tools... +09/03/2021 19:08:30 Update-Tools 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:08:30 Update-Tools Finished execution +09/03/2021 19:08:30 Update-Tools +Waiting for VM Tools to start... +09/03/2021 19:09:00 Wait-Tools 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:09:00 Wait-Tools Finished execution +09/03/2021 19:09:00 Wait-Tools +Administrators: "lab\testy" +Attempting to add administrator accounts... +09/03/2021 19:09:10 Invoke-VMScript 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:09:10 Invoke-VMScript Finished execution +09/03/2021 19:09:10 Invoke-VMScript +Successfully added ["lab\testy"] to Administrators group. +Attempting to extend system volume... 
+09/03/2021 19:09:27 Invoke-VMScript 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:09:27 Invoke-VMScript Finished execution +09/03/2021 19:09:27 Invoke-VMScript +Successfully extended system partition. +Attempting to enable remote access (RDP, WMI, File and Printer Sharing, PSRemoting)... +09/03/2021 19:09:49 Invoke-VMScript 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:09:49 Invoke-VMScript Finished execution +09/03/2021 19:09:49 Invoke-VMScript +Successfully enabled remote access. +Creating a scheduled task to apply updates... +09/03/2021 19:10:12 Invoke-VMScript 5222b516-ae2c-5740-2926-77cd21441f27 +09/03/2021 19:10:12 Invoke-VMScript Finished execution +09/03/2021 19:10:12 Invoke-VMScript +Created task: + +TaskPath TaskName State +-------- -------- ----- +\ Initial_Updates Ready +\ Initial_Updates Ready +``` + +So it *claims* to have successfully updated the VM tools, added `lab\testy` to the local `Administrators` group, extended the `C:` volume to fill the 65GB virtual disk, added firewall rules to permit remote access, and created a scheduled task to apply updates. I can open a console session to the VM to spot-check the results. +![Verifying local admins](20210903_verify_local_admins.png) +Yep, `testy` is an admin now! + +![Verify disk size](20210903_verify_disk_size.png) +And `C:` fills the disk! + +### Wrap-up +This is really just the start of what I've been able to do in-guest leveraging `Invoke-VmScript` from an ABX action. I've got a [slightly-larger version of this script](https://github.com/jbowdre/misc-scripts/blob/main/vRealize/configure_guest.ps1) which also performs similar actions in Linux guests as well. And I've also cobbled together ABX solutions for generating randomized passwords for local accounts and storing them in an organization's password management solution. I would like to get around to documenting those here in the future... we'll see. 
+ +In any case, hopefully this information might help someone else to get started down this path. I'd love to see whatever enhancements you are able to come up with! diff --git a/content/post/tailscale-on-vmware-photon/Tailscale-AppIcon.png b/content/post/tailscale-on-vmware-photon/Tailscale-AppIcon.png new file mode 100644 index 0000000..0233dbe Binary files /dev/null and b/content/post/tailscale-on-vmware-photon/Tailscale-AppIcon.png differ diff --git a/content/post/tailscale-on-vmware-photon/index.md b/content/post/tailscale-on-vmware-photon/index.md new file mode 100644 index 0000000..c81fbb7 --- /dev/null +++ b/content/post/tailscale-on-vmware-photon/index.md @@ -0,0 +1,58 @@ +--- +title: "Tailscale on VMware Photon OS" # Title of the blog post. +date: 2022-12-14T10:21:12-06:00 # Date of post creation. +lastmod: 2022-12-15T10:21:12-06:00 # Date when last modified +description: "How to manually install Tailscale on VMware's Photon OS - or any other systemd-based platform without official Tailscale packages." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: false # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "Tailscale-AppIcon.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. 
+series: Tips # Projects, Scripts, vRA8, K8s on vSphere +tags: + - vmware + - linux + - wireguard + - networking + - security + - tailscale +comment: true # Disable comment if false. +--- +You might remember that I'm a [pretty big fan](/secure-networking-made-simple-with-tailscale/) of [Tailscale](https://tailscale.com), which makes it easy to connect your various devices together in a secure [tailnet](https://tailscale.com/kb/1136/tailnet/), or private network. Tailscale is super simple to set up on most platforms, but you'll need to [install it manually](https://tailscale.com/download/linux/static) if there isn't a prebuilt package for your system. + +Here's a condensed list of the [steps that I took to manually install Tailscale](/esxi-arm-on-quartz64/#installing-tailscale) on VMware's [Photon OS](https://github.com/vmware/photon), though the same (or similar) steps should also work on just about any other `systemd`-based system. + +1. Visit [https://pkgs.tailscale.com/stable/#static](https://pkgs.tailscale.com/stable/#static) to see the latest stable version for your system architecture, and copy the URL. For instance, I'll be using `https://pkgs.tailscale.com/stable/tailscale_1.34.1_arm64.tgz`. +2. Download and extract it to the system: +```shell +wget https://pkgs.tailscale.com/stable/tailscale_1.34.1_arm64.tgz +tar xvf tailscale_1.34.1_arm64.tgz +cd tailscale_1.34.1_arm64/ +``` +3. Install the binaries and service files: +```shell +sudo install -m 755 tailscale /usr/bin/ +sudo install -m 755 tailscaled /usr/sbin/ +sudo install -m 644 systemd/tailscaled.defaults /etc/default/tailscaled +sudo install -m 644 systemd/tailscaled.service /usr/lib/systemd/system/ +``` +4. Start the service: +```shell +sudo systemctl enable tailscaled +sudo systemctl start tailscaled +``` + +From that point, just [`sudo tailscale up`](https://tailscale.com/kb/1080/cli/#up) like normal. 
+ +{{% notice info "Updating Tailscale" %}} +Since Tailscale was installed outside of any package manager, it won't get updated automatically. When new versions are released you'll need to update it manually. To do that: +1. Download and extract the new version. +2. Install the `tailscale` and `tailscaled` binaries as described above (no need to install the service files again). +3. Restart the service with `sudo systemctl restart tailscaled`. +{{% /notice %}} diff --git a/content/post/tanzu-community-edition-k8s-homelab/clusters_in_vsphere.png b/content/post/tanzu-community-edition-k8s-homelab/clusters_in_vsphere.png new file mode 100644 index 0000000..dc90bb5 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/clusters_in_vsphere.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/coffee_break.gif b/content/post/tanzu-community-edition-k8s-homelab/coffee_break.gif new file mode 100644 index 0000000..de5596e Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/coffee_break.gif differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/container_volume_in_vsphere.png b/content/post/tanzu-community-edition-k8s-homelab/container_volume_in_vsphere.png new file mode 100644 index 0000000..d80e711 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/container_volume_in_vsphere.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/create_new_agent.png b/content/post/tanzu-community-edition-k8s-homelab/create_new_agent.png new file mode 100644 index 0000000..f43c0fe Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/create_new_agent.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/creating_new_subnet.png b/content/post/tanzu-community-edition-k8s-homelab/creating_new_subnet.png new file mode 100644 index 0000000..f53772a Binary files /dev/null and 
b/content/post/tanzu-community-edition-k8s-homelab/creating_new_subnet.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/dhcp_reservations.png b/content/post/tanzu-community-edition-k8s-homelab/dhcp_reservations.png new file mode 100644 index 0000000..26fb086 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/dhcp_reservations.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/five_minutes.gif b/content/post/tanzu-community-edition-k8s-homelab/five_minutes.gif new file mode 100644 index 0000000..e195211 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/five_minutes.gif differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/index.md b/content/post/tanzu-community-edition-k8s-homelab/index.md new file mode 100644 index 0000000..c0e2440 --- /dev/null +++ b/content/post/tanzu-community-edition-k8s-homelab/index.md @@ -0,0 +1,988 @@ +--- +title: "VMware Tanzu Community Edition Kubernetes Platform in a Homelab" # Title of the blog post. +date: 2022-01-12 # Date of post creation. +# lastmod: 2022-01-06T09:42:51-06:00 # Date when last modified +description: "Gaining familiarity with VMware Tanzu Community Edition by deploying phpIPAM on Kubernetes in my homelab" # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "tanzu_community_edition.png" # Sets thumbnail image appearing inside card on homepage. 
+# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: K8s on vSphere +tags: + - vmware + - linux + - kubernetes + - docker + - containers + - tanzu + - homelab +comment: true # Disable comment if false. +--- + +Back in October, VMware [announced](https://tanzu.vmware.com/content/blog/vmware-tanzu-community-edition-announcement) [Tanzu Community Edition](https://tanzucommunityedition.io/) as a way to provide "a full-featured, easy-to-manage Kubernetes platform that’s perfect for users and learners alike." TCE bundles a bunch of open-source components together in a modular, "batteries included but swappable" way: +![Tanzu Community Edition components](tanzu_community_edition.png) + +I've been meaning to brush up on my Kubernetes skills so I thought deploying and using TCE in my self-contained [homelab](/vmware-home-lab-on-intel-nuc-9/) would be a fun and rewarding learning exercise - and it was! + +Here's how I did it. + +### Planning +TCE supports several different deployment scenarios and targets. It can be configured as separate Management and Workload Clusters or as a single integrated Standalone Cluster, and deployed to cloud providers like AWS and Azure, on-premise vSphere, or even a local Docker environment[^yo_dawg]. I'll be using the standard Management + Workload Cluster setup in my on-prem vSphere, so I start by reviewing the [Prepare to Deploy a Cluster to vSphere](https://tanzucommunityedition.io/docs/latest/vsphere/) documentation to get an idea of what I'll need. + +Looking ahead, part of the installation process creates a local [KIND](https://kind.sigs.k8s.io/) cluster for bootstrapping the Management and Workload clusters. I do most of my home computing (and homelab work) by using the [Linux environment available on my Chromebook](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications/).
Unfortunately I know from past experience that KIND will not work within this environment so I'll be using a Debian 10 VM to do the deployment. + +[^yo_dawg]: Yo dawg, I heard you like containers... + +#### Networking +The Kubernetes node VMs will need to be attached to a network with a DHCP server to assign their addresses, and that network will need to be able to talk to vSphere. My router handles DHCP for the range `192.168.1.101-250` so I'll plan on using that. + +I'll also need to set aside a few static IPs for this project. These will need to be routable and within the same subnet as the DHCP range, but excluded from that DHCP range. + +| IP Address | Purpose | +| --- | --- | +| `192.168.1.60` | Control plane for Management cluster | +| `192.168.1.61` | Control plane for Workload cluster | +| `192.168.1.64 - 192.168.1.80` | IP range for Workload load balancer | + + +### Prerequisites +Moving on to the [Getting Started](https://tanzucommunityedition.io/docs/latest/getting-started/), I'll need to grab some software before I can actually Get Started. + +#### Kubernetes control plane image +I need to download a VMware OVA which can be used for deploying my Kubernetes nodes from the VMWare Customer Connect portal [here](https://customerconnect.vmware.com/downloads/get-download?downloadGroup=TCE-090)[^register]. There are a few different options available. I'll get the Photon release with the highest Kubernetes version currently available, `photon-3-kube-v1.21.2+vmware.1-tkg.2-12816990095845873721.ova`. + +Once the file is downloaded, I'll log into my vCenter and use the **Deploy OVF Template** action to deploy a new VM using the OVA. I won't bother booting the machine once deployed but will rename it to `k8s-node` to make it easier to identify later on and then convert it to a template. +![New k8s-node template](k8s-node_template.png) + +[^register]: Register [here](https://customerconnect.vmware.com/account-registration) if you don't yet have an account. 
+ +#### Docker +I've already got Docker installed on this machine, but if I didn't I would follow the instructions [here](https://docs.docker.com/engine/install/debian/) to get it installed and then follow [these instructions](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user) to enable management of Docker without root. + +I also verify that my install is using `cgroup` version 1 as version 2 is not currently supported: + +```bash +❯ docker info | grep -i cgroup + Cgroup Driver: cgroupfs + Cgroup Version: 1 +``` + +#### `kubectl` binary +Next up, I'll install `kubectl` [as described here](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) - though the latest version is currently `1.23` and that won't work with the `1.21` control plane node image I downloaded from VMware (`kubectl` needs to be within one minor version of the control plane). Instead I need to find the latest `1.22` release. + +I can look at the [releases page on GitHub](https://github.com/kubernetes/kubernetes/releases) to see that the latest release for me is `1.22.5`.
It's not strictly a requirement, but having the `kind` executable available will be handy for troubleshooting during the bootstrap process in case anything goes sideways. It can be installed in basically the same way as `kubectl`:
+ +```bash +curl -H "Accept: application/vnd.github.v3.raw" \ + -L https://api.github.com/repos/vmware-tanzu/community-edition/contents/hack/get-tce-release.sh | \ + bash -s v0.9.1 linux +``` + +And then unpack it and run the installer: +```bash +tar xf tce-linux-amd64-v0.9.1.tar.gz +cd tce-linux-amd64-v0.9.1 +./install.sh +``` + +I can then verify the installation is working correctly: +```bash +❯ tanzu version +version: v0.2.1 +buildDate: 2021-09-29 +sha: ceaa474 +``` + +### Cluster creation +Okay, now it's time for the good stuff - creating some shiny new Tanzu clusters! The Tanzu CLI really does make this very easy to accomplish. + +#### Management cluster +I need to create a Management cluster first and I'd like to do that with the UI, so that's as simple as: +```bash +tanzu management-cluster create --ui +``` + +I should then be able to access the UI by pointing a web browser at `http://127.0.0.1:8080`... but I'm running this on a VM without a GUI, so I'll need to back up and tell it to bind on `0.0.0.0:8080` so the web installer will be accessible across the network. I can also include `--browser none` so that the installer doesn't bother with trying to launch a browser locally. + +```bash +❯ tanzu management-cluster create --ui --bind 0.0.0.0:8080 --browser none + +Validating the pre-requisites... +Serving kickstart UI at http://[::]:8080 +``` + +*Now* I can point my local browser to my VM and see the UI: +![The Tanzu Installer UI](installer_ui.png) + +And then I can click the button at the bottom left to save my eyes[^dark_mode] before selecting the option to deploy on vSphere. +![Configuring the IaaS Provider](installer_iaas_provider.png) + +I'll plug in the FQDN of my vCenter and provide a username and password to use to connect to it, then hit the **Connect** button. That will prompt me to accept the vCenter's certificate thumbprint, and then I'll be able to select the virtual datacenter that I want to use. 
Finally, I'll paste in the SSH public key[^gen_key] I'll use for interacting with the cluster. + +I click **Next** and move on to the Management Cluster Settings. +![Configuring the Management Cluster](installer_management_cluster.png) + +This is for a lab environment that's fairly memory-constrained, so I'll pick the single-node *Development* setup with a *small* instance type. I'll name the cluster `tce-mgmt` and stick with the default `kube-vip` control plane endpoint provider. I plug in the control plane endpoint IP that I'll use for connecting to the cluster and select the *small* instance type for the worker node type. + +I don't have an NSX Advanced Load Balancer or any Metadata to configure so I'll skip past those steps and move on to configuring the Resources. +![Configuring Resources](installer_resources.png) + +Here I pick to place the Tanzu-related resources in a VM folder named `Tanzu`, to store their data on my single host's single datastore, and to deploy to the one-host `physical-cluster` cluster. + +Now for the Kubernetes Networking Settings: +![Configuring Kubernetes Networking](installer_k8s_networking.png) + +This bit is actually pretty easy. For Network Name, I select the vSphere network where the `192.168.1.0/24` network I identified earlier lives, `d-Home-Mgmt`. I leave the service and pod CIDR ranges as default. + +I disable the Identity Management option and then pick the `k8s-node` template I had imported to vSphere earlier. +![Configuring the OS Image](installer_image.png) + +I skip the Tanzu Mission Control piece (since I'm still waiting on access to [TMC Starter](https://tanzu.vmware.com/tmc-starter)) and click the **Review Configuration** button at the bottom of the screen to review my selections. +![Reviewing the configuration](installer_review.png) + +See the option at the bottom to copy the CLI command? 
I'll need to use that since clicking the friendly **Deploy** button doesn't seem to work while connected to the web server remotely. + +```bash +tanzu management-cluster create --file /home/john/.config/tanzu/tkg/clusterconfigs/dr94t3m2on.yaml -v 6 +``` + +In fact, I'm going to copy that file into my working directory and give it a more descriptive name so that I can re-use it in the future. + +```bash +cp ~/.config/tanzu/tkg/clusterconfigs/dr94t3m2on.yaml ~/projects/tanzu-homelab/tce-mgmt.yaml +``` + +Now I can run the install command: + +```bash +tanzu management-cluster create --file ./tce-mgmt.yaml -v 6 +``` + +After a moment or two of verifying prerequisites, I'm met with a polite offer to enable Tanzu Kubernetes Grid Service in vSphere: + +``` +vSphere 7.0 Environment Detected. + +You have connected to a vSphere 7.0 environment which does not have vSphere with Tanzu enabled. vSphere with Tanzu includes +an integrated Tanzu Kubernetes Grid Service which turns a vSphere cluster into a platform for running Kubernetes workloads in dedicated +resource pools. Configuring Tanzu Kubernetes Grid Service is done through vSphere HTML5 client. + +Tanzu Kubernetes Grid Service is the preferred way to consume Tanzu Kubernetes Grid in vSphere 7.0 environments. Alternatively you may +deploy a non-integrated Tanzu Kubernetes Grid instance on vSphere 7.0. +Note: To skip the prompts and directly deploy a non-integrated Tanzu Kubernetes Grid instance on vSphere 7.0, you can set the 'DEPLOY_TKG_ON_VSPHERE7' configuration variable to 'true' + +Do you want to configure vSphere with Tanzu? [y/N]: n +Would you like to deploy a non-integrated Tanzu Kubernetes Grid management cluster on vSphere 7.0? [y/N]: y +``` + +That's not what I'm after in this case, though, so I'll answer with a `n` and a `y` to confirm that I want the non-integrated TKG deployment. + +And now I go get coffee as it'll take 10-15 minutes for the deployment to complete. 
+![Coffee break!](coffee_break.gif) + +Okay, I'm back - and so is my shell prompt! The deployment completed successfully: +``` +Waiting for additional components to be up and running... +Waiting for packages to be up and running... +Context set for management cluster tce-mgmt as 'tce-mgmt-admin@tce-mgmt'. + +Management cluster created! + + +You can now create your first workload cluster by running the following: + + tanzu cluster create [name] -f [file] + + +Some addons might be getting installed! Check their status by running the following: + + kubectl get apps -A + +``` + +I can run that last command to go ahead and verify that the addon installation has completed: + +```bash +❯ kubectl get apps -A +NAMESPACE NAME DESCRIPTION SINCE-DEPLOY AGE +tkg-system antrea Reconcile succeeded 26s 6m49s +tkg-system metrics-server Reconcile succeeded 36s 6m49s +tkg-system tanzu-addons-manager Reconcile succeeded 22s 8m54s +tkg-system vsphere-cpi Reconcile succeeded 19s 6m50s +tkg-system vsphere-csi Reconcile succeeded 36s 6m50s +``` + +And I can use the Tanzu CLI to get some other details about the new management cluster: +```bash +❯ tanzu management-cluster get tce-mgmt + NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES + tce-mgmt tkg-system running 1/1 1/1 v1.21.2+vmware.1 management + + +Details: + +NAME READY SEVERITY REASON SINCE MESSAGE +/tce-mgmt True 40m +├─ClusterInfrastructure - VSphereCluster/tce-mgmt True 41m +├─ControlPlane - KubeadmControlPlane/tce-mgmt-control-plane True 40m +│ └─Machine/tce-mgmt-control-plane-xtdnx True 40m +└─Workers + └─MachineDeployment/tce-mgmt-md-0 + └─Machine/tce-mgmt-md-0-745b858d44-4c9vv True 40m + + +Providers: + + NAMESPACE NAME TYPE PROVIDERNAME VERSION WATCHNAMESPACE + capi-kubeadm-bootstrap-system bootstrap-kubeadm BootstrapProvider kubeadm v0.3.23 + capi-kubeadm-control-plane-system control-plane-kubeadm ControlPlaneProvider kubeadm v0.3.23 + capi-system cluster-api CoreProvider cluster-api v0.3.23 + capv-system 
- (Optional) `CONTROL_PLANE_MACHINE_COUNT` to deploy an increased number of control plane nodes (must be an odd integer)
+Waiting for packages to be up and running... + +Workload cluster 'tce-work' created +``` + +Right on! I'll use `tanzu cluster get` to check out the workload cluster: +```bash +❯ tanzu cluster get tce-work + NAME NAMESPACE STATUS CONTROLPLANE WORKERS KUBERNETES ROLES + tce-work default running 1/1 1/1 v1.21.2+vmware.1 +ℹ + +Details: + +NAME READY SEVERITY REASON SINCE MESSAGE +/tce-work True 9m31s +├─ClusterInfrastructure - VSphereCluster/tce-work True 10m +├─ControlPlane - KubeadmControlPlane/tce-work-control-plane True 9m31s +│ └─Machine/tce-work-control-plane-8km9m True 9m31s +└─Workers + └─MachineDeployment/tce-work-md-0 + └─Machine/tce-work-md-0-687444b744-cck4x True 8m31s +``` + +I can also go into vCenter and take a look at the VMs which constitute the two clusters: +![Cluster VMs](clusters_in_vsphere.png) + +I've highlighted the two Control Plane nodes. They got their IP addresses assigned by DHCP, but [VMware says](https://tanzucommunityedition.io/docs/latest/verify-deployment/#configure-dhcp-reservations-for-the-control-plane-nodes-vsphere-only) that I need to create reservations for them to make sure they don't change. So I'll do just that. +![DHCP reservations on Google Wifi](dhcp_reservations.png) + +Excellent, I've got a Tanzu management cluster and a Tanzu workload cluster. What now? + +[^i_wont]: I'm not going to, but I totally could. + +### Working with Tanzu + +If I run `kubectl get nodes` right now, I'll only get information about the management cluster: + +```bash +❯ kubectl get nodes +NAME STATUS ROLES AGE VERSION +tce-mgmt-control-plane-xtdnx Ready control-plane,master 18h v1.21.2+vmware.1 +tce-mgmt-md-0-745b858d44-4c9vv Ready 17h v1.21.2+vmware.1 +``` + +#### Setting the right context +To be able to deploy stuff to the workload cluster, I need to tell `kubectl` how to talk to it. 
And to do that, I'll first need to use `tanzu` to capture the cluster's kubeconfig: + +```bash +❯ tanzu cluster kubeconfig get tce-work --admin +Credentials of cluster 'tce-work' have been saved +You can now access the cluster by running 'kubectl config use-context tce-work-admin@tce-work' +``` + +I can now run `kubectl config get-contexts` and see that I have access to contexts on both management and workload clusters: + +```bash +❯ kubectl config get-contexts +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* tce-mgmt-admin@tce-mgmt tce-mgmt tce-mgmt-admin + tce-work-admin@tce-work tce-work tce-work-admin +``` + +And I can switch to the `tce-work` cluster like so: + +```bash +❯ kubectl config use-context tce-work-admin@tce-work +Switched to context "tce-work-admin@tce-work". +❯ kubectl get nodes +NAME STATUS ROLES AGE VERSION +tce-work-control-plane-8km9m Ready control-plane,master 17h v1.21.2+vmware.1 +tce-work-md-0-687444b744-cck4x Ready 17h v1.21.2+vmware.1 +``` + +There they are! + +#### Deploying the `yelb` demo app +Before I move on to deploying actually *useful* workloads, I'll start with deploying a quick demo application as described in William Lam's post on [Interesting Kubernetes application demos](https://williamlam.com/2020/06/interesting-kubernetes-application-demos.html). `yelb` is a web app which consists of a UI front end, application server, database server, and Redis caching service so it's a great little demo to make sure Kubernetes is working correctly. 
+ +I can check out the sample deployment that William put together [here](https://github.com/lamw/vmware-k8s-app-demo/blob/master/yelb.yaml), and then deploy it with: + +```bash +❯ kubectl create ns yelb +namespace/yelb created + +❯ kubectl apply -f https://raw.githubusercontent.com/lamw/vmware-k8s-app-demo/master/yelb.yaml +service/redis-server created +service/yelb-db created +service/yelb-appserver created +service/yelb-ui created +deployment.apps/yelb-ui created +deployment.apps/redis-server created +deployment.apps/yelb-db created +deployment.apps/yelb-appserver created + +❯ kubectl -n yelb get pods +NAME READY STATUS RESTARTS AGE +redis-server-74556bbcb7-r9jqc 1/1 Running 0 10s +yelb-appserver-d584bb889-2jspg 1/1 Running 0 10s +yelb-db-694586cd78-wb8tt 1/1 Running 0 10s +yelb-ui-8f54fd88c-k2dw9 1/1 Running 0 10s +``` + +Once the app is running, I can point my web browser at it to see it in action. But what IP do I use? + +```bash +❯ kubectl -n yelb get svc/yelb-ui +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +yelb-ui NodePort 100.71.228.116 80:30001/TCP 84s +``` + +This demo is using a `NodePort` type service to expose the front end, which means it will be accessible on port `30001` on the node it's running on. I can find that IP by: +```bash +❯ kubectl -n yelb describe pod $(kubectl -n yelb get pods | grep yelb-ui | awk '{print $1}') | grep "Node:" +Node: tce-work-md-0-687444b744-cck4x/192.168.1.145 +``` + +So I can point my browser at `http://192.168.1.145:30001` and see the demo: +![yelb demo page](yelb_nodeport_demo.png) + +After marveling at my own magnificence[^magnificence] for a few minutes, I'm ready to move on to something more interesting - but first, I'll just delete the `yelb` namespace to clean up the work I just did: +```bash +❯ kubectl delete ns yelb +namespace "yelb" deleted +``` + +Now let's move on and try to deploy `yelb` behind a `LoadBalancer` service so it will get its own IP. 
Wait a minute. That external IP is *still* `<pending>`. What gives? Oh yeah I need to actually deploy and configure a load balancer before I can balance anything. That's up next.
The quick-and-dirty steps needed to make this work are: + +```bash +git clone https://github.com/vrabbi/tkgm-customizations.git +cd tkgm-customizations/carvel-packages/kube-vip-package +kubectl apply -n tanzu-package-repo-global -f metadata.yml +kubectl apply -n tanzu-package-repo-global -f package.yaml +cat << EOF > values.yaml +vip_range: 192.168.1.64-192.168.1.80 +EOF +tanzu package install kubevip -p kubevip.terasky.com -v 0.3.9 -f values.yaml +``` + +Now I can check out the `yelb-ui` service again: +```bash +❯ kubectl -n yelb get svc/yelb-ui +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +yelb-ui LoadBalancer 100.67.177.185 192.168.1.65 80:32339/TCP 4h35m +``` + +And it's got an IP! I can point my browser to `http://192.168.1.65` now and see: +![Successful LoadBalancer test!](yelb_loadbalancer_demo.png) + +I'll keep the `kube-vip` load balancer since it'll come in handy, but I have no further use for `yelb`: +```bash +❯ kubectl delete ns yelb +namespace "yelb" deleted +``` + +#### Persistent Volume Claims, Storage Classes, and Storage Policies +At some point, I'm going to want to make sure that data from my Tanzu workloads stick around persistently - and for that, I'll need to [define some storage stuff](https://tanzucommunityedition.io/docs/latest/vsphere-cns/). + +First up, I'll add a new tag called `tkg-storage-local` to the `nuchost-local` vSphere datastore that I want to use for storing Tanzu volumes: +![Tag (and corresponding category) applied ](storage_tag.png) + +Then I create a new vSphere Storage Policy called `tkg-storage-policy` which states that data covered by the policy should be placed on the datastore(s) tagged with `tkg-storage-local`: +![My Tanzu storage policy](storage_policy.png) + +So that's the vSphere side of things sorted; now to map that back to the Kubernetes side. 
I can test that I can create a Persistent Volume Claim against the new `vsphere` Storage Class by putting this in a new file called `demo-pvc.yaml`:
I recently decided that I'd like to expand phpIPAM's scope so it can keep an eye on *all* the network ranges within the environment. That's not a big ask in [my little self-contained homelab](/vmware-home-lab-on-intel-nuc-9/), but having a single system scanning all the ranges of a large production network probably wouldn't scale too well. + +Fortunately the phpIPAM project provides a [remote scanning agent](https://github.com/phpipam/phpipam-agent) which can be used for keeping an eye on networks and reporting back to the main phpIPAM server. With this, I could deploy an agent to each region (or multiple agents to a region!) and divide up the network into chunks that each agent would be responsible for scanning. But that's a pretty lightweight task for a single server to manage, and who wants to deal with configuring multiple instances of the same thing? Not this guy. + +So I set to work exploring some containerization options, and I found [phpipam-docker](https://github.com/phpipam-docker/phpipam-docker). That would easily replicate my existing setup in a trio of containers (one for the web front-end, one for the database back-end, and one with `cron` jobs to run scans at regular intervals)... but doesn't provide a remote scan capability. I also found a [dockerized phpipam-agent](https://github.com/pierrecdn/phpipam-agent), but this one didn't quite meet my needs. It did provide me a base to work off of though so a few days of [tinkering](https://github.com/jbowdre/phpipam-agent-docker) resulted in me publishing my first [Docker image](https://github.com/jbowdre/phpipam-agent-docker/pkgs/container/phpipam-agent). I've still some work to do before this application stack is fully ready for production but it's at a point where I think it's worth doing a test deploy. 
      - MYSQL_ROOT_PASSWORD=VMware1!VMware1!
+ +It might look like this on the Kubernetes side: +```yaml +# phpipam-db.yaml +apiVersion: v1 +kind: Service +metadata: + name: phpipam-db + labels: + app: phpipam-db + namespace: ipam +spec: + type: ClusterIP + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: 3306 + selector: + app: phpipam-db +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + name: phpipam-db + name: phpipam-db-pvc + namespace: ipam +spec: + accessModes: + - ReadWriteOnce + storageClassName: vsphere + resources: + requests: + storage: 5Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phpipam-db + namespace: ipam +spec: + selector: + matchLabels: + app: phpipam-db + replicas: 1 + template: + metadata: + labels: + app: phpipam-db + spec: + containers: + - name: phpipam-db + image: mariadb:latest + env: + - name: MYSQL_ROOT_PASSWORD + value: "VMware1!VMware1!" + ports: + - name: mysql + containerPort: 3306 + volumeMounts: + - name: phpipam-db-vol + mountPath: /var/lib/mysql + volumes: + - name: phpipam-db-vol + persistentVolumeClaim: + claimName: phpipam-db-pvc +``` + +Moving on: + +#### phpipam-www +This is the `docker-compose` excerpt for the web component: +```yaml +services: + phpipam-web: + image: phpipam/phpipam-www:1.5x + ports: + - "80:80" + environment: + - TZ=UTC + - IPAM_DATABASE_HOST=phpipam-db + - IPAM_DATABASE_PASS=VMware1! + - IPAM_DATABASE_WEBHOST=% + volumes: + - phpipam-logo:/phpipam/css/images/logo +``` + +Based on that, I can see that my `phpipam-www` pod will need a container running the `phpipam/phpipam-www:1.5x` image, a `Service` of type `LoadBalancer` to expose the web interface on port `80`, a `PersistentVolumeClaim` mounted to `/phpipam/css/images/logo`, and some environment variables passed in to configure the thing. 
Note that the `IPAM_DATABASE_PASS` variable defines the password used for the `phpipam` user on the database (not the `root` user referenced earlier), and the `IPAM_DATABASE_WEBHOST=%` variable will define which hosts that `phpipam` database user will be able to connect from; setting it to `%` will make sure that my remote agent can connect to the database even if I don't know where the agent will be running. + +Here's how I'd adapt that into a structure that Kubernetes will understand: +```yaml +# phpipam-www.yaml +apiVersion: v1 +kind: Service +metadata: + name: phpipam-www + labels: + app: phpipam-www + namespace: ipam +spec: + type: LoadBalancer + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: phpipam-www +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + name: phpipam-www + name: phpipam-www-pvc + namespace: ipam +spec: + accessModes: + - ReadWriteOnce + storageClassName: vsphere + resources: + requests: + storage: 100Mi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phpipam-www + namespace: ipam +spec: + selector: + matchLabels: + app: phpipam-www + replicas: 1 + template: + metadata: + labels: + app: phpipam-www + spec: + containers: + - name: phpipam-www + image: phpipam/phpipam-www:1.5x + env: + - name: TZ + value: "UTC" + - name: IPAM_DATABASE_HOST + value: "phpipam-db" + - name: IPAM_DATABASE_PASS + value: "VMware1!" + - name: IPAM_DATABASE_WEBHOST + value: "%" + ports: + - containerPort: 80 + volumeMounts: + - name: phpipam-www-vol + mountPath: /phpipam/css/images/logo + volumes: + - name: phpipam-www-vol + persistentVolumeClaim: + claimName: phpipam-www-pvc +``` + +#### phpipam-cron +This container has a pretty simple configuration in `docker-compose`: +```yaml +services: + phpipam-cron: + image: phpipam/phpipam-cron:1.5x + environment: + - TZ=UTC + - IPAM_DATABASE_HOST=phpipam-db + - IPAM_DATABASE_PASS=VMware1! 
        - name: IPAM_DATABASE_PASS
          value: "VMware1!"
+ +For now, here's how I'd tell Kubernetes about it: +```yaml +# phpipam-agent.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phpipam-agent + namespace: ipam +spec: + selector: + matchLabels: + app: phpipam-agent + replicas: 1 + template: + metadata: + labels: + app: phpipam-agent + spec: + containers: + - name: phpipam-agent + image: ghcr.io/jbowdre/phpipam-agent:latest + env: + - name: IPAM_DATABASE_HOST + value: "phpipam-db" + - name: IPAM_DATABASE_NAME + value: "phpipam" + - name: IPAM_DATABASE_USER + value: "phpipam" + - name: IPAM_DATABASE_PASS + value: "VMware1!" + - name: IPAM_DATABASE_PORT + value: "3306" + - name: IPAM_AGENT_KEY + value: "" + - name: IPAM_SCAN_INTERVAL + value: "5m" + - name: IPAM_RESET_AUTODISCOVER + value: "true" + - name: IPAM_REMOVE_DHCP + value: "true" + - name: TZ + value: "UTC" +``` + +#### Deployment and configuration of phpIPAM +I can now go ahead and start deploying these containers, starting with the database one (upon which all the others rely): +```bash +❯ kubectl apply -f phpipam-db.yaml +service/phpipam-db created +persistentvolumeclaim/phpipam-db-pvc created +deployment.apps/phpipam-db created +``` + +And the web server: +```bash +❯ kubectl apply -f phpipam-www.yaml +service/phpipam-www created +persistentvolumeclaim/phpipam-www-pvc created +deployment.apps/phpipam-www created +``` + +And the cron runner: +```bash +❯ kubectl apply -f phpipam-cron.yaml +deployment.apps/phpipam-cron created +``` + +I'll hold off on the agent container for now since I'll need to adjust the configuration slightly after getting phpIPAM set up, but I will go ahead and check out my work so far: + +```bash +❯ kubectl -n ipam get all +NAME READY STATUS RESTARTS AGE +pod/phpipam-cron-6c994897c4-6rsnp 1/1 Running 0 4m30s +pod/phpipam-db-5f4c47d4b9-sb5bd 1/1 Running 0 16m +pod/phpipam-www-769c95c68d-94klg 1/1 Running 0 5m59s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/phpipam-db ClusterIP 100.66.194.69 3306/TCP 16m 
+service/phpipam-www LoadBalancer 100.65.232.238 192.168.1.64 80:31400/TCP 5m59s + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/phpipam-cron 1/1 1 1 4m30s +deployment.apps/phpipam-db 1/1 1 1 16m +deployment.apps/phpipam-www 1/1 1 1 5m59s + +NAME DESIRED CURRENT READY AGE +replicaset.apps/phpipam-cron-6c994897c4 1 1 1 4m30s +replicaset.apps/phpipam-db-5f4c47d4b9 1 1 1 16m +replicaset.apps/phpipam-www-769c95c68d 1 1 1 5m59s +``` + +And I can point my browser to the `EXTERNAL-IP` associated with the `phpipam-www` service to see the initial setup page: +![phpIPAM installation page](phpipam_install_page.png) + +I'll click the **New phpipam installation** option to proceed to the next step: +![Database initialization options](phpipam_database_install_options.png) + +I'm all for easy so I'll opt for **Automatic database installation**, which will prompt me for the credentials of an account with rights to create a new database within the MariaDB instance. I'll enter `root` and the password I used for the `MYSQL_ROOT_PASSWORD` variable above: +![Automatic database install](phpipam_automatic_database_install.png) + +I click the **Install database** button and I'm then met with a happy success message saying that the `phpipam` database was successfully created. + +And that eventually gets me to the post-install screen, where I set an admin password and proceed to log in: +![We made it to the post-install!](phpipam_post_install.png) + +To create a new scan agent, I go to **Menu > Administration > Server management > Scan agents**. 
+![Scan agents screen](scan_agents.png) + +And click the button to create a new one: +![Creating a new agent](create_new_agent.png) + +I'll copy the agent code and plug it into my `phpipam-agent.yaml` file: +```yaml + - name: IPAM_AGENT_KEY + value: "4DC5GLo-F_35cy7BEPnGn7HivtjP_o-v" +``` + +And then deploy that: +```bash +❯ kubectl apply -f phpipam-agent.yaml +deployment.apps/phpipam-agent created +``` + +The scan agent isn't going to do anything until it's assigned to a subnet though, so now I head to **Administration > IP related management > Sections**. phpIPAM comes with a few default sections and ranges and such defined so I'll delete those and create a new one that I'll call `Lab`. +![Section management](section_management.png) + +Now I can create a new subnet within the `Lab` section by clicking the **Subnets** menu, selecting the `Lab` section, and clicking **+ Add subnet**. +![Empty subnets menu](subnets_empty.png) + +I'll define the new subnet as `192.168.1.0/24`. Once I enable the option to *Check hosts status*, I'll then be able to specify my new `remote-agent` as the scanner for this subnet. +![Creating a new subnet](creating_new_subnet.png) +![A new (but empty) subnet](new_subnet_pre_scan.png) + +It shows the scanner associated with the subnet, but no data yet. I'll need to wait a few minutes for the first scan to kick off (at the five-minute interval I defined in the configuration). +![](five_minutes.gif) +![Newly discovered IPs!](newly-discovered_IPs.png) + +Woah, it actually works! + +### Conclusion +I still need to do more work to get the containerized phpIPAM stack ready for production, but I'm feeling pretty good for having deployed a functional demo of it at this point! And working on this was a nice excuse to get a bit more familiar with Tanzu Community Edition specifically, Kubernetes in general, and Docker (I learned a ton while assembling the `phpipam-agent` image!).
I find I always learn more about a new-to-me technology when I have an actual project to do rather than just going through the motions of a lab exercise. Maybe my notes will be useful to you, too. \ No newline at end of file diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_iaas_provider.png b/content/post/tanzu-community-edition-k8s-homelab/installer_iaas_provider.png new file mode 100644 index 0000000..50be6f6 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_iaas_provider.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_image.png b/content/post/tanzu-community-edition-k8s-homelab/installer_image.png new file mode 100644 index 0000000..92ff57e Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_image.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_k8s_networking.png b/content/post/tanzu-community-edition-k8s-homelab/installer_k8s_networking.png new file mode 100644 index 0000000..69fe25c Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_k8s_networking.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_management_cluster.png b/content/post/tanzu-community-edition-k8s-homelab/installer_management_cluster.png new file mode 100644 index 0000000..604d5e0 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_management_cluster.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_resources.png b/content/post/tanzu-community-edition-k8s-homelab/installer_resources.png new file mode 100644 index 0000000..d10c7ab Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_resources.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_review.png b/content/post/tanzu-community-edition-k8s-homelab/installer_review.png new 
file mode 100644 index 0000000..a514c3a Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_review.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/installer_ui.png b/content/post/tanzu-community-edition-k8s-homelab/installer_ui.png new file mode 100644 index 0000000..c914a4f Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/installer_ui.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/k8s-node_template.png b/content/post/tanzu-community-edition-k8s-homelab/k8s-node_template.png new file mode 100644 index 0000000..a4ab6a6 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/k8s-node_template.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/new_subnet_pre_scan.png b/content/post/tanzu-community-edition-k8s-homelab/new_subnet_pre_scan.png new file mode 100644 index 0000000..44465f7 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/new_subnet_pre_scan.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/newly-discovered_IPs.png b/content/post/tanzu-community-edition-k8s-homelab/newly-discovered_IPs.png new file mode 100644 index 0000000..e6fabb5 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/newly-discovered_IPs.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/phpipam_automatic_database_install.png b/content/post/tanzu-community-edition-k8s-homelab/phpipam_automatic_database_install.png new file mode 100644 index 0000000..bf80a0f Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/phpipam_automatic_database_install.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/phpipam_database_install_options.png b/content/post/tanzu-community-edition-k8s-homelab/phpipam_database_install_options.png new file mode 100644 index 0000000..ae658fe Binary files /dev/null and 
b/content/post/tanzu-community-edition-k8s-homelab/phpipam_database_install_options.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/phpipam_install_page.png b/content/post/tanzu-community-edition-k8s-homelab/phpipam_install_page.png new file mode 100644 index 0000000..3d0317c Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/phpipam_install_page.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/phpipam_post_install.png b/content/post/tanzu-community-edition-k8s-homelab/phpipam_post_install.png new file mode 100644 index 0000000..aedfdf9 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/phpipam_post_install.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/scan_agents.png b/content/post/tanzu-community-edition-k8s-homelab/scan_agents.png new file mode 100644 index 0000000..98144d0 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/scan_agents.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/section_management.png b/content/post/tanzu-community-edition-k8s-homelab/section_management.png new file mode 100644 index 0000000..83b2f02 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/section_management.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/storage_policy.png b/content/post/tanzu-community-edition-k8s-homelab/storage_policy.png new file mode 100644 index 0000000..75578bb Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/storage_policy.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/storage_tag.png b/content/post/tanzu-community-edition-k8s-homelab/storage_tag.png new file mode 100644 index 0000000..e922902 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/storage_tag.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/subnets_empty.png 
b/content/post/tanzu-community-edition-k8s-homelab/subnets_empty.png new file mode 100644 index 0000000..d3895d7 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/subnets_empty.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/tanzu_community_edition.png b/content/post/tanzu-community-edition-k8s-homelab/tanzu_community_edition.png new file mode 100644 index 0000000..339bbb9 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/tanzu_community_edition.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/yelb_loadbalancer_demo.png b/content/post/tanzu-community-edition-k8s-homelab/yelb_loadbalancer_demo.png new file mode 100644 index 0000000..45c9000 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/yelb_loadbalancer_demo.png differ diff --git a/content/post/tanzu-community-edition-k8s-homelab/yelb_nodeport_demo.png b/content/post/tanzu-community-edition-k8s-homelab/yelb_nodeport_demo.png new file mode 100644 index 0000000..d17a695 Binary files /dev/null and b/content/post/tanzu-community-edition-k8s-homelab/yelb_nodeport_demo.png differ diff --git a/content/post/the-future-of-virtuallypotato/index.md b/content/post/the-future-of-virtuallypotato/index.md new file mode 100644 index 0000000..2335552 --- /dev/null +++ b/content/post/the-future-of-virtuallypotato/index.md @@ -0,0 +1,26 @@ +--- +title: "The Future of virtuallypotato" # Title of the blog post. +date: 2023-08-11T10:19:10-05:00 # Date of post creation. +# lastmod: 2023-08-11T10:19:10-05:00 # Date when last modified +description: "We've been around the internet long enough to know what a blog post about 'a commitment to' or 'the future of' means." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: true # Sets whether to render this page. Draft of true will not be rendered. 
+toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +# featureImage: "file.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +# series: Tips # Projects, Scripts, vRA8, K8s on vSphere +tags: + - meta +comment: true # Disable comment if false. +--- + +_You've probably been around the internet long enough to know what a blog post about "our commitment to" or "the future of" means..._ + +When I started sharing my technical notes here at `virtuallypotato.com`, I was excited about VMware's products and was really enjoying my early efforts with vRealize Automation (now Aria Automation). 
+ diff --git a/content/post/upgrading-standalone-vsphere-host-with-esxcli/bundle_on_datastore.png b/content/post/upgrading-standalone-vsphere-host-with-esxcli/bundle_on_datastore.png new file mode 100644 index 0000000..f5e8199 Binary files /dev/null and b/content/post/upgrading-standalone-vsphere-host-with-esxcli/bundle_on_datastore.png differ diff --git a/content/post/upgrading-standalone-vsphere-host-with-esxcli/download_bundle.png b/content/post/upgrading-standalone-vsphere-host-with-esxcli/download_bundle.png new file mode 100644 index 0000000..73506ba Binary files /dev/null and b/content/post/upgrading-standalone-vsphere-host-with-esxcli/download_bundle.png differ diff --git a/content/post/upgrading-standalone-vsphere-host-with-esxcli/esxi8.png b/content/post/upgrading-standalone-vsphere-host-with-esxcli/esxi8.png new file mode 100644 index 0000000..53bb15e Binary files /dev/null and b/content/post/upgrading-standalone-vsphere-host-with-esxcli/esxi8.png differ diff --git a/content/post/upgrading-standalone-vsphere-host-with-esxcli/index.md b/content/post/upgrading-standalone-vsphere-host-with-esxcli/index.md new file mode 100644 index 0000000..947af7e --- /dev/null +++ b/content/post/upgrading-standalone-vsphere-host-with-esxcli/index.md @@ -0,0 +1,86 @@ +--- +title: "Upgrading a Standalone vSphere Host With esxcli" # Title of the blog post. +date: 2022-10-15T07:19:24-05:00 # Date of post creation. +# lastmod: 2022-10-14T07:19:24-05:00 # Date when last modified +description: "Using esxcli to upgrade a vSphere host from ESXi 7.x to 8.0." # Description used for search engine. +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +featureImage: "esxi8.png" # Sets featured image on blog post. 
+# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +# thumbnail: "thumbnail.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips # Projects, Scripts, vRA8 +tags: + - vmware + - homelab + - vsphere +comment: true # Disable comment if false. +--- +You may have heard that there's a new vSphere release out in the wild - [vSphere 8, which just reached Initial Availability this week](https://advocacy.vmware.com/Article/Redirect/9cfbc1b1-207f-4885-a520-cc0bfafcd6c0?uc=197618&g=2d17264e-593a-492d-8d91-3a2155e835f1&f=3104867). Upgrading the vCenter in my single-host homelab is a very straightforward task, and using the included Lifecycle Manager would make quick work of patching a cluster of hosts... but things get a little trickier with a single host. I could write the installer ISO to a USB drive, boot the host off of that, and go through the install interactively, but what if physical access to the host is kind of inconvenient? + +The other option for upgrading a host is using the `esxcli` command to apply an update from an offline bundle. It's a pretty easy solution (and can even be done remotely, such as when connected to [my homelab](/vmware-home-lab-on-intel-nuc-9) via the [Tailscale node running on my Quartz64 ESXi-ARM host](/esxi-arm-on-quartz64/#installing-tailscale)) *but I always forget the commands.* + +So here's a quick note on how I upgraded my lone ESXi to the new ESXi 8 IA release so that maybe I'll remember how to do it next time and won't have to go [Neeva](https://neeva.com/search?q=upgrade%20standalone%20host)'ing for the answer again.
+ +### 0: Download the offline bundle +Downloading the Offline Bundle from [VMware Customer Connect](https://customerconnect.vmware.com/downloads/details?downloadGroup=ESXI800&productId=1345&rPId=95214) yields a file named `VMware-ESXi-8.0-20513097-depot.zip`. + +![Downloading the bundle](download_bundle.png) + +### 1: Transfer the bundle to the host +I've found that the easiest way to do this is to copy it to a datastore which is accessible from the host. +![Offline bundle stored on the local datastore](bundle_on_datastore.png) + +### 2. Power down VMs +The host will need to be in maintenance mode in order to apply the upgrade, and since it's a standalone host it won't enter maintenance mode until all of its VMs have been stopped. This can be easily accomplished through the ESXi embedded host client. + +### 3. Place host in maintenance mode +I can do that by SSH'ing to the host and running: +```shell +esxcli system maintenanceMode set -e true +``` + +And can confirm that it happened with: +```shell +esxcli system maintenanceMode get +Enabled +``` + +### 4. Identify the profile name +Because this is an *upgrade* from one major release to another rather than a simple *update*, I need to know the name of the profile which will be applied. I can identify that with: +```shell +esxcli software sources profile list -d /vmfs/volumes/nuchost-local/_Patches/VMware-ESXi-8.0-20513097-depot.zip +Name Vendor Acceptance Level Creation Time Modification Time +---------------------------- ------------ ---------------- ------------------- ----------------- +ESXi-8.0.0-20513097-standard VMware, Inc. PartnerSupported 2022-09-23T18:59:28 2022-09-23T18:59:28 +ESXi-8.0.0-20513097-no-tools VMware, Inc. PartnerSupported 2022-09-23T18:59:28 2022-09-23T18:59:28 +``` +{{% notice info "Absolute paths" %}} +When using the `esxcli` command to install software/updates, it's important to use absolute paths rather than relative paths.
Otherwise you'll get errors and wind up chasing your tail for a while. +{{% /notice %}} + +In this case, I'll use the `ESXi-8.0.0-20513097-standard` profile. + +### 5. Install the upgrade +Now for the moment of truth: +```shell +esxcli software profile update -d /vmfs/volumes/nuchost-local/_Patches/VMware-ESXi-8.0-2051309 +7-depot.zip -p ESXi-8.0.0-20513097-standard +``` + +When it finishes (successfully), it leaves a little message that the update won't be complete until the host is rebooted, so I'll go ahead and do that as well: +```shell +reboot +``` + +And then wait (oh-so-patiently) for the host to come back up. + +### 6. Resume normal operation +Once the reboot is complete, log in to the host client to verify the upgrade was successful. You can then exit maintenance mode and start powering on the VMs again. + +The upgrade process took me about 20 minutes from start to finish, and now I'm ready to get on with exploring [what's new in vSphere 8](https://core.vmware.com/resource/whats-new-vsphere-8)! \ No newline at end of file diff --git a/content/post/using-powershell-and-a-scheduled-task-to-apply-windows-updates/index.md b/content/post/using-powershell-and-a-scheduled-task-to-apply-windows-updates/index.md new file mode 100644 index 0000000..ad58766 --- /dev/null +++ b/content/post/using-powershell-and-a-scheduled-task-to-apply-windows-updates/index.md @@ -0,0 +1,42 @@ +--- +series: Scripts +date: "2021-04-29T08:34:30Z" +usePageBundles: true +tags: +- windows +- powershell +title: Using PowerShell and a Scheduled Task to apply Windows Updates +toc: false +--- + +In the same vein as [my script to automagically resize a Linux LVM volume to use up free space on a disk](/automatic-unattended-expansion-of-linux-root-lvm-volume-to-fill-disk), I wanted a way to automatically apply Windows updates for servers deployed by [my vRealize Automation environment](/series/vra8). 
I'm only really concerned with Windows Server 2019, which includes the [built-in Windows Update Provider PowerShell module](https://4sysops.com/archives/scan-download-and-install-windows-updates-with-powershell/). So this could be as simple as `Install-WUUpdates -Updates (Start-WUScan)` to scan for and install any available updates. + +Unfortunately, I found that this approach can take a long time to run and often exceeded the timeout limits imposed upon my ABX script, causing the PowerShell session to end and terminating the update process. I really needed a way to do this without requiring a persistent session. + +After further experimentation, I settled on using PowerShell to create a one-time scheduled task that would run the updates and reboot, if necessary. I also wanted the task to automatically delete itself after running to avoid cluttering up the task scheduler library - and that last item had me quite stumped until I found [this blog post with the solution](https://iamsupergeek.com/self-deleting-scheduled-task-via-powershell/). + +So here's what I put together: +```powershell +# This can be easily pasted into a remote PowerShell session to automatically install any available updates and reboot. +# It creates a scheduled task to start the update process after a one-minute delay so that you don't have to maintain +# the session during the process (or have the session timeout), and it also sets the task to automatically delete itself 2 hours later. +# +# This leverages the Windows Update Provider PowerShell module which is included in Windows 10 1709+ and Windows Server 2019. 
+# +# Adapted from https://iamsupergeek.com/self-deleting-scheduled-task-via-powershell/ + +$action = New-ScheduledTaskAction -Execute 'C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe' -Argument '-NoProfile -WindowStyle Hidden -Command "& {Install-WUUpdates -Updates (Start-WUScan); if (Get-WUIsPendingReboot) {shutdown.exe /f /r /d p:2:4 /t 120 /c `"Rebooting to apply updates`"}}"' +$trigger = New-ScheduledTaskTrigger -Once -At ([DateTime]::Now.AddMinutes(1)) +$settings = New-ScheduledTaskSettingsSet -Compatibility Win8 -Hidden +Register-ScheduledTask -Action $action -Trigger $trigger -Settings $settings -TaskName "Initial_Updates" -User "NT AUTHORITY\SYSTEM" -RunLevel Highest +$task = Get-ScheduledTask -TaskName "Initial_Updates" +$task.Triggers[0].StartBoundary = [DateTime]::Now.AddMinutes(1).ToString("yyyy-MM-dd'T'HH:mm:ss") +$task.Triggers[0].EndBoundary = [DateTime]::Now.AddHours(2).ToString("yyyy-MM-dd'T'HH:mm:ss") +$task.Settings.AllowHardTerminate = $True +$task.Settings.DeleteExpiredTaskAfter = 'PT0S' +$task.Settings.ExecutionTimeLimit = 'PT2H' +$task.Settings.Volatile = $False +$task | Set-ScheduledTask +``` + +It creates the task, sets it to run in one minute, and then updates the task's configuration to make it auto-expire and delete two hours later. When triggered, the task installs all available updates and (if necessary) reboots the system after a 2-minute countdown (which an admin could cancel with `shutdown /a`, if needed). This could be handy for pasting in from a remote PowerShell session and works great when called from a vRA ABX script too! 
\ No newline at end of file diff --git a/content/post/using-vsphere-diagnostic-tool-fling/download.png b/content/post/using-vsphere-diagnostic-tool-fling/download.png new file mode 100644 index 0000000..839af2a Binary files /dev/null and b/content/post/using-vsphere-diagnostic-tool-fling/download.png differ diff --git a/content/post/using-vsphere-diagnostic-tool-fling/index.md b/content/post/using-vsphere-diagnostic-tool-fling/index.md new file mode 100644 index 0000000..e3e4bb5 --- /dev/null +++ b/content/post/using-vsphere-diagnostic-tool-fling/index.md @@ -0,0 +1,259 @@ +--- +title: "Using the vSphere Diagnostic Tool Fling" # Title of the blog post. +date: 2022-08-28 # Date of post creation. +# lastmod: 2022-08-23T15:02:50-05:00 # Date when last modified +description: "Save time and energy by using the VMware vSphere Diagnostic Tool to quickly investigate potential configuration problems in your VMware environment." +featured: false # Sets if post is a featured post, making appear on the home page side bar. +draft: false # Sets whether to render this page. Draft of true will not be rendered. +toc: true # Controls if a table of contents should be generated for first-level links automatically. +usePageBundles: true +# menu: main +featureImage: "vdt.png" # Sets featured image on blog post. +# featureImageAlt: 'Description of image' # Alternative text for featured image. +# featureImageCap: 'This is the featured image.' # Caption (optional). +thumbnail: "pulse2.png" # Sets thumbnail image appearing inside card on homepage. +# shareImage: "share.png" # Designate a separate image for social media sharing. +codeLineNumbers: false # Override global value for showing of line numbers within code block. +series: Tips # Projects, Scripts, vRA8 +tags: + - vmware + - vsphere + - python +comment: true # Disable comment if false. 
+--- +VMware vCenter does wonders for abstracting away the layers of complexity involved in managing a large virtual infrastructure, but when something goes wrong it can be challenging to find exactly where the problem lies. And it can be even harder to proactively address potential issues before they occur. + +Fortunately there's a super-handy utility which can make diagnosing vCenter significantly easier, and it comes in the form of the [vSphere Diagnostic Tool Fling](https://flings.vmware.com/vsphere-diagnostic-tool). VDT is a Python script which can be run directly on a vCenter Server appliance (version 6.5 and newer) to quickly check for problems and misconfigurations affecting: +- vCenter Basic Info +- Lookup Service +- Active Directory +- vCenter Certificates +- Core Files +- Disk Health +- vCenter DNS +- vCenter NTP +- vCenter Port +- Root Account +- vCenter Services +- VCHA + +For any problems which are identified, VDT will provide simple instructions and/or links to Knowledge Base articles for more detailed instructions on how to proceed with resolving the issues. Sounds pretty useful, right? And yet, somehow, I keep forgetting that VDT is a thing. So here's a friendly reminder to myself of how to obtain and use VDT to fix vSphere woes. Let's get started. + +### 1. Obtain +Obtaining the vSphere Diagnostic Tool is very easy. Just point a browser to https://flings.vmware.com/vsphere-diagnostic-tool, tick the box to agree to the Technical Preview License, and click the big friendly **Download** button. +![Download](download.png) + +It will show up in `.zip` format. + +### 2. Deploy +This needs to be run directly on the vCenter appliance so you'll need to copy the `.zip` package onto that server. [Secure Copy Protocol (SCP)](https://en.wikipedia.org/wiki/Secure_copy_protocol) is a great way to make that happen. By default, though, the VCSA uses a limited Appliance Shell which won't allow for file transfers.
Fortunately it's easy to [follow this KB](https://kb.vmware.com/s/article/2100508) to switch that to a more conventional `bash` shell: + +1. SSH to the VCSA. +2. Execute `shell` to launch the `bash` shell. +3. Execute `chsh -s /bin/bash` to set `bash` as the default shell. + + +Once that's done, just execute this on your local workstation to copy the `.zip` from your `~/Downloads/` folder to the VCSA's `/tmp/` directory: +```shell +scp ~/Downloads/vdt-v1.1.4.zip root@vcsa.lab.bowdre.net:/tmp/ +``` + +### 3. Extract +Now pop back over to an SSH session to the VCSA, extract the `.zip`, and get ready for action: +```shell +root@VCSA [ ~ ]# cd /tmp + +root@VCSA [ /tmp ]# unzip vdt-v1.1.4.zip +Archive: vdt-v1.1.4.zip +3557676756cffd658fd61aab5a6673269104e83c + creating: vdt-v1.1.4/ + ... + inflating: vdt-v1.1.4/vdt.py + +root@VCSA [ /tmp ]# cd vdt-v1.1.4/ +``` + +### 4. Execute +Now for the fun part: +```shell +root@VCSA [ /tmp/vdt-v1.1.4 ]# python vdt.py +_________________________ + RUNNING PULSE CHECK + +Today: Sunday, August 28 19:53:00 +Version: 1.1.4 +Log Level: INFO + +Provide password for administrator@vsphere.local: +``` + +After entering the SSO password, VDT will run for a few minutes and generate an on-screen report of its findings. Reports can also be found in the `/var/log/vmware/vdt/` directory. + +### 5. Review +Once the script has completed, it's time to look through the results and fix whatever can be found. 
As an example, here are some of the findings from my _deliberately-broken-for-the-purposes-of-this-post_ vCenter: + +#### Hostname/PNID mismatch +```log {hl_lines=[8,9,23,24]} + VCENTER BASIC INFO +BASIC: + Current Time: 2022-08-28 19:54:08.370889 + vCenter Uptime: up 2 days + vCenter Load Average: 0.26, 0.19, 0.12 + Number of CPUs: 2 + Total Memory: 11.71 + vCenter Hostname: VCSA + vCenter PNID: vcsa.lab.bowdre.net + vCenter IP Address: 192.168.1.12 + Proxy Configured: "no" + NTP Servers: pool.ntp.org + vCenter Node Type: vCenter with Embedded PSC + vCenter Version: 7.0.3.00800 - 20150588 +DETAILS: + vCenter SSO Domain: vsphere.local + vCenter AD Domain: No DOMAIN + Number of ESXi Hosts: 2 + Number of Virtual Machines: 25 + Number of Clusters: 1 + Disabled Plugins: None + +[FAIL] The hostname and PNID do not match! + Please see https://kb.vmware.com/s/article/2130599 for more details. +``` +Silly me - I must have changed the hostname at some point, which is not generally a Thing Which Should Be done. I can quickly [consult the referenced KB](https://kb.vmware.com/s/article/2130599) to figure out how to fix my mistake using the `/opt/vmware/share/vami/vami_config_net` utility. + +#### Missing DNS +```log {hl_lines=[3,4,5,12,13]} +Nameserver Queries +192.168.1.5 + [FAIL] DNS with UDP - unable to resolve vcsa to 192.168.1.12 + [FAIL] Reverse DNS - unable to resolve 192.168.1.12 to vcsa + [FAIL] DNS with TCP - unable to resolve vcsa to 192.168.1.12 + + Commands used: + dig +short + dig +noall +answer -x + dig +short +tcp + +RESULT: [FAIL] +Please see KB: https://kb.vmware.com/s/article/54682 +``` +Whoops - I guess I should go recreate the appropriate DNS records. + +#### Old core files +```log + CORE FILE CHECK +INFO: +These core files are older than 72 hours. consider deleting them +at your discretion to reduce the size of log bundles. 
+ FILES: + /storage/core/core.SchedulerCron.p.11919 Size: 34.36MB Last Modified: 2022-08-03T22:28:01 + /storage/core/core.python.1445 Size: 20.8MB Last Modified: 2022-08-03T22:13:37 + /storage/core/core.python.27513 Size: 41.12MB Last Modified: 2022-07-28T04:43:55 + /storage/core/core.ParGC.6 Size: 802.82MB Last Modified: 2022-07-28T04:38:54 + /storage/core/core.python.12536 Size: 39.82MB Last Modified: 2022-07-28T04:18:41 + /storage/core/core.python.50223 Size: 281.55MB Last Modified: 2022-07-13T22:22:13 + /storage/core/core.lsassd.56082 Size: 256.82MB Last Modified: 2022-07-13T22:16:53 + /storage/core/core.SchedulerCron.p.21078 Size: 29.52MB Last Modified: 2022-06-25T11:05:01 + /storage/core/core.python.19815 Size: 25.44MB Last Modified: 2022-06-25T11:03:06 + /storage/core/core.python.51946 Size: 25.8MB Last Modified: 2022-06-18T10:22:08 + /storage/core/core.python.40291 Size: 25.44MB Last Modified: 2022-06-13T11:21:26 + /storage/core/core.python.14872 Size: 43.97MB Last Modified: 2022-06-13T10:35:04 + /storage/core/core.python.11833 Size: 20.71MB Last Modified: 2022-06-13T10:30:01 + /storage/core/core.python.35275 Size: 42.87MB Last Modified: 2022-06-13T07:17:27 + /storage/core/core.VM.6 Size: 1.21GB Last Modified: 2022-06-13T00:38:56 +[INFO] Number of core files: 15 +``` +Those core files can be useful for investigating specific issues, but holding on to them long-term doesn't really do much good. _After checking to be sure I don't need them_, I can get rid of them all pretty easily like so: + +```shell +find /storage/core/ -name "core.*" -type f -mtime +3 -exec rm {} \; +``` + +#### NTP status +```log + VC NTP CHECK +[FAIL] NTP and Host time are both disabled! +``` +Oh yeah, let's turn that back on with `systemctl start ntpd`. + +#### Account status +```log + Root Account Check +[FAIL] Root password expires in 13 days + Please search for 'Change the Password of the Root User' + in vCenter documentation. +``` +That's a good thing to know. 
I'll [take care of that](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenter.configuration.doc/GUID-48BAF973-4FD3-4FF3-B1B6-5F7286C9B59A.html) while I'm thinking about it. + +```shell +chage -M -1 -E -1 root +``` + +#### Recheck +Now that I've corrected these issues, I can run VDT again to confirm that everything is back in a good state: + +```log {hl_lines=[8,9,"25-27",32,35,"55-56",59]} + VCENTER BASIC INFO + BASIC: + Current Time: 2022-08-28 20:13:25.192503 + vCenter Uptime: up 2 days + vCenter Load Average: 0.28, 0.14, 0.10 + Number of CPUs: 2 + Total Memory: 11.71 + vCenter Hostname: vcsa.lab.bowdre.net + vCenter PNID: vcsa.lab.bowdre.net + vCenter IP Address: 192.168.1.12 + Proxy Configured: "no" + NTP Servers: pool.ntp.org + vCenter Node Type: vCenter with Embedded PSC + vCenter Version: 7.0.3.00800 - 20150588 +DETAILS: + vCenter SSO Domain: vsphere.local + vCenter AD Domain: No DOMAIN + Number of ESXi Hosts: 2 + Number of Virtual Machines: 25 + Number of Clusters: 1 + Disabled Plugins: None +[...] +Nameserver Queries +192.168.1.5 + [PASS] DNS with UDP - resolved vcsa.lab.bowdre.net to 192.168.1.12 + [PASS] Reverse DNS - resolved 192.168.1.12 to vcsa.lab.bowdre.net + [PASS] DNS with TCP - resolved vcsa.lab.bowdre.net to 192.168.1.12 + Commands used: + dig +short + dig +noall +answer -x + dig +short +tcp +RESULT: [PASS] +[...] + CORE FILE CHECK +[PASS] Number of core files: 0 +[PASS] Number of hprof files: 0 +[...] 
+NTP Status Check ++-----------------------------------LEGEND-----------------------------------+ +| remote: NTP peer server | +| refid: server that this peer gets its time from | +| when: number of seconds passed since last response | +| poll: poll interval in seconds | +| delay: round-trip delay to the peer in milliseconds | +| offset: time difference between the server and client in milliseconds | ++-----------------------------------PREFIX-----------------------------------+ +| * Synchronized to this peer | +| # Almost synchronized to this peer | +| + Peer selected for possible synchronization | +| – Peer is a candidate for selection | +| ~ Peer is statically configured | ++----------------------------------------------------------------------------+ + remote refid st t when poll reach delay offset jitter +============================================================================== +*104.171.113.34 130.207.244.240 2 u 1 64 17 16.831 -34.597 0.038 +RESULT: [PASS] +[...] + Root Account Check +[PASS] Root password never expires +``` +All better! + +### Conclusion +The vSphere Diagnostic Tool makes a great addition to your arsenal of troubleshooting skills and utilities. It makes it easy to troubleshoot errors which might occur in your vSphere environment, as well as to uncover dormant issues which could cause serious problems in the future. 
\ No newline at end of file diff --git a/content/post/using-vsphere-diagnostic-tool-fling/pulse2.png b/content/post/using-vsphere-diagnostic-tool-fling/pulse2.png new file mode 100644 index 0000000..f45da7a Binary files /dev/null and b/content/post/using-vsphere-diagnostic-tool-fling/pulse2.png differ diff --git a/content/post/using-vsphere-diagnostic-tool-fling/vdt.png b/content/post/using-vsphere-diagnostic-tool-fling/vdt.png new file mode 100644 index 0000000..7a0dc2f Binary files /dev/null and b/content/post/using-vsphere-diagnostic-tool-fling/vdt.png differ diff --git a/content/post/virtually-potato-migrated-to-github-pages/20210720-jekyll.png b/content/post/virtually-potato-migrated-to-github-pages/20210720-jekyll.png new file mode 100644 index 0000000..26a5214 Binary files /dev/null and b/content/post/virtually-potato-migrated-to-github-pages/20210720-jekyll.png differ diff --git a/content/post/virtually-potato-migrated-to-github-pages/20210720-party.gif b/content/post/virtually-potato-migrated-to-github-pages/20210720-party.gif new file mode 100644 index 0000000..38d757d Binary files /dev/null and b/content/post/virtually-potato-migrated-to-github-pages/20210720-party.gif differ diff --git a/content/post/virtually-potato-migrated-to-github-pages/index.md b/content/post/virtually-potato-migrated-to-github-pages/index.md new file mode 100644 index 0000000..d52966d --- /dev/null +++ b/content/post/virtually-potato-migrated-to-github-pages/index.md @@ -0,0 +1,74 @@ +--- +date: "2021-07-20T22:20:00Z" +thumbnail: 20210720-jekyll.png +usePageBundles: true +tags: +- linux +- meta +- chromeos +- crostini +- jekyll +title: Virtually Potato migrated to GitHub Pages! +--- + +After a bit less than a year of hosting my little technical blog with [Hashnode](https://hashnode.com), I spent a few days [migrating the content](/script-to-update-image-embed-links-in-markdown-files) over to a new format hosted with [GitHub Pages](https://pages.github.com/). 
+ +![Party!](20210720-party.gif) + +### So long, Hashnode +Hashnode served me well for the most part, but it was never really a great fit for me. Hashnode's focus is on developer content, and I'm not really a developer; I'm a sysadmin who occasionally develops solutions to solve my needs, but the code is never the end goal for me. As a result, I didn't spend much time in the (large and extremely active) community associated with Hashnode. It's a perfectly adequate blogging platform apart from the community, but it's really built to prop up that community aspect and I found that to be a bit limiting - particularly once Hashnode stopped letting you create tags to be used within your blog and instead only allowed you to choose from [the tags](https://hashnode.com/tags) already popular in the community. There are hundreds of tags for different coding languages, but not any that would cover the infrastructure virtualization or other technical projects that I tend to write about. + +### Hello, GitHub Pages +I knew about GitHub Pages, but had never seriously looked into it. Once I did, though, it seemed like a much better fit for v{:potato:} - particularly when combined with [Jekyll](https://jekyllrb.com/) to take in Markdown posts and render them into static HTML. This approach would provide me more flexibility (and the ability to use whatever [tags](/tags) I want!), while still letting me easily compose my posts with Markdown. And I can now do my composition locally (and even offline!), and just do a `git push` to publish. Very cool! + +#### Getting started +I found that the quite-popular [Minimal Mistakes](https://mademistakes.com/work/minimal-mistakes-jekyll-theme/) theme for Jekyll offers a [remote theme starter](https://github.com/mmistakes/mm-github-pages-starter/generate) that can be used to quickly get things going. I just used that generator to spawn a new repository in my GitHub account ([`jbowdre.github.io`](https://github.com/jbowdre/jbowdre.github.io)). 
And that was it - I had a starter GitHub Pages-hosted Jekyll-powered static site with an elegant theme applied. I could even make changes to the various configuration and sample post files, point any browser to `https://jbowdre.github.io`, and see the results almost immediately. I got to work digging through the lengthy [configuration documentation](https://mmistakes.github.io/minimal-mistakes/docs/configuration/) to start making the site my own, like [connecting with my custom domain](https://docs.github.com/en/pages/configuring-a-custom-domain-for-your-github-pages-site/managing-a-custom-domain-for-your-github-pages-site) and enabling [GitHub Issue-based comments](https://github.com/apps/utterances). + +#### Working locally +A quick `git clone` operation was sufficient to create a local copy of my new site in my Lenovo Chromebook Duet's [Linux environment](/setting-up-linux-on-a-new-lenovo-chromebook-duet-bonus-arm64-complications). That lets me easily create and edit Markdown posts or configuration files with VS Code, commit them to the local copy of the repo, and then push them back to GitHub when I'm ready to publish the changes. + +In order to view the local changes, I needed to install Jekyll locally as well. I started by installing Ruby and other prerequisites: +```shell +sudo apt-get install ruby-full build-essential zlib1g-dev +``` + +I added the following to my `~/.zshrc` file so that the gems would be installed under my home directory rather than somewhere more privileged: +```shell +export GEM_HOME="$HOME/gems" +export PATH="$HOME/gems/bin:$PATH" +``` + +And then ran `source ~/.zshrc` so the change would take immediate effect. + +I could then install Jekyll: +```shell +gem install jekyll bundler +``` + +I then `cd`ed to the local repo and ran `bundle install` to also load up the components specified in the repo's `Gemfile`. 
+ +And, finally, I can run this to start up the local Jekyll server instance: +```shell +❯ bundle exec jekyll serve -l --drafts +Configuration file: /home/jbowdre/projects/jbowdre.github.io/_config.yml + Source: /home/jbowdre/projects/jbowdre.github.io + Destination: /home/jbowdre/projects/jbowdre.github.io/_site + Incremental build: enabled + Generating... + Remote Theme: Using theme mmistakes/minimal-mistakes + Jekyll Feed: Generating feed for posts + GitHub Metadata: No GitHub API authentication could be found. Some fields may be missing or have incorrect data. + done in 30.978 seconds. + Auto-regeneration: enabled for '/home/jbowdre/projects/jbowdre.github.io' +LiveReload address: http://0.0.0.0:35729 + Server address: http://0.0.0.0:4000 + Server running... press ctrl-c to stop. +``` + +And there it is! +![Jekyll running locally on my Chromebook](20210720-jekyll.png) + +### `git push` time +Alright that's enough rambling for now. I'm very happy with this new setup, particularly with the automatically-generated Table of Contents to help folks navigate some of my longer posts. (I can't believe I was having to piece those together manually in this blog's previous iteration!) + +I'll continue to make some additional tweaks in the coming weeks but for now I'll `git push` this post and get back to documenting my never-ending [vRA project](/series/vra8). 
\ No newline at end of file diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/42n3aMim5.png b/content/post/vmware-home-lab-on-intel-nuc-9/42n3aMim5.png new file mode 100644 index 0000000..2a1ac3e Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/42n3aMim5.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/4o5bqRiTJ.png b/content/post/vmware-home-lab-on-intel-nuc-9/4o5bqRiTJ.png new file mode 100644 index 0000000..065d457 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/4o5bqRiTJ.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/6-auEYd-W.png b/content/post/vmware-home-lab-on-intel-nuc-9/6-auEYd-W.png new file mode 100644 index 0000000..b42ea89 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/6-auEYd-W.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/7aNJa2Hlm.png b/content/post/vmware-home-lab-on-intel-nuc-9/7aNJa2Hlm.png new file mode 100644 index 0000000..3bb279e Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/7aNJa2Hlm.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/OOP_lstyM.png b/content/post/vmware-home-lab-on-intel-nuc-9/OOP_lstyM.png new file mode 100644 index 0000000..94a1779 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/OOP_lstyM.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/PZ6FzmJcx.png b/content/post/vmware-home-lab-on-intel-nuc-9/PZ6FzmJcx.png new file mode 100644 index 0000000..9541704 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/PZ6FzmJcx.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/SIDah-Lag.png b/content/post/vmware-home-lab-on-intel-nuc-9/SIDah-Lag.png new file mode 100644 index 0000000..224968a Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/SIDah-Lag.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/Wu3ZIIVTs.png 
b/content/post/vmware-home-lab-on-intel-nuc-9/Wu3ZIIVTs.png new file mode 100644 index 0000000..ad67f3c Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/Wu3ZIIVTs.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/XDe98S4Fx.png b/content/post/vmware-home-lab-on-intel-nuc-9/XDe98S4Fx.png new file mode 100644 index 0000000..f60ba4f Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/XDe98S4Fx.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/arA7gurqh.png b/content/post/vmware-home-lab-on-intel-nuc-9/arA7gurqh.png new file mode 100644 index 0000000..f3cfe68 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/arA7gurqh.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/index.md b/content/post/vmware-home-lab-on-intel-nuc-9/index.md new file mode 100644 index 0000000..e3f1601 --- /dev/null +++ b/content/post/vmware-home-lab-on-intel-nuc-9/index.md @@ -0,0 +1,264 @@ +--- +series: vRA8 +date: "2021-02-05T08:34:30Z" +thumbnail: SIDah-Lag.png +usePageBundles: true +tags: +- vmware +- homelab +- vra +- lcm +title: VMware Home Lab on Intel NUC 9 +featured: false +--- + +I picked up an Intel NUC 9 Extreme kit a few months back (thanks, VMware!) and have been slowly tinkering with turning it into an extremely capable self-contained home lab environment. I'm pretty happy with where things sit right now so figured it was about time to start documenting and sharing what I've done. 
+ +![But boy would I love some more RAM](SIDah-Lag.png) + +### Hardware +*(Caution: here be affiliate links)* +- [Intel NUC 9 Extreme (NUC9i9QNX)](https://amzn.to/2JezeEH) +- [Crucial 64GB DDR4 SO-DIMM kit (CT2K32G4SFD8266)](https://amzn.to/34BtPPy) +- [Intel 665p 1TB NVMe SSD (SSDPEKNW010T9X1)](https://amzn.to/3nMi5kW) +- Random 8GB USB thumbdrive I found in a drawer somewhere + +The NUC runs ESXi 7.0u1 and currently hosts the following: +- vCenter Server 7.0u1 +- Windows 2019 domain controller +- [VyOS router](https://vyos.io/) +- [Home Assistant OS 5.9](https://www.home-assistant.io/hassio/installation/) +- vRealize Lifecycle Manager 8.2 +- vRealize Identity Manager 3.3.2 +- vRealize Automation 8.2 +- 3-node [nested ESXi 7.0u1](https://williamlam.com/nested-virtualization/nested-esxi-virtual-appliance) vSAN cluster + +I'm leveraging my $200 [vMUG Advantage subscription](https://www.vmug.com/membership/vmug-advantage-membership) to provide 365-day licenses for all the VMware bits (particularly vRA, which doesn't come with a built-in evaluation option). + +### Basic Infrastructure +#### Setting up the NUC +The NUC connects to my home network through its onboard gigabit Ethernet interface (`vmnic0`). (The NUC does have a built-in WiFi adapter but for some reason VMware hasn't yet allowed their hypervisor to connect over WiFi - weird, right?) I wanted to use a small 8GB thumbdrive as the host's boot device so I installed that in one of the NUC's internal USB ports. For the purpose of installation, I connected a keyboard and monitor to the NUC, and I configured the BIOS to automatically boot up when power is restored after a power failure. + +I used the Chromebook Recovery Utility to write the ESXi installer ISO to *another* USB drive (how-to [here](/burn-an-iso-to-usb-with-the-chromebook-recovery-utility)), inserted that bootable drive to a port on the front of the NUC, and booted the NUC from the drive. Installing ESXi 7.0u1 was as easy as it could possibly be. 
All hardware was automatically detected and the appropriate drivers loaded. Once the host booted up, I used the DCUI to configure a static IP address (`192.168.1.11`). I then shut down the NUC, disconnected the keyboard and monitor, and moved it into the cabinet where it will live out its headless existence. + +I was then able to point my web browser to `https://192.168.1.11/ui/` to log in to the host and get down to business. First stop: networking. For now, I only need a single standard switch (`vSwitch0`) with two portgroups: one for the host's vmkernel interface, and the other for the VMs (including the nested ESXi appliances) that are going to run directly on this physical host. The one "gotcha" when working with a nested environment is that you'll need to edit the virtual switch's security settings to "Allow promiscuous mode" and "Allow forged transmits" (for reasons described [here](https://williamlam.com/2013/11/why-is-promiscuous-mode-forged.html)). +![Allowing promiscuous mode and forged transmits](w0HeFSi7Q.png) + +I created a single datastore to span the entirety of that 1TB NVMe drive. The nested ESXi hosts will use VMDKs stored here to provide storage to the nested VMs. +![The new datastore](XDe98S4Fx.png) + +#### Domain Controller +I created a new Windows VM with 2 vCPUs, 4GB of RAM, and a 90GB virtual hard drive, and I booted it off a [Server 2019 evaluation ISO](https://www.microsoft.com/en-US/evalcenter/evaluate-windows-server-2019?filetype=ISO). I gave it a name, a static IP address, and proceeded to install and configure the Active Directory Domain Services and DNS Server roles. I created static A and PTR records for the vCenter Server Appliance I'd be deploying next (`vcsa.`) and the physical host (`nuchost.`). I configured ESXi to use this new server for DNS resolutions, and confirmed that I could resolve the VCSA's name from the host. 
+
+![AD and DNS](4o5bqRiTJ.png)
+
+Before moving on, I installed the Chrome browser on this new Windows VM and also set up remote access via [Chrome Remote Desktop](https://remotedesktop.google.com/access/). This will let me remotely access and manage my lab environment without having to punch holes in the router firewall (or worry about securing said holes). And it's got "chrome" in the name so it will work just fine from my Chromebooks!
+
+#### vCenter
+I attached the vCSA installation ISO to the Windows VM and performed the vCenter deployment from there. (See, I told you that Chrome Remote Desktop would come in handy!)
+![vCenter deployment process](OOP_lstyM.png)
+
+After the vCenter was deployed and the basic configuration completed, I created a new cluster to contain the physical host. There's likely only ever going to be the one physical host but I like being able to logically group hosts in this way, particularly when working with PowerCLI. I then added the host to the vCenter by its shiny new FQDN.
+![Shiny new cluster](Wu3ZIIVTs.png)
+
+I've now got a fully-functioning VMware lab, complete with a physical hypervisor to run the workloads, a vCenter server to manage the workloads, and a Windows DNS server to tell the workloads how to talk to each other. Since the goal is to ultimately simulate a (small) production environment, let's set up some additional networking before we add anything else.
+
+### Networking
+#### Overview
+My home network uses the generic `192.168.1.0/24` address space, with the internet router providing DHCP addresses in the range `.100-.250`. I'm using the range `192.168.1.2-.99` for statically-configured IPs, particularly those within my lab environment.
Here are the addresses being used by the lab so far: + +| IP Address | Hostname | Purpose | +| ---- | ---- | ---- | +| `192.168.1.1` | | Gateway | +| `192.168.1.5` | `win01` | AD DC, DNS | +| `192.168.1.11` | `nuchost` | Physical ESXi host | +| `192.168.1.12` | `vcsa` | vCenter Server | + +Of course, not everything that I'm going to deploy in the lab will need to be accessible from outside the lab environment. This goes for obvious things like the vMotion and vSAN networks of the nested ESXi hosts, but it will also be useful to have internal networks that can be used by VMs provisioned by vRA. So I'll be creating these networks: + +| VLAN ID | Network | Purpose | +| ---- | ---- | ---- | +| 1610 | `172.16.10.0/24` | Management | +| 1620 | `172.16.20.0/24` | Servers-1 | +| 1630 | `172.16.30.0/24` | Servers-2 | +| 1698 | `172.16.98.0/24` | vSAN | +| 1699 | `172.16.99.0/24` | vMotion | + +#### vSwitch1 +I'll start by adding a second vSwitch to the physical host. It doesn't need a physical adapter assigned since this switch will be for internal traffic. I create two port groups: one tagged for the VLAN 1610 Management traffic, which will be useful for attaching VMs on the physical host to the internal network; and the second will use VLAN 4095 to pass all VLAN traffic to the nested ESXi hosts. And again, this vSwitch needs to have its security policy set to allow Promiscuous Mode and Forged Transmits. I also set the vSwitch to support an MTU of 9000 so I can use Jumbo Frames on the vMotion and vSAN networks. + +![Second vSwitch](7aNJa2Hlm.png) + +#### VyOS +Wouldn't it be great if the VMs that are going to be deployed on those `1610`, `1620`, and `1630` VLANs could still have their traffic routed out of the internal networks? But doing routing requires a router (or so my network friends tell me)... so I deployed a VM running the open-source VyOS router platform. 
I used [William Lam's instructions for installing VyOS](https://williamlam.com/2020/02/how-to-automate-the-creation-multiple-routable-vlans-on-single-l2-network-using-vyos.html), making sure to attach the first network interface to the Home-Network portgroup and the second to the Isolated portgroup (VLAN 4095). I then set to work [configuring the router](https://docs.vyos.io/en/latest/quick-start.html). + +After logging in to the VM, I entered the router's configuration mode: + +```shell +vyos@vyos:~$ configure +[edit] +vyos@vyos# +``` + +I then started with setting up the interfaces - `eth0` for the `192.168.1.0/24` network, `eth1` on the trunked portgroup, and a number of VIFs on `eth1` to handle the individual VLANs I'm interested in using. + +```shell +set interfaces ethernet eth0 address '192.168.1.8/24' +set interfaces ethernet eth0 description 'Outside' +set interfaces ethernet eth1 mtu '9000' +set interfaces ethernet eth1 vif 1610 address '172.16.10.1/24' +set interfaces ethernet eth1 vif 1610 description 'VLAN 1610 for Management' +set interfaces ethernet eth1 vif 1610 mtu '1500' +set interfaces ethernet eth1 vif 1620 address '172.16.20.1/24' +set interfaces ethernet eth1 vif 1620 description 'VLAN 1620 for Servers-1' +set interfaces ethernet eth1 vif 1620 mtu '1500' +set interfaces ethernet eth1 vif 1630 address '172.16.30.1/24' +set interfaces ethernet eth1 vif 1630 description 'VLAN 1630 for Servers-2' +set interfaces ethernet eth1 vif 1630 mtu '1500' +set interfaces ethernet eth1 vif 1698 description 'VLAN 1698 for vSAN' +set interfaces ethernet eth1 vif 1698 mtu '9000' +set interfaces ethernet eth1 vif 1699 description 'VLAN 1699 for vMotion' +set interfaces ethernet eth1 vif 1699 mtu '9000' +``` + +I also set up NAT for the networks that should be routable: + +```shell +set nat source rule 10 outbound-interface 'eth0' +set nat source rule 10 source address '172.16.10.0/24' +set nat source rule 10 translation address 'masquerade' +set nat source rule 
20 outbound-interface 'eth0' +set nat source rule 20 source address '172.16.20.0/24' +set nat source rule 20 translation address 'masquerade' +set nat source rule 30 outbound-interface 'eth0' +set nat source rule 30 source address '172.16.30.0/24' +set nat source rule 30 translation address 'masquerade' +set nat source rule 100 outbound-interface 'eth0' +set nat source rule 100 translation address 'masquerade' +set protocols static route 0.0.0.0/0 next-hop 192.168.1.1 +``` + +And I configured DNS forwarding: + +```shell +set service dns forwarding allow-from '0.0.0.0/0' +set service dns forwarding domain 10.16.172.in-addr.arpa. server '192.168.1.5' +set service dns forwarding domain 20.16.172.in-addr.arpa. server '192.168.1.5' +set service dns forwarding domain 30.16.172.in-addr.arpa. server '192.168.1.5' +set service dns forwarding domain lab.bowdre.net server '192.168.1.5' +set service dns forwarding listen-address '172.16.10.1' +set service dns forwarding listen-address '172.16.20.1' +set service dns forwarding listen-address '172.16.30.1' +set service dns forwarding name-server '192.168.1.1' +``` + +Finally, I also configured VyOS's DHCP server so that I won't have to statically configure the networking for VMs deployed from vRA: + +```shell +set service dhcp-server shared-network-name SCOPE_10_MGMT authoritative +set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 default-router '172.16.10.1' +set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 dns-server '192.168.1.5' +set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 domain-name 'lab.bowdre.net' +set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 lease '86400' +set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 range 0 start '172.16.10.100' +set service dhcp-server shared-network-name SCOPE_10_MGMT subnet 172.16.10.0/24 range 0 stop '172.16.10.200' +set service 
dhcp-server shared-network-name SCOPE_20_SERVERS authoritative +set service dhcp-server shared-network-name SCOPE_20_SERVERS subnet 172.16.20.0/24 default-router '172.16.20.1' +set service dhcp-server shared-network-name SCOPE_20_SERVERS subnet 172.16.20.0/24 dns-server '192.168.1.5' +set service dhcp-server shared-network-name SCOPE_20_SERVERS subnet 172.16.20.0/24 domain-name 'lab.bowdre.net' +set service dhcp-server shared-network-name SCOPE_20_SERVERS subnet 172.16.20.0/24 lease '86400' +set service dhcp-server shared-network-name SCOPE_20_SERVERS subnet 172.16.20.0/24 range 0 start '172.16.20.100' +set service dhcp-server shared-network-name SCOPE_20_SERVERS subnet 172.16.20.0/24 range 0 stop '172.16.20.200' +set service dhcp-server shared-network-name SCOPE_30_SERVERS authoritative +set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 default-router '172.16.30.1' +set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 dns-server '192.168.1.5' +set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 domain-name 'lab.bowdre.net' +set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 lease '86400' +set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 range 0 start '172.16.30.100' +set service dhcp-server shared-network-name SCOPE_30_SERVERS subnet 172.16.30.0/24 range 0 stop '172.16.30.200' +``` + +Satisfied with my work, I ran the `commit` and `save` commands. BOOM, this server jockey just configured a router! + +### Nested vSAN Cluster +Alright, it's time to start building up the nested environment. To start, I grabbed the latest [Nested ESXi Virtual Appliance .ova](https://williamlam.com/nested-virtualization/nested-esxi-virtual-appliance), courtesy of William Lam. 
I went ahead and created DNS records for the hosts I'd be deploying, and I mapped out what IPs would be used on each VLAN: + +|Hostname|1610-Management|1698-vSAN|1699-vMotion| +|----|----|----|----| +|`esxi01.lab.bowdre.net`|`172.16.10.21`|`172.16.98.21`|`172.16.99.21`| +|`esxi02.lab.bowdre.net`|`172.16.10.22`|`172.16.98.22`|`172.16.99.22`| +|`esxi03.lab.bowdre.net`|`172.16.10.23`|`172.16.98.23`|`172.16.99.23`| + +Deploying the virtual appliances is just like any other "Deploy OVF Template" action. I placed the VMs on the `physical-cluster` compute resource, and selected to thin provision the VMDKs on the local datastore. I chose the "Isolated" VM network which uses VLAN 4095 to make all the internal VLANs available on a single portgroup. + +![Deploying the nested ESXi OVF](zOJp-jqVb.png) + +And I set the networking properties accordingly: + +![OVF networking settings](PZ6FzmJcx.png) + +These virtual appliances come with 3 hard drives. The first will be used as the boot device, the second for vSAN caching, and the third for vSAN capacity. I doubled the size of the second and third drives, to 8GB and 16GB respectively: + +![OVF storage configuration](nkdH7Jfxw.png) + +After booting the new host VMs, I created a new cluster in vCenter and then added the nested hosts: +![New nested hosts added to a cluster](z8fvzu4Km.png) + +Next, I created a new Distributed Virtual Switch to break out the VLAN trunk on the nested host "physical" adapters into the individual VLANs I created on the VyOS router. Again, each port group will need to allow Promiscuous Mode and Forged Transmits, and I set the dvSwitch MTU size to 9000 (to support Jumbo Frames on the vSAN and vMotion portgroups). 
+![New dvSwitch for nested traffic](arA7gurqh.png) + +I migrated the physical NICs and `vmk0` to the new dvSwitch, and then created new vmkernel interfaces for vMotion and vSAN traffic on each of the nested hosts: +![ESXi vmkernel interfaces](6-auEYd-W.png) + +I then ssh'd into the hosts and used `vmkping` to make sure they could talk to each other over these interfaces. I changed the vMotion interface to use the vMotion TCP/IP stack so needed to append the `-S vmotion` flag to the command: + +```shell +[root@esxi01:~] vmkping -I vmk1 172.16.98.22 +PING 172.16.98.22 (172.16.98.22): 56 data bytes +64 bytes from 172.16.98.22: icmp_seq=0 ttl=64 time=0.243 ms +64 bytes from 172.16.98.22: icmp_seq=1 ttl=64 time=0.260 ms +64 bytes from 172.16.98.22: icmp_seq=2 ttl=64 time=0.262 ms + +--- 172.16.98.22 ping statistics --- +3 packets transmitted, 3 packets received, 0% packet loss +round-trip min/avg/max = 0.243/0.255/0.262 ms + +[root@esxi01:~] vmkping -I vmk2 172.16.99.22 -S vmotion +PING 172.16.99.22 (172.16.99.22): 56 data bytes +64 bytes from 172.16.99.22: icmp_seq=0 ttl=64 time=0.202 ms +64 bytes from 172.16.99.22: icmp_seq=1 ttl=64 time=0.312 ms +64 bytes from 172.16.99.22: icmp_seq=2 ttl=64 time=0.242 ms + +--- 172.16.99.22 ping statistics --- +3 packets transmitted, 3 packets received, 0% packet loss +round-trip min/avg/max = 0.202/0.252/0.312 ms +``` + +Okay, time to throw some vSAN on these hosts. Select the cluster object, go to the configuration tab, scroll down to vSAN, and click "Turn on vSAN". This will be a single site cluster, and I don't need to enable any additional services. When prompted, I claim the 8GB drives for the cache tier and the 16GB drives for capacity. +![Configuring vSAN](mw-rsq_1a.png) + +It'll take a few minutes for vSAN to get configured on the cluster. +![vSAN capacity is.... not much, but it's a start](mye0LdtNj.png) + +Huzzah! 
Next stop: + +### vRealize Automation 8.2 +The [vRealize Easy Installer](https://docs.vmware.com/en/vRealize-Automation/8.2/installing-vrealize-automation-easy-installer/GUID-CEF1CAA6-AD6F-43EC-B249-4BA81AA2B056.html) makes it, well, *easy* to install vRealize Automation (and vRealize Orchestrator, on the same appliance) and its prerequisites, vRealize Suite Lifecycle Manager (LCM) and Workspace ONE Access (formerly VMware Identity Manager) - provided that you've got enough resources. The vRA virtual appliance deploys with a whopping **40GB** of memory allocated to it. Post-deployment, I found that I was able to trim that down to 30GB without seeming to break anything, but going much lower than that would result in services failing to start. + +Anyhoo, each of these VMs will need to be resolvable in DNS so I started by creating some A records: + +|FQDN|IP| +|----|----| +|`lcm.lab.bowdre.net`|`192.168.1.40`| +|`idm.lab.bowdre.net`|`192.168.1.41`| +|`vra.lab.bowdre.net`|`192.168.1.42`| + +I then attached the installer ISO to my Windows VM and ran through the installation from there. +![vRealize Easy Installer](42n3aMim5.png) + +Similar to the vCenter deployment process, this one prompts you for all the information it needs up front and then takes care of everything from there. That's great news because this is a pretty long deployment; it took probably two hours from clicking the final "Okay, do it" button to being able to log in to my shiny new vRealize Automation environment. + +### Wrap-up +So that's a glimpse into how I built my nested ESXi lab - all for the purpose of being able to develop and test vRealize Automation templates and vRealize Orchestrator workflows in a semi-realistic environment. I've used this setup to write a [vRA integration for using phpIPAM](https://github.com/jbowdre/phpIPAM-for-vRA8) to assign static IP addresses to deployed VMs. 
I wrote a complicated vRO workflow for generating unique hostnames which fit a corporate naming standard *and* don't conflict with any other names in vCenter, Active Directory, or DNS. I also developed a workflow for (optionally) creating AD objects under appropriate OUs based on properties generated on the cloud template; VMware [just announced](https://blogs.vmware.com/management/2021/02/whats-new-with-vrealize-automation-8-3-technical-overview.html#:~:text=New%20Active%20Directory%20Cloud%20Template%20Properties) similar functionality with vRA 8.3 and, honestly, my approach works much better for my needs anyway. And, most recently, I put the finishing touches on a solution for (optionally) creating static records in a Microsoft DNS server from vRO. + +I'll post more about all that work soon but this post has already gone on long enough. Stay tuned! diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/mw-rsq_1a.png b/content/post/vmware-home-lab-on-intel-nuc-9/mw-rsq_1a.png new file mode 100644 index 0000000..ba1aebb Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/mw-rsq_1a.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/mye0LdtNj.png b/content/post/vmware-home-lab-on-intel-nuc-9/mye0LdtNj.png new file mode 100644 index 0000000..29935b8 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/mye0LdtNj.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/nkdH7Jfxw.png b/content/post/vmware-home-lab-on-intel-nuc-9/nkdH7Jfxw.png new file mode 100644 index 0000000..f30a600 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/nkdH7Jfxw.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/w0HeFSi7Q.png b/content/post/vmware-home-lab-on-intel-nuc-9/w0HeFSi7Q.png new file mode 100644 index 0000000..4391e8b Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/w0HeFSi7Q.png differ diff --git 
a/content/post/vmware-home-lab-on-intel-nuc-9/z8fvzu4Km.png b/content/post/vmware-home-lab-on-intel-nuc-9/z8fvzu4Km.png new file mode 100644 index 0000000..9144421 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/z8fvzu4Km.png differ diff --git a/content/post/vmware-home-lab-on-intel-nuc-9/zOJp-jqVb.png b/content/post/vmware-home-lab-on-intel-nuc-9/zOJp-jqVb.png new file mode 100644 index 0000000..82f64a4 Binary files /dev/null and b/content/post/vmware-home-lab-on-intel-nuc-9/zOJp-jqVb.png differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/Ivv0ia8oX.png b/content/post/vra8-automatic-deployment-naming-another-take/Ivv0ia8oX.png new file mode 100644 index 0000000..c3aa302 Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/Ivv0ia8oX.png differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/NoN-72Qf6.png b/content/post/vra8-automatic-deployment-naming-another-take/NoN-72Qf6.png new file mode 100644 index 0000000..b6c6079 Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/NoN-72Qf6.png differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/aumfETl1l.gif b/content/post/vra8-automatic-deployment-naming-another-take/aumfETl1l.gif new file mode 100644 index 0000000..b21b63a Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/aumfETl1l.gif differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/index.md b/content/post/vra8-automatic-deployment-naming-another-take/index.md new file mode 100644 index 0000000..cda6f0a --- /dev/null +++ b/content/post/vra8-automatic-deployment-naming-another-take/index.md @@ -0,0 +1,55 @@ +--- +series: vRA8 +date: "2021-05-20T08:34:30Z" +thumbnail: wl-WPQpEl.png +usePageBundles: true +tags: +- vmware +- vra +- vro +- javascript +title: vRA8 Automatic Deployment Naming - Another Take +toc: false +--- + +A [few days 
ago](/vra8-custom-provisioning-part-four#automatic-deployment-naming), I shared how I combined a Service Broker Custom Form with a vRO action to automatically generate a unique and descriptive deployment name based on user inputs. That approach works *fine* but while testing some other components I realized that calling that action each time a user makes a selection isn't necessarily ideal. After a bit of experimentation, I settled on what I believe to be a better solution. + +Instead of setting the "Deployment Name" field to use an External Source (vRO), I'm going to configure it to use a Computed Value. This is a bit less flexible, but all the magic happens right there in the form without having to make an expensive vRO call. +![Computed Value option](Ivv0ia8oX.png) + +After setting `Value source` to `Computed value`, I also set the `Operation` to `Concatenate` (since it is, after all, the only operation choice. I can then use the **Add Value** button to add some fields. Each can be either a *Constant* (like a separator) or linked to a *Field* on the request form. By combining those, I can basically reconstruct the same arrangement that I was previously generating with vRO: +![Fields and Constants!](zN3EN6lrG.png) + +So this will generate a name that looks something like `[user]_[catalog_item]_[site]-[env][function]-[app]`, all without having to call vRO! That gets me pretty close to what I want... but there's always the chance that the generated name won't be truly unique. Being able to append a timestamp on to the end would be a big help here. + +That does mean that I'll need to add another vRO call, but I can set this up so that it only gets triggered once, when the form loads, instead of refreshing each time the inputs change. + +So I hop over to vRO and create a new action, which I call `getTimestamp`. It doesn't require any inputs, and returns a single string. 
Here's the code: +```js +// JavaScript: getTimestamp action +// Inputs: None +// Returns: result (String) + +var date = new Date(); +var result = date.toISOString(); +return result +``` + +I then drag a Text Field called `Timestamp` onto the Service Broker Custom Form canvas, and set it to not be visible: +![Invisible timestamp](rtTeG3ZoR.png) + +And I set it to pull its value from my new `net.bowdre.utility/getTimestamp` action: +![Calling the action](NoN-72Qf6.png) + +Now when the form loads, this field will store a timestamp with thousandths-of-a-second precision. + +The last step is to return to the Deployment Name field and link in the new Timestamp field so that it will get tacked on to the end of the generated name. +![Linked!](wl-WPQpEl.png) + +The old way looked like this, where it had to churn a bit after each selection: +![The Churn](vH-npyz9s.gif) + +Here's the newer approach, which feels much snappier: +![Snappy!](aumfETl1l.gif) + +Not bad! Now I can make the Deployment Name field hidden again and get back to work! 
\ No newline at end of file diff --git a/content/post/vra8-automatic-deployment-naming-another-take/rtTeG3ZoR.png b/content/post/vra8-automatic-deployment-naming-another-take/rtTeG3ZoR.png new file mode 100644 index 0000000..745faef Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/rtTeG3ZoR.png differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/vH-npyz9s.gif b/content/post/vra8-automatic-deployment-naming-another-take/vH-npyz9s.gif new file mode 100644 index 0000000..9b76fbb Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/vH-npyz9s.gif differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/wl-WPQpEl.png b/content/post/vra8-automatic-deployment-naming-another-take/wl-WPQpEl.png new file mode 100644 index 0000000..030ae92 Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/wl-WPQpEl.png differ diff --git a/content/post/vra8-automatic-deployment-naming-another-take/zN3EN6lrG.png b/content/post/vra8-automatic-deployment-naming-another-take/zN3EN6lrG.png new file mode 100644 index 0000000..e1d67d5 Binary files /dev/null and b/content/post/vra8-automatic-deployment-naming-another-take/zN3EN6lrG.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/0-9BaWJqq.png b/content/post/vra8-custom-provisioning-part-four/0-9BaWJqq.png new file mode 100644 index 0000000..06985f3 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/0-9BaWJqq.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/09faF5-Fm.png b/content/post/vra8-custom-provisioning-part-four/09faF5-Fm.png new file mode 100644 index 0000000..86c07fb Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/09faF5-Fm.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/1NJvDeA7r.png b/content/post/vra8-custom-provisioning-part-four/1NJvDeA7r.png new file mode 
100644 index 0000000..3b952f7 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/1NJvDeA7r.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/4X1dPG_Rq.png b/content/post/vra8-custom-provisioning-part-four/4X1dPG_Rq.png new file mode 100644 index 0000000..bffd2f3 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/4X1dPG_Rq.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/4flvfGC54.png b/content/post/vra8-custom-provisioning-part-four/4flvfGC54.png new file mode 100644 index 0000000..22fbd19 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/4flvfGC54.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/CDy518peA.png b/content/post/vra8-custom-provisioning-part-four/CDy518peA.png new file mode 100644 index 0000000..8a4d9e6 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/CDy518peA.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/GMCWhns7u.png b/content/post/vra8-custom-provisioning-part-four/GMCWhns7u.png new file mode 100644 index 0000000..70e1b57 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/GMCWhns7u.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/Hlnnd_8Ed.png b/content/post/vra8-custom-provisioning-part-four/Hlnnd_8Ed.png new file mode 100644 index 0000000..2828e8e Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/Hlnnd_8Ed.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/IdrT-Un8H1.png b/content/post/vra8-custom-provisioning-part-four/IdrT-Un8H1.png new file mode 100644 index 0000000..f55a976 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/IdrT-Un8H1.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/J_RG9JNPz.png b/content/post/vra8-custom-provisioning-part-four/J_RG9JNPz.png new file mode 100644 index 0000000..b47c9ee 
Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/J_RG9JNPz.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/REZ08yA2E.png b/content/post/vra8-custom-provisioning-part-four/REZ08yA2E.png new file mode 100644 index 0000000..d8c8a72 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/REZ08yA2E.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/TQGyrUqIx.png b/content/post/vra8-custom-provisioning-part-four/TQGyrUqIx.png new file mode 100644 index 0000000..2fb73bd Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/TQGyrUqIx.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/ZPsS0oZuc.png b/content/post/vra8-custom-provisioning-part-four/ZPsS0oZuc.png new file mode 100644 index 0000000..50b1b5d Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/ZPsS0oZuc.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/af-OEP5Tu.png b/content/post/vra8-custom-provisioning-part-four/af-OEP5Tu.png new file mode 100644 index 0000000..1c7b9dc Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/af-OEP5Tu.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/bCKrtn05o.png b/content/post/vra8-custom-provisioning-part-four/bCKrtn05o.png new file mode 100644 index 0000000..32dc0fd Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/bCKrtn05o.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/fh37T__nb.gif b/content/post/vra8-custom-provisioning-part-four/fh37T__nb.gif new file mode 100644 index 0000000..9a0781b Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/fh37T__nb.gif differ diff --git a/content/post/vra8-custom-provisioning-part-four/hFPeakMxn.png b/content/post/vra8-custom-provisioning-part-four/hFPeakMxn.png new file mode 100644 index 0000000..ddf0e7a Binary files /dev/null and 
b/content/post/vra8-custom-provisioning-part-four/hFPeakMxn.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/iScnhmzVY.png b/content/post/vra8-custom-provisioning-part-four/iScnhmzVY.png new file mode 100644 index 0000000..87f27e5 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/iScnhmzVY.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/index.md b/content/post/vra8-custom-provisioning-part-four/index.md new file mode 100644 index 0000000..b26ee24 --- /dev/null +++ b/content/post/vra8-custom-provisioning-part-four/index.md @@ -0,0 +1,217 @@ +--- +series: vRA8 +date: "2021-05-18T08:34:30Z" +lastmod: "2021-05-20" +thumbnail: hFPeakMxn.png +usePageBundles: true +tags: +- vmware +- vra +- vro +- javascript +title: 'vRA8 Custom Provisioning: Part Four' +--- + +My [last post in this series](/vra8-custom-provisioning-part-three) marked the completion of the vRealize Orchestrator workflow that I use for pre-provisioning tasks, namely generating a unique *sequential* hostname which complies with a defined naming standard and doesn't conflict with any existing records in vSphere, Active Directory, or DNS. That takes care of many of the "back-end" tasks for a simple deployment. + +This post will add in some "front-end" operations, like creating a customized VM request form in Service Broker and dynamically populating a drop-down with a list of networks available at the user-selected deployment site. I'll also take care of some housekeeping items like automatically generating a unique deployment name. + +### Getting started with Service Broker Custom Forms +So far, I've been working either in the Cloud Assembly or Orchestrator UIs, both of which are really geared toward administrators. Now I'm going to be working with Service Broker which will provide the user-facing front-end. 
This is where "normal" users will be able to submit provisioning requests without having to worry about any of the underlying infrastructure or orchestration. + +Before I can do anything with my Cloud Template in the Service Broker UI, though, I'll need to release it from Cloud Assembly. I do this by opening the template on the *Design* tab and clicking the *Version* button at the bottom of the screen. I'll label this as `1.0` and tick the checkbox to *Release this version to the catalog*. +![Releasing the Cloud Template to the Service Broker catalog](0-9BaWJqq.png) + +I can then go to the Service Broker UI and add a new Content Source for my Cloud Assembly templates. +![Add a new Content Source](4X1dPG_Rq.png) +![Adding a new Content Source](af-OEP5Tu.png) +After hitting the *Create & Import* button, all released Cloud Templates in the selected Project will show up in the Service Broker *Content* section: +![New content!](Hlnnd_8Ed.png) + +In order for users to deploy from this template, I also need to go to *Content Sharing*, select the Project, and share the content. This can be done either at the Project level or by selecting individual content items. +![Content sharing](iScnhmzVY.png) + +That template now appears on the Service Broker *Catalog* tab: +![Catalog items](09faF5-Fm.png) + +That's cool and all, and I could go ahead and request a deployment off of that catalog item right now - but I'm really interested in being able to customize the request form. I do that by clicking on the little three-dot menu icon next to the Content entry and selecting the *Customize form* option. +![Customize form](ZPsS0oZuc.png) + +When you start out, the custom form kind of jumbles up the available fields. 
So I'm going to start by dragging-and-dropping the fields to resemble the order defined in the Cloud Template: +![Starting to customize the custom form](oLwUg1k6T.png) + +In addition to rearranging the request form fields, Custom Forms also provide significant control over how the form behaves. You can change how a field is displayed, define default values, make fields dependent upon other fields and more. For instance, all of my templates and resources belong to a single project so making the user select the project (from a set of 1) is kind of redundant. Every deployment has to be tied to a project so I can't just remove that field, but I can select the "Project" field on the canvas and change its *Visibility* to "No" to hide it. It will silently pass along the correct project ID in the background without cluttering up the form. +![Hiding the Project field](4flvfGC54.png) + +How about that Deployment Name field? In my tests, I'd been manually creating a string of numbers to uniquely identify the deployment, but I'm not going to ask my users to do that. Instead, I'll leverage another great capability of Custom Forms - tying a field value to a result of a custom vRO action! + +### Automatic deployment naming +*[Update] I've since come up with what I think is a better approach to handling this. Check it out [here](/vra8-automatic-deployment-naming-another-take)!* + +That means it's time to dive back into the vRealize Orchestrator interface and whip up a new action for this purpose. I created a new action within my existing `net.bowdre.utility` module called `createDeploymentName`. +![createDeploymentName action](GMCWhns7u.png) + +A good deployment name *must* be globally unique, and it would be great if it could also convey some useful information like who requested the deployment, which template it is being deployed from, and the purpose of the server. 
The `siteCode (String)`, `envCode (String)`, `functionCode (String)`, and `appCode (String)` variables from the request form will do a great job of describing the server's purpose. I can also pass in some additional information from the Service Broker form like `catalogItemName (String)` to get the template name and `requestedByName (String)` to identify the user making the request. So I'll set all those as inputs to my action: +![createDeploymentName inputs](bCKrtn05o.png) + +I also went ahead and specified that the action will return a String. + +And now for the code. I really just want to mash all those variables together into a long string, and I'll also add a timestamp to make sure each deployment name is truly unique. + +```js +// JavaScript: createDeploymentName +// Inputs: catalogItemName (String), requestedByName (String), siteCode (String), +// envCode (String), functionCode (String), appCode (String) +// Returns: deploymentName (String) + +var deploymentName = '' + +// we don't want to evaluate this until all requested fields have been completed +if (catalogItemName != '' && requestedByName != null && siteCode != null && envCode != null && functionCode != null && appCode != null) { + var date = new Date() + deploymentName = requestedByName + "_" + catalogItemName + "_" + siteCode + "-" + envCode.substring(0,1) + functionCode + "-" + appCode.toUpperCase() + "-(" + date.toISOString() + ")" + System.debug("Returning deploymentName: " + deploymentName) +} +return deploymentName +``` + +With that sorted, I can go back to the Service Broker interface to modify the custom form a bit more. I select the "Deployment Name" field and click over to the Values tab on the right. There, I set the *Value source* to "External source" and *Select action* to the new action I just created, `net.bowdre.utility/createDeploymentName`. 
(If the action doesn't appear in the search field, go to *Infrastructure > Integrations > Embedded-VRO* and click the "Start Data Collection" button to force vRA to update its inventory of vRO actions and workflows.) I then map all the action's inputs to properties available on the request form. +![Linking the action](mpbPukEeB.png) + +The last step before testing is to click that *Enable* button to activate the custom form, and then the *Save* button to save my work. So did it work? Let's head to the *Catalog* tab and open the request: +![Watching the deployment name change](tybyj-5dG.gif) + +Cool! So it's dynamically generating the deployment name based on selections made on the form. Now that it works, I can go back to the custom form and set the "Deployment Name" field to be invisible just like the "Project" one. + +### Per-site network selection +So far, vRA has been automatically placing VMs on networks based solely on [which networks are tagged as available](/vra8-custom-provisioning-part-one#using-tags-for-resource-placement) for the selected site. I'd like to give my users a bit more control over which network their VMs get attached to, particularly as some networks may be set aside for different functions or have different firewall rules applied. + +As a quick recap, I've got five networks available for vRA, split across my two sites using tags: + +|Name |Subnet |Site |Tags | +| --- | --- | --- | --- | +| d1620-Servers-1 | 172.16.20.0/24 | BOW | `net:bow` | +| d1630-Servers-2 | 172.16.30.0/24 | BOW | `net:bow` | +| d1640-Servers-3 | 172.16.40.0/24 | BOW | `net:bow` | +| d1650-Servers-4 | 172.16.50.0/24 | DRE | `net:dre` | +| d1660-Servers-5 | 172.16.60.0/24 | DRE | `net:dre` | + +I'm going to add additional tags to these networks to further define their purpose. 
+ +|Name |Purpose |Tags | +| --- | --- | --- | +| d1620-Servers-1 |Management | `net:bow`, `net:mgmt` | +| d1630-Servers-2 | Front-end | `net:bow`, `net:front` | +| d1640-Servers-3 | Back-end | `net:bow`, `net:back` | +| d1650-Servers-4 | Front-end | `net:dre`, `net:front` | +| d1660-Servers-5 | Back-end | `net:dre`, `net:back` | + +I *could* just use those tags to let users pick the appropriate network, but I've found that a lot of times users don't know why they're picking a certain network, they just know the IP range they need to use. So I'll take it a step further and add a giant tag to include the Site, Purpose, and Subnet, and this is what will ultimately be presented to the users: + +|Name |Tags | +| --- | --- | +| d1620-Servers-1 | `net:bow`, `net:mgmt`, `net:bow-mgmt-172.16.20.0` | +| d1630-Servers-2 | `net:bow`, `net:front`, `net:bow-front-172.16.30.0` | +| d1640-Servers-3 | `net:bow`, `net:back`, `net:bow-back-172.16.40.0` | +| d1650-Servers-4 | `net:dre`, `net:front`, `net:dre-front-172.16.50.0` | +| d1660-Servers-5 | `net:dre`, `net:back`, `net:dre-back-172.16.60.0` | + +![Tagged networks](J_RG9JNPz.png) + +So I can now use a single tag to positively identify a single network, as long as I know its site and either its purpose or its IP space. I'll reference these tags in a vRO action that will populate a dropdown in the request form with the available networks for the selected site. Unfortunately I couldn't come up with an easy way to dynamically pull the tags into vRO so I create another Configuration Element to store them: +![networksPerSite configuration element](xfEultDM_.png) + +This gets filed under the existing `CustomProvisioning` folder, and I name it `networksPerSite`. Each site gets a new variable of type `Array/string`. The name of the variable matches the site ID, and the contents are just the tags minus the `net:` prefix. + +I created a new action named (appropriately) `getNetworksForSite`. 
This will accept `siteCode (String)` as its input from the Service Broker request form, and will return an array of strings containing the available networks. +![getNetworksForSite action](IdrT-Un8H1.png) + +```js +// JavaScript: getNetworksForSite +// Inputs: siteCode (String) +// Returns: site.value (Array/String) + +// Get networksPerSite configurationElement +var category = Server.getConfigurationElementCategoryWithPath("CustomProvisioning") +var elements = category.configurationElements +for (var i in elements) { + if (elements[i].name == "networksPerSite") { + var networksPerSite = elements[i] + } +} + +// Lookup siteCode and find available networks +try { + var site = networksPerSite.getAttributeWithKey(siteCode) +} catch (e) { + System.debug("Invalid site."); +} finally { + return site.value +} +``` + +Back in Cloud Assembly, I edit the Cloud Template to add an input field called `network`: + +```yaml +inputs: + [...] + network: + type: string + title: Network + [...] +``` + +and update the resource configuration for the network entity to constrain it based on `input.network` instead of `input.site` as before: + +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + <...> + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + constraints: + - tag: 'comp:${to_lower(input.site)}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing + constraints: + # - tag: 'net:${to_lower(input.site)}' + - tag: 'net:${input.network}' +``` + +Remember that the `networksPerSite` configuration element contains the portion of the tags *after* the `net:` prefix so that's why I include the prefix in the constraint tag here. I just didn't want it to appear in the selection dropdown. 
+ +After making this change to the Cloud Template I use the "Create Version" button again to create a new version and tick the option to release it so that it can be picked up by Service Broker. +![Another new version](REZ08yA2E.png) + +Back on the Service Broker UI, I hit my `LAB` Content Source again to Save & Import the new change, and then go to customize the form for `WindowsDemo` again. After dragging-and-dropping the new `Network` field onto the request form blueprint, I kind of repeat the steps I used for adjusting the Deployment Name field earlier. On the Appearance tab I set it to be a DropDown, and on the Values tab I set it to an external source, `net.bowdre.utility/getNetworksForSite`. This action only needs a single input so I map `Site` on the request form to the `siteCode` input. +![Linking the Network field to the getNetworksForSite action](CDy518peA.png) + +Now I can just go back to the Catalog tab and request a new deployment to check out my-- +![Ew, an ugly error](zWFTuOYOG.png) + +Oh yeah. That vRO action gets called as soon as the request form loads - before selecting the required site code as an input. I could modify the action so that returns an empty string if the site hasn't been selected yet, but I'm kind of lazy so I'll instead just modify the custom form so that the Site field defaults to the `BOW` site. +![BOW is default](yb77nH2Fp.png) + +*Now* I can open up the request form and see how well it works: +![Network selection in action](fh37T__nb.gif) + +Noice! + +### Putting it all together now +At this point, I'll actually kick off a deployment and see how everything works out. 
+![The request](hFPeakMxn.png) + +After hitting Submit, I can see that this deployment has a much more friendly name than the previous ones: +![Auto generated deployment name!](TQGyrUqIx.png) + +And I can also confirm that the VM got named appropriately (based on the [naming standard I implemented earlier](vra8-custom-provisioning-part-two)), and it also got placed on the `172.16.60.0/24` network I selected. +![Network placement - check!](1NJvDeA7r.png) + +Very slick. And I think that's a great stopping point for today. + +Coming up, I'll describe how I create AD computer objects in site-specific OUs, add notes and custom attributes to the VM in vSphere, and optionally create static DNS records on a Windows DNS server. \ No newline at end of file diff --git a/content/post/vra8-custom-provisioning-part-four/mpbPukEeB.png b/content/post/vra8-custom-provisioning-part-four/mpbPukEeB.png new file mode 100644 index 0000000..c8260d1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/mpbPukEeB.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/oLwUg1k6T.png b/content/post/vra8-custom-provisioning-part-four/oLwUg1k6T.png new file mode 100644 index 0000000..782a7b7 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/oLwUg1k6T.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/tybyj-5dG.gif b/content/post/vra8-custom-provisioning-part-four/tybyj-5dG.gif new file mode 100644 index 0000000..ceb80cb Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/tybyj-5dG.gif differ diff --git a/content/post/vra8-custom-provisioning-part-four/xfEultDM_.png b/content/post/vra8-custom-provisioning-part-four/xfEultDM_.png new file mode 100644 index 0000000..e49e70c Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/xfEultDM_.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/yb77nH2Fp.png 
b/content/post/vra8-custom-provisioning-part-four/yb77nH2Fp.png new file mode 100644 index 0000000..3a2cb51 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/yb77nH2Fp.png differ diff --git a/content/post/vra8-custom-provisioning-part-four/zWFTuOYOG.png b/content/post/vra8-custom-provisioning-part-four/zWFTuOYOG.png new file mode 100644 index 0000000..ec33116 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-four/zWFTuOYOG.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/3-UIo1Ykn.png b/content/post/vra8-custom-provisioning-part-one/3-UIo1Ykn.png new file mode 100644 index 0000000..15aeb0c Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/3-UIo1Ykn.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/3vQER.png b/content/post/vra8-custom-provisioning-part-one/3vQER.png new file mode 100644 index 0000000..737ff4d Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/3vQER.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/4dNwfNNDY.png b/content/post/vra8-custom-provisioning-part-one/4dNwfNNDY.png new file mode 100644 index 0000000..1779508 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/4dNwfNNDY.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/6k06ySON7.png b/content/post/vra8-custom-provisioning-part-one/6k06ySON7.png new file mode 100644 index 0000000..9bed6d1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/6k06ySON7.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/AZsVThaRO.png b/content/post/vra8-custom-provisioning-part-one/AZsVThaRO.png new file mode 100644 index 0000000..4092e23 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/AZsVThaRO.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/BA2BWCd6K.png 
b/content/post/vra8-custom-provisioning-part-one/BA2BWCd6K.png new file mode 100644 index 0000000..3cd0c9a Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/BA2BWCd6K.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/HC6vQMeVT.png b/content/post/vra8-custom-provisioning-part-one/HC6vQMeVT.png new file mode 100644 index 0000000..0e1b700 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/HC6vQMeVT.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/KUCwEgEhN.png b/content/post/vra8-custom-provisioning-part-one/KUCwEgEhN.png new file mode 100644 index 0000000..2bf2e4f Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/KUCwEgEhN.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/LST4LisFl.png b/content/post/vra8-custom-provisioning-part-one/LST4LisFl.png new file mode 100644 index 0000000..41c0c72 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/LST4LisFl.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/PIeW8xA2j.png b/content/post/vra8-custom-provisioning-part-one/PIeW8xA2j.png new file mode 100644 index 0000000..63b102a Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/PIeW8xA2j.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/Q-2ZQg_ji.png b/content/post/vra8-custom-provisioning-part-one/Q-2ZQg_ji.png new file mode 100644 index 0000000..8795229 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/Q-2ZQg_ji.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/RtMljqM9x.png b/content/post/vra8-custom-provisioning-part-one/RtMljqM9x.png new file mode 100644 index 0000000..2e988b0 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/RtMljqM9x.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/URW7vc1ih.png 
b/content/post/vra8-custom-provisioning-part-one/URW7vc1ih.png new file mode 100644 index 0000000..c0025d1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/URW7vc1ih.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/VZaK4btzl.png b/content/post/vra8-custom-provisioning-part-one/VZaK4btzl.png new file mode 100644 index 0000000..2fa65fc Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/VZaK4btzl.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/XVD9QVU-S.png b/content/post/vra8-custom-provisioning-part-one/XVD9QVU-S.png new file mode 100644 index 0000000..41257b8 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/XVD9QVU-S.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/XmtEm51h2.png b/content/post/vra8-custom-provisioning-part-one/XmtEm51h2.png new file mode 100644 index 0000000..974fa5c Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/XmtEm51h2.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/index.md b/content/post/vra8-custom-provisioning-part-one/index.md new file mode 100644 index 0000000..352bb3f --- /dev/null +++ b/content/post/vra8-custom-provisioning-part-one/index.md @@ -0,0 +1,244 @@ +--- +series: vRA8 +date: "2021-03-29T08:34:30Z" +thumbnail: VZaK4btzl.png +usePageBundles: true +tags: +- vmware +- vra +title: 'vRA8 Custom Provisioning: Part One' +--- + +I recently shared [some details about my little self-contained VMware homelab](/vmware-home-lab-on-intel-nuc-9) as well as how I [integrated {php}IPAM into vRealize Automation 8 for assigning IPs to deployed VMs](/integrating-phpipam-with-vrealize-automation-8). For my next trick, I'll be crafting a flexible Cloud Template and accompanying vRealize Orchestrator workflow that will help to deploy and configure virtual machines based on a vRA user's input. Buckle up, this is going to be A Ride. 
+ +### Objectives +Before getting into the *how* it would be good to start with the *what* - what exactly are we hoping to accomplish here? For my use case, I'll need a solution which can: +- use a single Cloud Template to provision Windows VMs to one of several designated compute resources across multiple sites. +- generate a unique VM name which matches a defined naming standard. +- allow the requester to specify which site-specific network should be used, and leverage {php}IPAM to assign a static IP. +- pre-stage a computer account in Active Directory in a site-specific Organizational Unit and automatically join the new computer to the domain. +- create a static record in Microsoft DNS for non-domain systems. +- expand the VM's virtual disk *and extend the volume inside the guest* based on request input. +- add specified domain accounts to the guest's local Administrators group based on request input. +- annotate the VM in vCenter with a note to describe the server's purpose and custom attributes to identify the responsible party and originating ticket number. + +Looking back, that's kind of a lot. I can see why I've been working on this for months! + +### vSphere setup +In production, I'll want to be able to deploy to different computer clusters spanning multiple vCenters. That's a bit difficult to do on a single physical server, but I still wanted to be able to simulate that sort of dynamic resource selection. So for development and testing in my lab, I'll be using two sites - `BOW` and `DRE`. 
I ditched the complicated "just because I can" vSAN I'd built previously and instead spun up two single-host nested clusters, one for each of my sites: +![vCenter showing the BOW and DRE clusters](KUCwEgEhN.png) + +Those hosts have one virtual NIC each on a standard switch connected to my home network, and a second NIC each connected to the ["isolated" internal lab network](vmware-home-lab-on-intel-nuc-9#networking) with all the VLANs for the guests to run on: +![dvSwitch showing attached hosts and dvPortGroups](y8vZEnWqR.png) + +### vRA setup +On the vRA side of things, I logged in to the Cloud Assembly portion and went to the Infrastructure tab. I first created a Project named `LAB`, added the vCenter as a Cloud Account, and then created a Cloud Zone for the vCenter as well. On the Compute tab of the Cloud Zone properties, I manually added both the `BOW` and `DRE` clusters. +![BOW and DRE Clusters added to Cloud Zone](sCQKUH07e.png) + +I also created a Network Profile and added each of the nested dvPortGroups I had created for this purpose. +![Network Profile with added vSphere networks](LST4LisFl.png) + +Each network also gets associated with the related IP Range which was [imported from {php}IPAM](/integrating-phpipam-with-vrealize-automation-8). +![IP Range bound to a network](AZsVThaRO.png) + +Since each of my hosts only has 100GB of datastore and my Windows template specifies a 60GB VMDK, I went ahead and created a Storage Profile so that deployments would default to being Thin Provisioned. +![Thin-provision storage profile](3vQER.png) + +I created a few Flavor Mappings ranging from `micro` (1vCPU|1GB RAM) to `giant` (8vCPU|16GB) but for this resource-constrained lab I'll stick mostly to the `micro`, `tiny` (1vCPU|2GB), and `small` (2vCPU|2GB) sizes. 
+![T-shirt size Flavor Mappings](lodJlc8Hp.png) + +And I created an Image Mapping named `ws2019` which points to a Windows Server 2019 Core template I have stored in my lab's Content Library (cleverly-named "LABrary" for my own amusement). +![Windows Server Image Mapping](6k06ySON7.png) + +And with that, my vRA infrastructure is ready for testing a *very* basic deployment. + +### My First Cloud Template +Now it's time to leave the Infrastructure tab and visit the Design one, where I'll create a new Cloud Template (what previous versions of vRA called "Blueprints"). I start by dragging one each of the **vSphere > Machine** and **vSphere > Network** entities onto the workspace. I then pop over to the Code tab on the right to throw together some simple YAML statements: +![My first Cloud Template!](RtMljqM9x.png) + +VMware's got a [pretty great document](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-6BA1DA96-5C20-44BF-9C81-F8132B9B4872.html#list-of-input-properties-2) describing the syntax for these input properties, plus a lot of it is kind of self-explanatory. Let's step through this real quick: +```yaml +formatVersion: 1 +inputs: + # Image Mapping + image: + type: string + title: Operating System + oneOf: + - title: Windows Server 2019 + const: ws2019 + default: ws2019 +``` +`formatVersion` is always gonna be 1 so we'll skip right past that. + +The first input is going to ask the user to select the desired Operating System for this deployment. The `oneOf` type will be presented as a dropdown (with only one option in this case, but I'll leave it this way for future flexibility); the user will see the friendly "Windows Server 2019" `title` which is tied to the `ws2019` `const` value. For now, I'll also set the `default` value of the field so I don't have to actually click the dropdown each time I test the deployment. 
+ +```yaml + # Flavor Mapping + size: + title: Resource Size + type: string + oneOf: + - title: 'Micro [1vCPU|1GB]' + const: micro + - title: 'Tiny [1vCPU|2GB]' + const: tiny + - title: 'Small [2vCPU|2GB]' + const: small + default: small +``` + +Now I'm asking the user to pick the t-shirt size of the VM. These will correspond to the Flavor Mappings I defined earlier. I again chose to use a `oneOf` data type so that I can show the user more information for each option than is embedded in the name. And I'm setting a `default` value to avoid unnecessary clicking. + +The `resources` section is where the data from the inputs gets applied to the deployment: + +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing +``` + +So I'm connecting the selected `input.image` to the Image Mapping configured in vRA, and the selected `input.size` goes back to the Flavor Mapping that will be used for the deployment. I also specify that `Cloud_vSphere_Machine_1` should be connected to `Cloud_vSphere_Network_1` and that it should use a `static` (as opposed to `dynamic`) IP address. Finally, vRA is told that the `Cloud_vSphere_Network_1` should be an existing vSphere network. 
+ +All together now: + +```yaml +formatVersion: 1 +inputs: +  # Image Mapping +  image: +    type: string +    title: Operating System +    oneOf: +      - title: Windows Server 2019 +        const: ws2019 +    default: ws2019 +  # Flavor Mapping +  size: +    title: Resource Size +    type: string +    oneOf: +      - title: 'Micro [1vCPU|1GB]' +        const: micro +      - title: 'Tiny [1vCPU|2GB]' +        const: tiny +      - title: 'Small [2vCPU|2GB]' +        const: small +    default: small +resources: +  Cloud_vSphere_Machine_1: +    type: Cloud.vSphere.Machine +    properties: +      image: '${input.image}' +      flavor: '${input.size}' +      networks: +        - network: '${resource.Cloud_vSphere_Network_1.id}' +          assignment: static +  Cloud_vSphere_Network_1: +    type: Cloud.vSphere.Network +    properties: +      networkType: existing +``` + +Cool! But does it work? Hitting the **Test** button at the bottom right is a great way to validate a template before actually running a deployment. That will confirm that the template syntax, infrastructure, and IPAM configuration is all set up correctly to support this particular deployment. +![Test inputs](lNmduGWr1.png) +![Test results](BA2BWCd6K.png) + +Looks good! I like to click on the **Provisioning Diagram** link to see a bit more detail about where components were placed and why. That's also an immensely helpful troubleshooting option if the test *isn't* successful. +![Provisioning diagram](PIeW8xA2j.png) + +And finally, I can hit that **Deploy** button to actually spin up this VM. +![Deploy this sucker](XmtEm51h2.png) + +Each deployment has to have a *unique* deployment name. I got tired of trying to keep up with what names I had already used so kind of settled on a [DATE]_[TIME] format for my test deployments. I'll automate this tedious step away in the future. + +I then confirm that the (automatically-selected default) inputs are correct and kick it off. +![Deployment inputs](HC6vQMeVT.png) + +The deployment will take a few minutes. 
I like to click over to the **History** tab to see a bit more detail as things progress. +![Deployment history](uklHiv46Y.png) + +It doesn't take too long for activity to show up on the vSphere side of things: +![vSphere is cloning the source template](4dNwfNNDY.png) + +And there's the completed VM - notice the statically-applied IP address courtesy of {php}IPAM! +![Completed test VM](3-UIo1Ykn.png) + +And I can pop over to the IPAM interface to confirm that the IP has been marked as reserved as well: +![Newly-created IPAM reservation](mAfdPLKnp.png) + +Fantastic! But one of my objectives from earlier was to let the user control where a VM gets provisioned. Fortunately it's pretty easy to implement thanks to vRA 8's use of tags. + +### Using tags for resource placement +Just about every entity within vRA 8 can have tags applied to it, and you can leverage those tags in some pretty creative and useful ways. For now, I'll start by applying tags to my compute resources; I'll use `comp:bow` for the "BOW Cluster" and `comp:dre` for the "DRE Cluster". +![Compute tags](oz1IAp-i0.png) + +I'll also use the `net:bow` and `net:dre` tags to logically divide up the networks between my sites: +![Network tags](ngSWbVI4Y.png) + +I can now add an input to the Cloud Template so the user can pick which site they need to deploy to: + +```yaml +inputs: + # Datacenter location + site: + type: string + title: Site + enum: + - BOW + - DRE + # Image Mapping +``` + +I'm using the `enum` option now instead of `oneOf` since the site names shouldn't require further explanation. 
+ +And then I'll add some `constraints` to the `resources` section, making use of the `to_lower` function from the [cloud template expression syntax](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-12F0BC64-6391-4E5F-AA48-C5959024F3EB.html) to automatically convert the selected site name from all-caps to lowercase so it matches the appropriate tag: + +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + constraints: + - tag: 'comp:${to_lower(input.site)}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing + constraints: + - tag: 'net:${to_lower(input.site)}' +``` + +So the VM will now only be deployed to the compute resource and networks which are tagged to match the selected Site identifier. I ran another test to make sure I didn't break anything: +![Testing against the DRE site](Q-2ZQg_ji.png) + +It came back successful, so I clicked through to see the provisioning diagram. On the network tab, I see that only the last two networks (`d1650-Servers-4` and `d1660-Servers-5`) were considered since the first three didn't match the required `net:dre` tag: +![Network provisioning diagram](XVD9QVU-S.png) + +And it's a similar story on the compute tab: +![Compute provisioning diagram](URW7vc1ih.png) + +As a final test for this change, I kicked off one deployment to each site to make sure things worked as expected. +![vSphere showing one VM at each site](VZaK4btzl.png) + +Nice! + +### Conclusion +This was kind of an easy introduction into what I've been doing with vRA 8 these past several months. The basic infrastructure (both in vSphere and vRA) will support far more interesting and flexible deployments as I dig deeper. 
For now, being able to leverage vRA tags for placing workloads on specific compute resources is a great start. + +Things will get *much* more interesting in the next post, where I'll dig into how I'm using vRealize Orchestrator to generate unique computer names which fit a defined naming standard. \ No newline at end of file diff --git a/content/post/vra8-custom-provisioning-part-one/lNmduGWr1.png b/content/post/vra8-custom-provisioning-part-one/lNmduGWr1.png new file mode 100644 index 0000000..d5dd36f Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/lNmduGWr1.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/lodJlc8Hp.png b/content/post/vra8-custom-provisioning-part-one/lodJlc8Hp.png new file mode 100644 index 0000000..507d167 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/lodJlc8Hp.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/mAfdPLKnp.png b/content/post/vra8-custom-provisioning-part-one/mAfdPLKnp.png new file mode 100644 index 0000000..e0f594b Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/mAfdPLKnp.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/ngSWbVI4Y.png b/content/post/vra8-custom-provisioning-part-one/ngSWbVI4Y.png new file mode 100644 index 0000000..fbb3dee Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/ngSWbVI4Y.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/oz1IAp-i0.png b/content/post/vra8-custom-provisioning-part-one/oz1IAp-i0.png new file mode 100644 index 0000000..f2a16c5 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/oz1IAp-i0.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/sCQKUH07e.png b/content/post/vra8-custom-provisioning-part-one/sCQKUH07e.png new file mode 100644 index 0000000..8f6e094 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/sCQKUH07e.png differ diff 
--git a/content/post/vra8-custom-provisioning-part-one/uklHiv46Y.png b/content/post/vra8-custom-provisioning-part-one/uklHiv46Y.png new file mode 100644 index 0000000..f9db302 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/uklHiv46Y.png differ diff --git a/content/post/vra8-custom-provisioning-part-one/y8vZEnWqR.png b/content/post/vra8-custom-provisioning-part-one/y8vZEnWqR.png new file mode 100644 index 0000000..4cac331 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-one/y8vZEnWqR.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/6HBIUf6KE.png b/content/post/vra8-custom-provisioning-part-three/6HBIUf6KE.png new file mode 100644 index 0000000..ebe9ba7 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/6HBIUf6KE.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/7MfV-1uiO.png b/content/post/vra8-custom-provisioning-part-three/7MfV-1uiO.png new file mode 100644 index 0000000..d8fc131 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/7MfV-1uiO.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/GZKQbELfM.png b/content/post/vra8-custom-provisioning-part-three/GZKQbELfM.png new file mode 100644 index 0000000..3501c72 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/GZKQbELfM.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/JT7pbzM-5.png b/content/post/vra8-custom-provisioning-part-three/JT7pbzM-5.png new file mode 100644 index 0000000..bf06e90 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/JT7pbzM-5.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/K6vcxpDj8.png b/content/post/vra8-custom-provisioning-part-three/K6vcxpDj8.png new file mode 100644 index 0000000..4fd8a00 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/K6vcxpDj8.png differ diff --git 
a/content/post/vra8-custom-provisioning-part-three/U6oMWDal2.png b/content/post/vra8-custom-provisioning-part-three/U6oMWDal2.png new file mode 100644 index 0000000..a87b4ef Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/U6oMWDal2.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/iB1bjdC8C.png b/content/post/vra8-custom-provisioning-part-three/iB1bjdC8C.png new file mode 100644 index 0000000..37f1257 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/iB1bjdC8C.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/index.md b/content/post/vra8-custom-provisioning-part-three/index.md new file mode 100644 index 0000000..6635d91 --- /dev/null +++ b/content/post/vra8-custom-provisioning-part-three/index.md @@ -0,0 +1,230 @@ +--- +series: vRA8 +date: "2021-04-19T08:34:30Z" +thumbnail: K6vcxpDj8.png +usePageBundles: true +lastmod: "2021-10-01" +tags: +- vmware +- vra +- vro +- javascript +- powershell +title: 'vRA8 Custom Provisioning: Part Three' +--- + +Picking up after [Part Two](/vra8-custom-provisioning-part-two), I now have a pretty handy vRealize Orchestrator workflow to generate unique hostnames according to a defined naming standard. It even checks against the vSphere inventory to validate the uniqueness. Now I'm going to take it a step (or two, rather) further and extend those checks against Active Directory and DNS. + +### Active Directory +#### Adding an AD endpoint +Remember how I [used the built-in vSphere plugin](/vra8-custom-provisioning-part-two#interlude-connecting-vro-to-vcenter) to let vRO query my vCenter(s) for VMs with a specific name? And how that required first configuring the vCenter endpoint(s) in vRO? I'm going to take a very similar approach here. 
+ +So as before, I'll first need to run the preinstalled "Add an Active Directory server" workflow: +![Add an Active Directory server workflow](uUDJXtWKz.png) + +I fill out the Connection tab like so: +![Connection tab](U6oMWDal2.png) +*I don't have SSL enabled on my homelab AD server so I left that unchecked.* + +On the Authentication tab, I tick the box to use a shared session and specify the service account I'll use to connect to AD. It would be great for later steps if this account has the appropriate privileges to create/delete computer accounts at least within designated OUs. +![Authentication tab](7MfV-1uiO.png) + +If you've got multiple AD servers, you can use the options on the Alternative Hosts tab to specify those, saving you from having to create a new configuration for each. I've just got the one AD server in my lab, though, so at this point I just hit Run. + +Once it completes successfully, I can visit the Inventory section of the vRO interface to confirm that the new Active Directory endpoint shows up: +![New AD endpoint](vlnle_ekN.png) + +#### checkForAdConflict Action +Since I try to keep things modular, I'm going to write a new vRO action within the `net.bowdre.utility` module called `checkForAdConflict` which can be called from the `Generate unique hostname` workflow. It will take in `computerName (String)` as an input and return a boolean `True` if a conflict is found or `False` if the name is available. +![Action: checkForAdConflict](JT7pbzM-5.png) + +It's basically going to loop through the Active Directory hosts defined in vRO and search each for a matching computer name. 
Here's the full code: + +```js +// JavaScript: checkForAdConflict action +// Inputs: computerName (String) +// Outputs: (Boolean) + +var adHosts = AD_HostManager.findAllHosts(); +for each (var adHost in adHosts) { + var computer = ActiveDirectory.getComputerAD(computerName,adHost); + System.log("Searched AD for: " + computerName); + if (computer) { + System.log("Found: " + computer.name); + return true; + } else { + System.log("No AD objects found."); + return false; + } +} +``` + +#### Adding it to the workflow +Now I can pop back over to my massive `Generate unique hostname` workflow and drop in a new scriptable task between the `check for VM name conflicts` and `return nextVmName` tasks. It will bring in `candidateVmName (String)` as well as `conflict (Boolean)` as inputs, return `conflict (Boolean)` as an output, and `errMsg (String)` will be used for exception handling. If `errMsg (String)` is thrown, the flow will follow the dashed red line back to the `conflict resolution` action. +![Action: check for AD conflict](iB1bjdC8C.png) + +I'm using this as a scriptable task so that I can do a little bit of processing before I call the action I created earlier - namely, if `conflict (Boolean)` was already set, the task should skip any further processing. That does mean that I'll need to call the action by both its module and name using `System.getModule("net.bowdre.utility").checkForAdConflict(candidateVmName)`. So here's the full script: + +```js +// JavaScript: check for AD conflict task +// Inputs: candidateVmName (String), conflict (Boolean) +// Outputs: conflict (Boolean) + +if (conflict) { + System.log("Existing conflict found, skipping AD check...") +} else { + var result = System.getModule("net.bowdre.utility").checkForAdConflict(candidateVmName); + // remember this returns 'true' if a conflict is encountered + if (result == true) { + conflict = true; + errMsg = "Conflicting AD object found!" 
+ System.warn(errMsg) + throw(errMsg) + } else { + System.log("No AD conflict found for " + candidateVmName) + } +} +``` + +Cool, so that's the AD check in the bank. Onward to DNS! + +### DNS +**[Update]** Thanks to a [kind commenter](https://github.com/jbowdre/jbowdre.github.io/issues/10#issuecomment-932541245), I've learned that my DNS-checking solution detailed below is somewhat unnecessarily complicated. I overlooked it at the time I was putting this together, but vRO _does_ provide a `System.resolveHostName()` function to easily perform DNS lookups. I've updated the [Adding it to the workflow](#adding-it-to-the-workflow-1) section below with the simplified script which eliminates the need for building an external script with dependencies and importing that as a vRO action, but I'm going to leave those notes in place as well in case anyone else (or Future John) might need to leverage a similar approach to solve another issue. + +Seriously. Go ahead and skip to [here](#adding-it-to-the-workflow-1). + +#### The Challenge (Deprecated) +JavaScript can't talk directly to Active Directory on its own, but in the previous action I was able to leverage the AD plugin built into vRO to bridge that gap. Unfortunately ~~there isn't~~ _I couldn't find_ a corresponding pre-installed plugin that will work as a DNS client. vRO 8 does introduce support for using other languages like (cross-platform) PowerShell or Python instead of being restricted to just JavaScript... but I wasn't able to find an easy solution for querying DNS from those languages either without requiring external modules. (The cross-platform version of PowerShell doesn't include handy Windows-centric cmdlets like `Get-DnsServerResourceRecord`.) + +So I'll have to get creative here. 
+ +#### The Solution (Deprecated) +Luckily, vRO does provide a way to import scripts bundled with their required modules, and I found the necessary clues for doing that [here](https://docs.vmware.com/en/vRealize-Orchestrator/8.3/com.vmware.vrealize.orchestrator-using-client-guide.doc/GUID-3C0CEB11-4079-43DF-B134-08C1D62EE3A4.html). And I found a DNS client written for cross-platform PowerShell in the form of the [DnsClient-PS](https://github.com/rmbolger/DnsClient-PS) module. So I'll write a script locally, package it up with the DnsClient-PS module, and import it as a vRO action. + +I start by creating a folder to store the script and needed module, and then I create the required `handler.ps1` file. + +```shell +❯ mkdir checkDnsConflicts +❯ cd checkDnsConflicts +❯ touch handler.ps1 +``` + +I then create a `Modules` folder and install the DnsClient-PS module: + +```shell +❯ mkdir Modules +❯ pwsh -c "Save-Module -Name DnsClient-PS -Path ./Modules/ -Repository PSGallery" +``` + +And then it's time to write the PowerShell script in `handler.ps1`: + +```powershell +# PowerShell: checkForDnsConflict script +# Inputs: $inputs.hostname (String), $inputs.domain (String) +# Outputs: $queryresult (String) +# +# Returns true if a conflicting record is found in DNS. + +Import-Module DnsClient-PS + +function handler { +  Param($context, $inputs) +  $hostname = $inputs.hostname +  $domain = $inputs.domain +  $fqdn = $hostname + '.' + $domain +  Write-Host "Querying DNS for $fqdn..." +  $resolution = (Resolve-DNS $fqdn) +  If (-not $resolution.HasError) { +    Write-Host "Record found:" ($resolution | Select-Object -Expand Answers).ToString() +    $queryresult = "true" +  } Else { +    Write-Host "No record found." +    $queryresult = "false" +  } +  return $queryresult +} +``` + +Now to package it up in a `.zip` which I can then import into vRO: + +```shell +❯ zip -r --exclude=\*.zip -X checkDnsConflicts.zip . 
+ adding: Modules/ (stored 0%) + adding: Modules/DnsClient-PS/ (stored 0%) + adding: Modules/DnsClient-PS/1.0.0/ (stored 0%) + adding: Modules/DnsClient-PS/1.0.0/Public/ (stored 0%) + adding: Modules/DnsClient-PS/1.0.0/Public/Set-DnsClientSetting.ps1 (deflated 67%) + adding: Modules/DnsClient-PS/1.0.0/Public/Resolve-Dns.ps1 (deflated 67%) + adding: Modules/DnsClient-PS/1.0.0/Public/Get-DnsClientSetting.ps1 (deflated 65%) + adding: Modules/DnsClient-PS/1.0.0/lib/ (stored 0%) + adding: Modules/DnsClient-PS/1.0.0/lib/DnsClient.1.3.1-netstandard2.0.xml (deflated 91%) + adding: Modules/DnsClient-PS/1.0.0/lib/DnsClient.1.3.1-netstandard2.0.dll (deflated 57%) + adding: Modules/DnsClient-PS/1.0.0/lib/System.Buffers.4.4.0-netstandard2.0.xml (deflated 72%) + adding: Modules/DnsClient-PS/1.0.0/lib/System.Buffers.4.4.0-netstandard2.0.dll (deflated 44%) + adding: Modules/DnsClient-PS/1.0.0/DnsClient-PS.psm1 (deflated 56%) + adding: Modules/DnsClient-PS/1.0.0/Private/ (stored 0%) + adding: Modules/DnsClient-PS/1.0.0/Private/Get-NameserverList.ps1 (deflated 68%) + adding: Modules/DnsClient-PS/1.0.0/Private/Resolve-QueryOptions.ps1 (deflated 63%) + adding: Modules/DnsClient-PS/1.0.0/Private/MockWrappers.ps1 (deflated 47%) + adding: Modules/DnsClient-PS/1.0.0/PSGetModuleInfo.xml (deflated 73%) + adding: Modules/DnsClient-PS/1.0.0/DnsClient-PS.Format.ps1xml (deflated 80%) + adding: Modules/DnsClient-PS/1.0.0/DnsClient-PS.psd1 (deflated 59%) + adding: handler.ps1 (deflated 49%) +❯ ls +checkDnsConflicts.zip handler.ps1 Modules +``` + +#### checkForDnsConflict action (Deprecated) +And now I can go into vRO, create a new action called `checkForDnsConflict` inside my `net.bowdre.utilities` module. This time, I change the Language to `PowerCLI 12 (PowerShell 7.0)` and switch the Type to `Zip` to reveal the Import button. +![Preparing to import the zip](sjCtvoZA0.png) + +Clicking that button lets me browse to the file I need to import. 
I can also set up the two input variables that the script requires, `hostname (String)` and `domain (String)`. +![Package imported and variables defined](xPvBx3oVX.png) + +#### Adding it to the workflow +Just like with the `check for AD conflict` action, I'll add this onto the workflow as a scriptable task, this time between that action and the `return nextVmName` one. This will take `candidateVmName (String)`, `conflict (Boolean)`, and `requestProperties (Properties)` as inputs, and will return `conflict (Boolean)` as its sole output. The task will use `errMsg (String)` as its exception binding, which will divert flow via the dashed red line back to the `conflict resolution` task. + +![Task: check for DNS conflict](uSunGKJfH.png) + +_[Update] The below script has been altered to drop the unneeded call to my homemade `checkForDnsConflict` action and instead use the built-in `System.resolveHostName()`. Thanks @powertim!_ + +```js +// JavaScript: check for DNS conflict +// Inputs: candidateVmName (String), conflict (Boolean), requestProperties (Properties) +// Outputs: conflict (Boolean) + +var domain = requestProperties.dnsDomain +if (conflict) { + System.log("Existing conflict found, skipping DNS check...") +} else { + if (System.resolveHostName(candidateVmName + "." + domain)) { + conflict = true; + errMsg = "Conflicting DNS record found!" + System.warn(errMsg) + throw(errMsg) + } else { + System.log("No DNS conflict for " + candidateVmName) + } +} +``` + +### Testing +Once that's all in place, I kick off another deployment to make sure that everything works correctly. After it completes, I can navigate to the **Extensibility > Workflow runs** section of the vRA interface to review the details: +![Workflow run success](GZKQbELfM.png) + +It worked! + +But what if there *had* been conflicts? It's important to make sure that works too. I know that if I run that deployment again, the VM will get named `DRE-DTST-XXX008` and then `DRE-DTST-XXX009`. 
So I'm going to force conflicts by creating an AD object for one and a DNS record for the other. +![Making conflicts](6HBIUf6KE.png) + +And I'll kick off another deployment and see what happens. +![Workflow success even with conflicts](K6vcxpDj8.png) + +The workflow saw that the last VM was created as `-007` so it first grabbed `-008`. It saw that `-008` already existed in AD so incremented up to try `-009`. The workflow then found that a record for `-009` was present in DNS so bumped it up to `-010`. That name finally passed through the checks and so the VM was deployed with the name `DRE-DTST-XXX010`. Success! + +### Next steps +So now I've got a pretty capable workflow for controlled naming of my deployed VMs. The names conform with my established naming scheme and increment predictably in response to naming conflicts in vSphere, Active Directory, and DNS. + +In the next post, I'll be enhancing my cloud template to let users pick which network to use for the deployed VM. That sounds simple, but I'll want the list of available networks to be filtered based on the selected site - that means using a Service Broker custom form to query another vRO action. I will also add the ability to create AD computer objects in a site-specific OU and automatically join the server to the domain. And I'll add notes to the VM to make it easier to remember why it was deployed. + +Stay tuned! 
diff --git a/content/post/vra8-custom-provisioning-part-three/sjCtvoZA0.png b/content/post/vra8-custom-provisioning-part-three/sjCtvoZA0.png new file mode 100644 index 0000000..962a41a Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/sjCtvoZA0.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/uSunGKJfH.png b/content/post/vra8-custom-provisioning-part-three/uSunGKJfH.png new file mode 100644 index 0000000..8ec94d1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/uSunGKJfH.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/uUDJXtWKz.png b/content/post/vra8-custom-provisioning-part-three/uUDJXtWKz.png new file mode 100644 index 0000000..f112c55 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/uUDJXtWKz.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/vlnle_ekN.png b/content/post/vra8-custom-provisioning-part-three/vlnle_ekN.png new file mode 100644 index 0000000..2c727c6 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/vlnle_ekN.png differ diff --git a/content/post/vra8-custom-provisioning-part-three/xPvBx3oVX.png b/content/post/vra8-custom-provisioning-part-three/xPvBx3oVX.png new file mode 100644 index 0000000..257b529 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-three/xPvBx3oVX.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/0fSl55whe.png b/content/post/vra8-custom-provisioning-part-two/0fSl55whe.png new file mode 100644 index 0000000..a88bbce Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/0fSl55whe.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/4B6wN8QeG.png b/content/post/vra8-custom-provisioning-part-two/4B6wN8QeG.png new file mode 100644 index 0000000..8173065 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/4B6wN8QeG.png differ diff --git 
a/content/post/vra8-custom-provisioning-part-two/5QFTPHp5H.png b/content/post/vra8-custom-provisioning-part-two/5QFTPHp5H.png new file mode 100644 index 0000000..3ccd60b Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/5QFTPHp5H.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/5bWfqh4ZSE.png b/content/post/vra8-custom-provisioning-part-two/5bWfqh4ZSE.png new file mode 100644 index 0000000..cbbdc7f Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/5bWfqh4ZSE.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/65ECa7nej.png b/content/post/vra8-custom-provisioning-part-two/65ECa7nej.png new file mode 100644 index 0000000..d989dc7 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/65ECa7nej.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/6Gpxapzd3.png b/content/post/vra8-custom-provisioning-part-two/6Gpxapzd3.png new file mode 100644 index 0000000..2ac6a47 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/6Gpxapzd3.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/6PA6lIOcP.png b/content/post/vra8-custom-provisioning-part-two/6PA6lIOcP.png new file mode 100644 index 0000000..2a2fbd5 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/6PA6lIOcP.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/7Sb3j2PS3.png b/content/post/vra8-custom-provisioning-part-two/7Sb3j2PS3.png new file mode 100644 index 0000000..595beb6 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/7Sb3j2PS3.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/AiFwzSpWS.png b/content/post/vra8-custom-provisioning-part-two/AiFwzSpWS.png new file mode 100644 index 0000000..2dd926d Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/AiFwzSpWS.png differ diff --git 
a/content/post/vra8-custom-provisioning-part-two/BOIwhMxKy.png b/content/post/vra8-custom-provisioning-part-two/BOIwhMxKy.png new file mode 100644 index 0000000..5b58d66 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/BOIwhMxKy.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/BhBnBh8VB.png b/content/post/vra8-custom-provisioning-part-two/BhBnBh8VB.png new file mode 100644 index 0000000..2b3ca3e Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/BhBnBh8VB.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/ByIWO66PC.png b/content/post/vra8-custom-provisioning-part-two/ByIWO66PC.png new file mode 100644 index 0000000..a79b2d1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/ByIWO66PC.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/F0IZHRj-J.png b/content/post/vra8-custom-provisioning-part-two/F0IZHRj-J.png new file mode 100644 index 0000000..04921e7 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/F0IZHRj-J.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/G0TEJ30003.png b/content/post/vra8-custom-provisioning-part-two/G0TEJ30003.png new file mode 100644 index 0000000..b5bc633 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/G0TEJ30003.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/HXrAMJrH.png b/content/post/vra8-custom-provisioning-part-two/HXrAMJrH.png new file mode 100644 index 0000000..18f35c5 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/HXrAMJrH.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/IzaMb39C-.png b/content/post/vra8-custom-provisioning-part-two/IzaMb39C-.png new file mode 100644 index 0000000..af6ef12 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/IzaMb39C-.png differ diff --git 
a/content/post/vra8-custom-provisioning-part-two/PubHnv_jM.png b/content/post/vra8-custom-provisioning-part-two/PubHnv_jM.png new file mode 100644 index 0000000..0506cea Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/PubHnv_jM.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/R9d8edeFP.png b/content/post/vra8-custom-provisioning-part-two/R9d8edeFP.png new file mode 100644 index 0000000..b94afa0 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/R9d8edeFP.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/RuqJhj00_.png b/content/post/vra8-custom-provisioning-part-two/RuqJhj00_.png new file mode 100644 index 0000000..fe9c3e4 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/RuqJhj00_.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/SOPs3mzTs.png b/content/post/vra8-custom-provisioning-part-two/SOPs3mzTs.png new file mode 100644 index 0000000..912b2c1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/SOPs3mzTs.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/UIafeShcv.png b/content/post/vra8-custom-provisioning-part-two/UIafeShcv.png new file mode 100644 index 0000000..f49290e Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/UIafeShcv.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/XATryy20y.png b/content/post/vra8-custom-provisioning-part-two/XATryy20y.png new file mode 100644 index 0000000..16a6755 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/XATryy20y.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/Zyha7vAwi.png b/content/post/vra8-custom-provisioning-part-two/Zyha7vAwi.png new file mode 100644 index 0000000..44f2908 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/Zyha7vAwi.png differ diff --git 
a/content/post/vra8-custom-provisioning-part-two/aQg91t93a.png b/content/post/vra8-custom-provisioning-part-two/aQg91t93a.png new file mode 100644 index 0000000..9a0afe0 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/aQg91t93a.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/add_new.png b/content/post/vra8-custom-provisioning-part-two/add_new.png new file mode 100644 index 0000000..345af28 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/add_new.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/afDacKjVx.png b/content/post/vra8-custom-provisioning-part-two/afDacKjVx.png new file mode 100644 index 0000000..9eb7ebc Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/afDacKjVx.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/cONrdrbb6.png b/content/post/vra8-custom-provisioning-part-two/cONrdrbb6.png new file mode 100644 index 0000000..5527e21 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/cONrdrbb6.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/dhcjdDo-E.png b/content/post/vra8-custom-provisioning-part-two/dhcjdDo-E.png new file mode 100644 index 0000000..b336174 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/dhcjdDo-E.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/eZ1BUfesQ.png b/content/post/vra8-custom-provisioning-part-two/eZ1BUfesQ.png new file mode 100644 index 0000000..e5a71d1 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/eZ1BUfesQ.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/fWlSrD56N.png b/content/post/vra8-custom-provisioning-part-two/fWlSrD56N.png new file mode 100644 index 0000000..fa82efc Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/fWlSrD56N.png differ diff --git 
a/content/post/vra8-custom-provisioning-part-two/gIEFRnilq.png b/content/post/vra8-custom-provisioning-part-two/gIEFRnilq.png new file mode 100644 index 0000000..4042cf4 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/gIEFRnilq.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/h_PHeT6af.png b/content/post/vra8-custom-provisioning-part-two/h_PHeT6af.png new file mode 100644 index 0000000..793649c Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/h_PHeT6af.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/index.md b/content/post/vra8-custom-provisioning-part-two/index.md new file mode 100644 index 0000000..0e50b4c --- /dev/null +++ b/content/post/vra8-custom-provisioning-part-two/index.md @@ -0,0 +1,614 @@ +--- +series: vRA8 +date: "2021-04-02T08:34:30Z" +lastmod: "2022-03-23" +thumbnail: HXrAMJrH.png +usePageBundles: true +tags: +- vmware +- vra +- vro +- javascript +title: 'vRA8 Custom Provisioning: Part Two' +--- + +We [last left off this series](/vra8-custom-provisioning-part-one) after I'd set up vRA, performed a test deployment off of a minimal cloud template, and then enhanced the simple template to use vRA tags to let the user specify where a VM should be provisioned. But these VMs have kind of dumb names; right now, they're just getting named after the user who requests it + a random couple of digits, courtesy of a simple [naming template defined on the project's Provisioning page](https://docs.vmware.com/en/vRealize-Automation/8.3/Using-and-Managing-Cloud-Assembly/GUID-AD400ED7-EB3A-4D36-B9A7-81E100FB3003.html?hWord=N4IghgNiBcIHZgLYEs4HMQF8g): +![Naming template](zAF26KJnO.png) + +I could use this naming template to *almost* accomplish what I need from a naming solution, but I don't like that the numbers are random rather than a sequence (I want to deploy `server001` followed by `server002` rather than `server343` followed by `server718`). 
And it's not enough for me that a VM's name be unique just within the scope of vRA - the hostname should be unique across my entire environment. + +So I'm going to have to get my hands dirty and develop a new solution using vRealize Orchestrator. For right now, it should create a name for a VM that fits a defined naming schema, while also ensuring that the name doesn't already exist within vSphere. (I'll add checks against Active Directory and DNS in the next post.) + +### What's in a name? +For my environment, servers should be named like `BOW-DAPP-WEB001` where: +- `BOW` indicates the site code. +- `D` describes the environment, Development in this case. +- `APP` designates the server's function; this one is an application server. +- `WEB` describes the primary application running on the server; this one hosts a web server. +- `001` is a three-digit sequential designation to differentiate between similar servers. + +So in vRA's custom naming template syntax, this could look something like: +- `${site}-${environment}${function}-${application}${###}` + +Okay, this plan is coming together. + +### Adding more inputs to the cloud template +I'll start by adding those fields as inputs on my cloud template. + +I already have a `site` input at the top of the template, used for selecting the deployment location. 
I'll leave that there: + +```yaml +inputs: + site: + type: string + title: Site + enum: + - BOW + - DRE +``` + +I'll add the rest of the naming components below the prompts for image selection and size, starting with a dropdown of environments to pick from: + +```yaml + environment: + type: string + title: Environment + enum: + - Development + - Testing + - Production + default: Development +``` + +And a dropdown for those function options: + +```yaml + function: + type: string + title: Function Code + oneOf: + - title: Application (APP) + const: APP + - title: Desktop (DSK) + const: DSK + - title: Network (NET) + const: NET + - title: Service (SVS) + const: SVS + - title: Testing (TST) + const: TST + default: TST +``` + +And finally a text entry field for the application descriptor. Note that this one includes the `minLength` and `maxLength` constraints to enforce the three-character format. + +```yaml + app: + type: string + title: Application Code + minLength: 3 + maxLength: 3 + default: xxx +``` + +*We won't discuss what kind of content this server is going to host...* + +I then need to map these inputs to the resource entity at the bottom of the template so that they can be passed to vRO as custom properties. All of these are direct mappings except for `environment` since I only want the first letter. I use the `substring()` function to achieve that, but wrap it in a conditional so that it won't implode if the environment hasn't been picked yet. I'm also going to add in a `dnsDomain` property that will be useful later when I need to query for DNS conflicts. + +```yaml +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + site: '${input.site}' + environment: '${input.environment != "" ? 
substring(input.environment,0,1) : ""}' + function: '${input.function}' + app: '${input.app}' + dnsDomain: lab.bowdre.net +``` + +So here's the complete template: + +```yaml +formatVersion: 1 +inputs: + site: + type: string + title: Site + enum: + - BOW + - DRE + image: + type: string + title: Operating System + oneOf: + - title: Windows Server 2019 + const: ws2019 + default: ws2019 + size: + title: Resource Size + type: string + oneOf: + - title: 'Micro [1vCPU|1GB]' + const: micro + - title: 'Tiny [1vCPU|2GB]' + const: tiny + - title: 'Small [2vCPU|2GB]' + const: small + default: small + environment: + type: string + title: Environment + enum: + - Development + - Testing + - Production + default: Development + function: + type: string + title: Function Code + oneOf: + - title: Application (APP) + const: APP + - title: Desktop (DSK) + const: DSK + - title: Network (NET) + const: NET + - title: Service (SVS) + const: SVS + - title: Testing (TST) + const: TST + default: TST + app: + type: string + title: Application Code + minLength: 3 + maxLength: 3 + default: xxx +resources: + Cloud_vSphere_Machine_1: + type: Cloud.vSphere.Machine + properties: + image: '${input.image}' + flavor: '${input.size}' + site: '${input.site}' + environment: '${input.environment != "" ? substring(input.environment,0,1) : ""}' + function: '${input.function}' + app: '${input.app}' + dnsDomain: lab.bowdre.net + networks: + - network: '${resource.Cloud_vSphere_Network_1.id}' + assignment: static + constraints: + - tag: 'comp:${to_lower(input.site)}' + Cloud_vSphere_Network_1: + type: Cloud.vSphere.Network + properties: + networkType: existing + constraints: + - tag: 'net:${to_lower(input.site)}' +``` + +Great! Here's what it looks like on the deployment request: +![Deployment request with naming elements](iqsHm5zQR.png) + +...but the deployed VM got named `john-329`. Why? 
+![VM deployed with a lame name](lUo1odZ03.png) + +Oh yeah, I need to create a thing that will take these naming elements, mash them together, check for any conflicts, and then apply the new name to the VM. vRealize Orchestrator, it's your time! + +### Setting up vRO config elements +When I first started looking for a naming solution, I found a [really handy blog post from Michael Poore](https://blog.v12n.io/custom-naming-in-vrealize-automation-8x-1/) that described his solution to doing custom naming. I wound up following his general approach but had to adapt it a bit to make the code work in vRO 8 and to add in the additional checks I wanted. So credit to Michael for getting me pointed in the right direction! + +I start by hopping over to the Orchestrator interface and navigating to the Configurations section. I'm going to create a new configuration folder named `CustomProvisioning` that will store all the Configuration Elements I'll use to configure my workflows on this project. +![Configuration Folder](y7JKSxsqE.png) + +Defining certain variables within configurations separates those from the workflows themselves, making the workflows much more portable. That will allow me to transfer the same code between multiple environments (like my homelab and my lab environment at work) without having to rewrite a bunch of hardcoded values. + +Now I'll create a new configuration within the new folder. This will hold information about the naming schema so I name it `namingSchema`. In it, I create two strings to define the base naming format (up to the numbers on the end) and full name format (including the numbers). I define `baseFormat` and `nameFormat` as templates based on what I put together earlier. +![The namingSchema configuration](zLec-3X_D.png) + +I also create another configuration named `computerNames`. 
When vRO picks a name for a VM, it will record it here as a `number` variable named after the "base name" (`BOW-DAPP-WEB`) and the last-used sequence as the value (`001`). This will make it quick-and-easy to see what the next VM should be named. For now, though, I just need the configuration to not be empty so I add a single variable named `sample` just to take up space. +![The computerNames configuration](pqrvUNmsj.png) + +Okay, now it's time to get messy. + +### The vRO workflow +Just like with the configuration elements, I create a new workflow folder named `CustomProvisioning` to keep all my workflows together. And then I make a `VM Provisioning` workflow that will be used for pre-provisioning tasks. +![Workflow organization](qt-D1mJFE.png) + +On the Inputs/Outputs tab of the workflow, I create a single input named `inputProperties` of type `Properties` which will hold all the information about the deployment coming from the vRA side of things. +![inputProperties](tiBJVKYdf.png) + +#### Logging the input properties +The first thing I'll want this workflow to do (particularly for testing) is to tell me about the input data from vRA. That will help to eliminate a lot of guesswork. I could just write a script within the workflow to do that, but creating it as a separate action will make it easier to reuse in other workflows. Behold, the `logPayloadProperties` action (nested within the `net.bowdre.utility` module which contains some spoilers for what else is to come!): +![image.png](0fSl55whe.png) + +This action has a single input, a `Properties` object named `payload`. (By the way, vRO is pretty particular about variable typing so going forward I'll reference variables as `variableName (type)`.) 
Here's the JavaScript that will basically loop through each element and write the contents to the vRO debug log: + +```js +// JavaScript: logPayloadProperties +// Inputs: payload (Properties) +// Outputs: none + +System.debug("==== Begin: vRA Event Broker Payload Properties ===="); +logAllProperties(payload,0); +System.debug("==== End: vRA Event Broker Payload Properties ===="); + +function logAllProperties(props,indent) { + var keys = (props.keys).sort(); + for each (var key in keys) { + var prop = props.get(key); + var type = System.getObjectType(prop); + if (type == "Properties") { + logSingleProperty(key,prop,indent); + logAllProperties(prop,indent+1); + } else { + logSingleProperty(key,prop,indent); + } + } +} + +function logSingleProperty(name,value,i) { + var prefix = ""; + if (i > 0) { + var prefix = Array(i+1).join("-") + " "; + } + System.debug(prefix + name + " :: " + System.getObjectType(value) + " :: " + value); +} +``` + +Going back to my VM Provisioning workflow, I drag an Action Element onto the canvas and tie it to my new action, passing in `inputProperties (Properties)` as the input: +![image.png](o8CgTjSYm.png) + +#### Event Broker Subscription +And at this point I save the workflow. I'm not finished with it - not by a long shot! - but this is a great place to get the workflow plumbed up to vRA and run a quick test. So I go to the vRA interface, hit up the Extensibility tab, and create a new subscription. I name it "VM Provisioning" and set it to fire on the "Compute allocation" event, which will happen right before the VM starts getting created. I link in my VM Provisioning workflow, and also set this as a blocking execution so that no other/future workflows will run until this one completes. +![VM Provisioning subscription](IzaMb39C-.png) + +Alrighty, let's test this and see if it works. I head back to the Design tab and kick off another deployment. +![Test deployment](6PA6lIOcP.png) + +I'm going to go grab some more coffee while this runs. 
+![Successful deployment](Zyha7vAwi.png) + +And we're back! Now that the deployment completed successfully, I can go back to the Orchestrator view and check the Workflow Runs section to confirm that the VM Provisioning workflow did fire correctly. I can click on it to get more details, and the Logs tab will show me all the lovely data logged by the `logPayloadProperties` action running from the workflow. +![Logged payload properties](AiFwzSpWS.png) + +That information can also be seen on the Variables tab: +![So many variables](65ECa7nej.png) + +A really handy thing about capturing the data this way is that I can use the Run Again or Debug button to execute the vRO workflow again without having to actually deploy a new VM from vRA. This will be great for testing as I press onward. + +(Note that I can't actually edit the workflow directly from this workflow run; I have to go back into the workflow itself to edit it, but then I can re-run the run and it will execute the new code with the already-stored variables.) + +#### Wrapper workflow +I'm going to use this VM Provisioning workflow as a sort of top-level wrapper. This workflow will have a task to parse the payload and grab the variables that will be needed for naming a VM, and it will also have a task to actually rename the VM, but it's going to delegate the name generation to another nested workflow. Making the workflows somewhat modular will make it easier to make changes in the future if needed. + +Anyway, I drop a Scriptable Task item onto the workflow canvas to handle parsing the payload - I'll call it `parse payload` - and pass it `inputProperties (Properties)` as its input. 
+![parse payload task](aQg91t93a.png) + +The script for this is pretty straight-forward: + +```js +// JavaScript: parse payload +// Inputs: inputProperties (Properties) +// Outputs: requestProperties (Properties), originalNames (Array/string) + +var customProperties = inputProperties.customProperties || new Properties(); +var requestProperties = new Properties(); + +requestProperties.site = customProperties.site; +requestProperties.environment = customProperties.environment; +requestProperties.function = customProperties.function; +requestProperties.app = customProperties.app; +requestProperties.dnsDomain = customProperties.dnsDomain; + +System.debug("requestProperties: " + requestProperties) + +originalNames = inputProperties.resourceNames || new Array(); +System.debug("Original names: " + originalNames) +``` + +It creates a new `requestProperties (Properties)` variable to store the limited set of properties that will be needed for naming - `site`, `environment`, `function`, and `app`. It also stores a copy of the original `resourceNames (Array/string)`, which will be useful when we need to replace the old name with the new one. To make those two new variables accessible to other parts of the workflow, I'll need to also create the variables at the workflow level and map them as outputs of this task: +![outputs mapped](4B6wN8QeG.png) + +I'll also drop in a "Foreach Element" item, which will run a linked workflow once for each item in an input array (`originalNames (Array/string)` in this case). I haven't actually created that nested workflow yet so I'm going to skip selecting that for now. +![Nested workflow placeholder](UIafeShcv.png) + +The final step of this workflow will be to replace the existing contents of `resourceNames (Array/string)` with the new name. + +I'll do that with another scriptable task element, named `Apply new names`, which takes `inputProperties (Properties)` and `newNames (Array/string)` as inputs. 
It will return `resourceNames (Array/string)` as a *workflow output* back to vRA. vRA will see that `resourceNames` has changed and it will update the name of the deployed resource (the VM) accordingly. + +{{% notice info "Binding a workflow output" %}} +To easily create a new workflow output and bind it to a task's output, click the task's **Add New** option like usual: +![](add_new.png) +Select **Output** at the top of the *New Variable* dialog and then complete the form with the other required details: +![](new_output_parameter.png) +{{% /notice %}} + + +![Apply new names task](h_PHeT6af.png) + +And here's the script for that task: + +```js +// JavaScript: Apply new names +// Inputs: inputProperties (Properties), newNames (Array/string) +// Outputs: resourceNames (Array/string) + +resourceNames = inputProperties.get("resourceNames"); +for (var i = 0; i < newNames.length; i++) { + System.log("Replacing resourceName '" + resourceNames[i] + "' with '" + newNames[i] + "'"); + resourceNames[i] = newNames[i]; +} +``` + +Now's a good time to save this workflow (ignoring the warning about it failing validation for now), and create a new workflow that the VM Provisioning workflow can call to actually generate unique hostnames. + +### Nested workflow +I'm a creative person so I'm going to call this workflow "Generate unique hostname". It's going to receive `requestProperties (Properties)` as its sole input, and will return `nextVmName (String)` as its sole output. +![Workflow input and output](5bWfqh4ZSE.png) + +I will also need to bind a couple of workflow variables to those configuration elements I created earlier. This is done by creating the variable as usual (`baseFormat (string)` in this case), toggling the "Bind to configuration" option, and then searching for the appropriate configuration. It's important to make sure the selected type matches that of the configuration element - otherwise it won't show up in the list. 
+![Binding a variable to a configuration](PubHnv_jM.png) + +I do the same for the `nameFormat (string)` variable as well. +![Configuration variables added to the workflow](7Sb3j2PS3.png) + +#### Task: create lock +Okay, on to the schema. This workflow may take a little while to execute, and it would be bad if another deployment came in while it was running - the two runs might both assign the same hostname without realizing it. Fortunately vRO has a locking system which can be used to avoid that. Accordingly, I'll add a scriptable task element to the canvas and call it `create lock`. It will have two inputs used for identifying the lock so that it can be easily removed later on, so I create a new variable `lockOwner (string)` with the value `eventBroker` and another named `lockId (string)` set to `namingLock`. +![Task: create lock](G0TEJ30003.png) + +The script is very short: + +```js +// JavaScript: create lock +// Inputs: lockOwner (String), lockId (String) +// Outputs: none + +System.debug("Creating lock...") +LockingSystem.lockAndWait(lockId, lockOwner) +``` + +#### Task: generate hostnameBase +We're getting to the meat of the operation now - another scriptable task named `generate hostnameBase` which will take the naming components from the deployment properties and stick them together in the form defined in the `nameFormat (String)` configuration. The inputs will be the existing `nameFormat (String)`, `requestProperties (Properties)`, and `baseFormat (String)` variables, and it will output new `hostnameBase (String)` ("`BOW-DAPP-WEB`") and `digitCount (Number)` ("`3`", one for each `#` in the format) variables. I'll also go ahead and initialize `hostnameSeq (Number)` to `0` to prepare for a later step. 
+![Task: generate hostnameBase](XATryy20y.png) + + +```js +// JavaScript: generate hostnameBase +// Inputs: nameFormat (String), requestProperties (Properties), baseFormat (String) +// Outputs: hostnameBase (String), digitCount (Number), hostnameSeq (Number) + +hostnameBase = baseFormat; +digitCount = nameFormat.match(/(#)/g).length; +hostnameSeq = 0; + +// Get request keys and drop them into the template +for each (var key in requestProperties.keys) { + var propValue = requestProperties.get(key); + hostnameBase = hostnameBase.replace("{{" + key + "}}", propValue); +} + +// Remove leading/trailing special characters from hostname base +hostnameBase = hostnameBase.toUpperCase(); +hostnameBase = hostnameBase.replace(/([-_]$)/g, ""); +hostnameBase = hostnameBase.replace(/^([-_])/g, ""); +System.debug("Hostname base: " + hostnameBase) +``` + +#### Interlude: connecting vRO to vCenter +Coming up, I'm going to want to connect to vCenter so I can find out if there are any existing VMs with a similar name. I'll use the vSphere vCenter Plug-in which is included with vRO to facilitate that, but that means I'll first need to set up that connection. So I'll save the workflow I've been working on (save early, save often) and then go run the preloaded "Add a vCenter Server instance" workflow. The first page of required inputs is pretty self-explanatory: +![Add a vCenter Server instance - vCenter properties](6Gpxapzd3.png) + +On the connection properties page, I unchecked the per-user connection in favor of using a single service account, the same one that I'm already using for vRA's connection to vCenter. +![Add a vCenter Server instance - Connection properties](RuqJhj00_.png) + +After successful completion of the workflow, I can go to Administration > Inventory and confirm that the new endpoint is there: +![vCenter plugin endpoint](rUmGPdz2I.png) + +I've only got the one vCenter in my lab. 
At work, I've got multiple vCenters so I would need to repeat these steps to add each of them as an endpoint. + +#### Task: prepare vCenter SDK connection +Anyway, back to my "Generate unique hostname" workflow, where I'll add another scriptable task to prepare the vCenter SDK connection. This one doesn't require any inputs, but will output an array of `VC:SdkConnection` objects: +![Task: prepare vCenter SDK connection](ByIWO66PC.png) + +```js +// JavaScript: prepare vCenter SDK connection +// Inputs: none +// Outputs: sdkConnections (Array/VC:SdkConnection) + +sdkConnections = VcPlugin.allSdkConnections +System.log("Preparing vCenter SDK connection...") +``` + +#### ForEach element: search VMs by name +Next, I'm going to drop another ForEach element onto the canvas. For each vCenter endpoint in `sdkConnections (Array/VC:SdkConnection)`, it will execute the workflow titled "Get virtual machines by name with PC". I map the required `vc` input to `*sdkConnections (VC:SdkConnection)`, `filter` to `hostnameBase (String)`, and skip `rootVmFolder` since I don't care where a VM resides. And I create a new `vmsByHost (Array/Array)` variable to hold the output. +![ForEach: search VMs by name](mnOxV2udH.png) + +#### Task: unpack results for all hosts +That `vmsByHost (Array/array)` object contains any and all VMs which match `hostnameBase (String)`, but they're broken down by the host they're running on. So I use a scriptable task to convert that array-of-arrays into a new array-of-strings containing just the VM names. 
+![Task: unpack results for all hosts](gIEFRnilq.png) + +```js +// JavaScript: unpack results for all hosts +// Inputs: vmsByHost (Array/Array) +// Outputs: vmNames (Array/string) + +var vms = new Array(); +vmNames = new Array(); + +for (host in vmsByHost) { + var a = vmsByHost[host] + for (vm in a) { + vms.push(a[vm]) + } +} +vmNames = vms.map(function(i) {return (i.displayName).toUpperCase()}) +``` + +#### Task: generate hostnameSeq & candidateVmName +This scriptable task will check the `computerNames` configuration element we created earlier to see if we've already named a VM starting with `hostnameBase (String)`. If such a name exists, we'll increment the number at the end by one, and return that as a new `hostnameSeq (Number)` variable; if it's the first of its kind, `hostnameSeq (Number)` will be set to `1`. And then we'll combine `hostnameBase (String)` and `hostnameSeq (Number)` to create the new `candidateVmName (String)`. If things don't work out, this script will throw `errMsg (String)` so I need to add that as an output exception binding as well. 
+![Task: generate hostnameSeq & candidateVmName](fWlSrD56N.png) + +```js +// JavaScript: generate hostnameSeq & candidateVmName +// Inputs: hostnameBase (String), digitCount (Number) +// Outputs: hostnameSeq (Number), computerNames (ConfigurationElement), candidateVmName (String) + +// Get computerNames configurationElement, which lives in the 'CustomProvisioning' folder +// Specify a different path if the CE lives somewhere else +var category = Server.getConfigurationElementCategoryWithPath("CustomProvisioning") +var elements = category.configurationElements +for (var i in elements) { + if (elements[i].name == "computerNames") { + computerNames = elements[i] + } +} + +// Lookup hostnameBase and increment sequence value +try { + var attribute = computerNames.getAttributeWithKey(hostnameBase); + hostnameSeq = attribute.value; + System.debug("Found " + attribute.name + " with sequence " + attribute.value) +} catch (e) { + System.debug("Hostname base " + hostnameBase + " does not exist, it will be created.") +} finally { + hostnameSeq++; + if (hostnameSeq.toString().length > digitCount) { + errMsg = 'All out of potential VM names, aborting...'; + throw(errMsg); + } + System.debug("Adding " + hostnameBase + " with sequence " + hostnameSeq) + computerNames.setAttributeWithKey(hostnameBase, hostnameSeq) +} + +// Convert number to string and add leading zeroes +var hostnameNum = hostnameSeq.toString(); +var leadingZeroes = new Array(digitCount - hostnameNum.length + 1).join("0"); +hostnameNum = leadingZeroes + hostnameNum; + +// Produce our candidate VM name +candidateVmName = hostnameBase + hostnameNum; +candidateVmName = candidateVmName.toUpperCase(); +System.log("Proposed VM name: " + candidateVmName) +``` + +#### Task: check for VM name conflicts +Now that I know what I'd like to try to name this new VM, it's time to start checking for any potential conflicts. 
So this task will compare my `candidateVmName (String)` against the existing `vmNames (Array/string)` to see if there are any collisions. If there's a match, it will set a new variable called `conflict (Boolean)` to `true` and also report the issue through the `errMsg (String)` output exception binding. Otherwise it will move on to the next check. +![Task: check for VM name conflicts](qmHszypww.png) + +```js +// JavaScript: check for VM name conflicts +// Inputs: candidateVmName (String), vmNames (Array/string) +// Outputs: conflict (Boolean) + +for (i in vmNames) { + if (vmNames[i] == candidateVmName) { + conflict = true; + errMsg = "Found a conflicting VM name!" + System.warn(errMsg) + throw(errMsg) + } +} +System.log("No VM name conflicts found for " + candidateVmName) +``` + +#### Conflict resolution +So what happens if there *is* a naming conflict? This solution wouldn't be very flexible if it just gave up as soon as it encountered a problem. Fortunately, I planned for this - all I need to do in the event of a conflict is to run the `generate hostnameSeq & candidateVmName` task again to increment `hostnameSeq (Number)` by one, use that to create a new `candidateVmName (String)`, and then continue on with the checks. + +So far, all of the workflow elements have been connected with happy blue lines which show the flow when everything is going according to the plan. Remember that `errMsg (String)` from the last task? When that gets thrown, the flow will switch to follow an angry dashed red line (if there is one). After dropping a new scriptable task onto the canvas, I can click on the blue line connecting it to the previous item and then click the red X to make it go away. +![So long, Blue Line!](BOIwhMxKy.png) + +I can then drag the new element away from the "everything is fine" flow, and connect it to the `check for VM name conflict` element with that angry dashed red line. 
Once `conflict resolution` completes (successfully), a happy blue line will direct the flow back to `generate hostnameSeq & candidateVmName` so that the sequence can be incremented and the checks performed again. And finally, a blue line will connect the `check for VM name conflict` task's successful completion to the end of the workflow:
+![Error -> fix it -> try again](dhcjdDo-E.png)
+
+All this task really does is clear the `conflict (Boolean)` flag so that's the only output.
+
+```js
+// JavaScript: conflict resolution
+// Inputs: none
+// Outputs: conflict (Boolean)
+
+System.log("Conflict encountered, trying a new name...")
+conflict = false;
+```
+
+So if `check VM name conflict` encounters a collision with an existing VM name it will set `conflict (Boolean) = true;` and throw `errMsg (String)`, which will divert the flow to the `conflict resolution` task. That task will clear the `conflict (Boolean)` flag and return flow to `generate hostnameSeq & candidateVmName`, which will attempt to increment `hostnameSeq (Number)`. Note that this task doesn't have a dashed red line escape route; if it needs to throw `errMsg (String)` because of exhausting the number pool it will abort the workflow entirely.
+
+#### Task: return nextVmName
+Assuming that everything has gone according to plan and the workflow has avoided any naming conflicts, it will need to return `nextVmName (String)` back to the `VM Provisioning` workflow. That's as simple as setting it to the last value of `candidateVmName (String)`:
+![Task: return nextVmName](5QFTPHp5H.png)
+
+```js
+// JavaScript: return nextVmName
+// Inputs: candidateVmName (String)
+// Outputs: nextVmName (String)
+
+nextVmName = candidateVmName;
+System.log(" ***** Selecting [" + nextVmName + "] as the next VM name ***** ")
+```
+
+#### Task: remove lock
+And we should also remove that lock that we created at the start of this workflow. 
+![Task: remove lock](BhBnBh8VB.png) + +```js +// JavaScript remove lock +// Inputs: lockId (String), lockOwner (String) +// Outputs: none + +System.debug("Releasing lock...") +LockingSystem.unlock(lockId, lockOwner) +``` + +Done! Well, mostly. Right now the workflow only actually releases the lock if it completes successfully. Which brings me to: + +#### Default error handler +I can use a default error handler to capture an abort due to running out of possible names, release the lock (with an exact copy of the `remove lock` task), and return (failed) control back to the parent workflow. +![Default error handler](afDacKjVx.png) + +Because the error handler will only fire when the workflow has failed catastrophically, I'll want to make sure the parent workflow knows about it. So I'll set the end mode to "Error, throw an exception" and bind it to that `errMsg (String)` variable to communicate the problem back to the parent. +![End Mode](R9d8edeFP.png) + +#### Finalizing the VM Provisioning workflow +When I had dropped the foreach workflow item into the VM Provisioning workflow earlier, I hadn't configured anything but the name. Now that the nested workflow is complete, I need to fill in the blanks: +![Generate unique hostname](F0IZHRj-J.png) + +So for each item in `originalNames (Array/string)`, this will run the workflow named `Generate unique hostname`. The input to the workflow will be `requestProperties (Properties)`, and the output will be `newNames (Array/string)`. + + +### Putting it all together now +Hokay, so. I've got configuration elements which hold the template for how I want servers to be named and also track which names have been used. My cloud template asks the user to input certain details which will be used to create a useful computer name. 
And I've added an extensibility subscription in Cloud Assembly which will call this vRealize Orchestrator workflow before the VM gets created:
+![Workflow: VM Provisioning](cONrdrbb6.png)
+
+This workflow first logs all the properties obtained from the vRA side of things, then parses the properties to grab the necessary details. It then passes that information to a nested workflow to actually generate the hostname. Once it gets a result, it updates the deployment properties with the new name so that vRA can configure the VM accordingly.
+
+The nested workflow is a bit more complicated:
+![Workflow: Generate unique hostname](siEJSdeDE.png)
+
+It first creates a lock to ensure there won't be multiple instances of this workflow running simultaneously, and then processes data coming from the "parent" workflow to extract the details needed for this workflow. It smashes together the naming elements (site, environment, function, etc) to create a naming base, then connects to each defined vCenter to compile a list of any VMs with the same base. The workflow then consults a configuration element to see which (if any) similar names have already been used, and generates a suggested VM name based on that. It then consults the existing VM list to see if there might be any collisions; if so, it flags the conflict and loops back to generate a new name and try again. Once the conflicts are all cleared, the suggested VM name is made official and returned back up to the VM Provisioning workflow.
+
+Cool. But does it actually work?
+
+### Testing
+Remember how I had tested my initial workflow just to see which variables got captured? Now that the workflow has a bit more content, I can just re-run it without having to actually provision a new VM. After doing so, the logging view reports that it worked! 
+![Sweet success!](eZ1BUfesQ.png) + +I can also revisit the `computerNames` configuration element to see the new name reflected there: +![More success](krx8rZMmh.png) + +If I run the workflow again, I should see `DRE-DTST-XXX002` assigned, and I do! +![Twice as nice](SOPs3mzTs.png) + +And, finally, I can go back to vRA and request a new VM and confirm that the name gets correctly applied to the VM. +![#winning](HXrAMJrH.png) + +It's so beautiful! + +### Wrap-up +At this point, I'm tired of typing and I'm sure you're tired of reading. In the next installment, I'll go over how I modify this workflow to also check for naming conflicts in Active Directory and DNS. That sounds like it should be pretty simple but, well, you'll see. + +See you then! \ No newline at end of file diff --git a/content/post/vra8-custom-provisioning-part-two/iqsHm5zQR.png b/content/post/vra8-custom-provisioning-part-two/iqsHm5zQR.png new file mode 100644 index 0000000..19b0b54 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/iqsHm5zQR.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/krx8rZMmh.png b/content/post/vra8-custom-provisioning-part-two/krx8rZMmh.png new file mode 100644 index 0000000..48a8fb3 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/krx8rZMmh.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/lUo1odZ03.png b/content/post/vra8-custom-provisioning-part-two/lUo1odZ03.png new file mode 100644 index 0000000..2cb8bff Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/lUo1odZ03.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/mnOxV2udH.png b/content/post/vra8-custom-provisioning-part-two/mnOxV2udH.png new file mode 100644 index 0000000..2c0975a Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/mnOxV2udH.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/new_output_parameter.png 
b/content/post/vra8-custom-provisioning-part-two/new_output_parameter.png new file mode 100644 index 0000000..cce1683 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/new_output_parameter.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/o8CgTjSYm.png b/content/post/vra8-custom-provisioning-part-two/o8CgTjSYm.png new file mode 100644 index 0000000..f3522fb Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/o8CgTjSYm.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/pqrvUNmsj.png b/content/post/vra8-custom-provisioning-part-two/pqrvUNmsj.png new file mode 100644 index 0000000..e52cba2 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/pqrvUNmsj.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/qmHszypww.png b/content/post/vra8-custom-provisioning-part-two/qmHszypww.png new file mode 100644 index 0000000..383efec Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/qmHszypww.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/qt-D1mJFE.png b/content/post/vra8-custom-provisioning-part-two/qt-D1mJFE.png new file mode 100644 index 0000000..201a1de Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/qt-D1mJFE.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/rUmGPdz2I.png b/content/post/vra8-custom-provisioning-part-two/rUmGPdz2I.png new file mode 100644 index 0000000..507f8cd Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/rUmGPdz2I.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/siEJSdeDE.png b/content/post/vra8-custom-provisioning-part-two/siEJSdeDE.png new file mode 100644 index 0000000..62c31a2 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/siEJSdeDE.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/tiBJVKYdf.png 
b/content/post/vra8-custom-provisioning-part-two/tiBJVKYdf.png new file mode 100644 index 0000000..75b8f46 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/tiBJVKYdf.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/y7JKSxsqE.png b/content/post/vra8-custom-provisioning-part-two/y7JKSxsqE.png new file mode 100644 index 0000000..f6b75ed Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/y7JKSxsqE.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/zAF26KJnO.png b/content/post/vra8-custom-provisioning-part-two/zAF26KJnO.png new file mode 100644 index 0000000..6f79617 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/zAF26KJnO.png differ diff --git a/content/post/vra8-custom-provisioning-part-two/zLec-3X_D.png b/content/post/vra8-custom-provisioning-part-two/zLec-3X_D.png new file mode 100644 index 0000000..f240cb8 Binary files /dev/null and b/content/post/vra8-custom-provisioning-part-two/zLec-3X_D.png differ