Mirror of https://github.com/jbowdre/vsphere-k8s.git (synced 2024-11-21 15:42:19 +00:00)

Compare commits: 2d14363967...b5bbc4e7a8 (6 commits)

Commits in this range:
- b5bbc4e7a8
- 05ab05a84a
- 101ad08bd3
- 6cd9786752
- 3a13a5b3fa
- 0b13e195dc

9 changed files with 101 additions and 97 deletions
@ -178,7 +178,6 @@ autoinstall:
    hostname: ${ vm_guest_os_hostname }
    users:
      - name: ${ build_username }
        passwd: "${ build_password }"
        groups: [adm, cdrom, dip, plugdev, lxd, sudo]
        lock-passwd: false
        sudo: ALL=(ALL) NOPASSWD:ALL

@ -187,5 +186,5 @@ autoinstall:
    ssh_authorized_keys:
    %{ for ssh_key in ssh_keys ~}
      - ${ ssh_key }
    %{ endfor ~}
    %{ endif ~}
packer/packer_cache/ssh_private_key_packer.example.pem (new file, 3 lines)

@ -0,0 +1,3 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+
+-----END OPENSSH PRIVATE KEY-----
@ -2,10 +2,10 @@
# Prepare a VM to become a template.

echo '>> Clearing audit logs...'
sudo sh -c 'if [ -f /var/log/audit/audit.log ]; then
  cat /dev/null > /var/log/audit/audit.log
fi'
sudo sh -c 'if [ -f /var/log/wtmp ]; then
  cat /dev/null > /var/log/wtmp
fi'
sudo sh -c 'if [ -f /var/log/lastlog ]; then
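Note: the clean-up hunk above repeats the same guarded truncate for each log file. A minimal standalone sketch of the same idea as a loop (the file list is illustrative, not taken from this repo):

#!/usr/bin/env bash
# Truncate a set of log files if they exist; equivalent to the repeated
# `sudo sh -c 'if [ -f ... ]'` blocks in the clean-up script above.
for logfile in /var/log/audit/audit.log /var/log/wtmp /var/log/lastlog; do
  if [ -f "${logfile}" ]; then
    sudo sh -c "cat /dev/null > '${logfile}'"
  fi
done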
@ -1,67 +1,67 @@
/*
  DESCRIPTION:
  Ubuntu Server 20.04 LTS Kubernetes node variables used by the Packer Plugin for VMware vSphere (vsphere-iso).
*/

// vSphere Credentials
vsphere_endpoint            = "vcenter.lab.local"
vsphere_username            = "packer"
vsphere_password            = "VMware1!"
vsphere_insecure_connection = true

// vSphere Settings
vsphere_datacenter = "Datacenter 01"
vsphere_cluster    = "cluster-01"
vsphere_datastore  = "datastore-01"
vsphere_network    = "network-01"
vsphere_folder     = "_Templates"

// Guest Operating System Settings
vm_guest_os_language = "en_US"
vm_guest_os_keyboard = "us"
vm_guest_os_timezone = "America/Chicago"
vm_guest_os_type     = "ubuntu64Guest"

// Virtual Machine Hardware Settings
vm_name                     = "k8s-u2004"
vm_firmware                 = "efi-secure"
vm_cdrom_type               = "sata"
vm_cpu_count                = 2
vm_cpu_cores                = 1
vm_cpu_hot_add              = true
vm_mem_size                 = 2048
vm_mem_hot_add              = true
vm_disk_size                = 30720
vm_disk_controller_type     = ["pvscsi"]
vm_disk_thin_provisioned    = true
vm_network_card             = "vmxnet3"
common_vm_version           = 19
common_tools_upgrade_policy = true
common_remove_cdrom         = true

// Template and Content Library Settings
common_template_conversion         = true
common_content_library_name        = null
common_content_library_ovf         = false
common_content_library_destroy     = true
common_content_library_skip_export = true

// OVF Export Settings
common_ovf_export_enabled   = false
common_ovf_export_overwrite = true
common_ovf_export_path      = ""

// Removable Media Settings
common_iso_datastore = "datastore-01"
iso_url              = null
iso_path             = "_ISO"
iso_file             = "ubuntu-20.04.5-live-server-amd64.iso"
iso_checksum_type    = "sha256"
iso_checksum_value   = "5035be37a7e9abbdc09f0d257f3e33416c1a0fb322ba860d42d74aa75c3468d4"

// Boot Settings
vm_boot_order = "disk,cdrom"
vm_boot_wait  = "4s"
vm_boot_command = [
  "<esc><wait>",
  "linux /casper/vmlinuz --- autoinstall ds=\"nocloud\"",

@ -73,16 +73,15 @@ vm_boot_command = [
]

// Communicator Settings
communicator_port       = 22
communicator_timeout    = "20m"
common_ip_wait_timeout  = "20m"
common_shutdown_timeout = "15m"
-vm_shutdown_command     = "sudo /usr/sbin/shutdown -P now"
build_remove_keys       = true
build_username          = "admin"
build_password          = "VMware1!"
ssh_keys = [
-  "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOpLvpxilPjpCahAQxs4RQgv+Lb5xObULXtwEoimEBpA builder"
+  "ssh-ed25519 blahblahblah builder"
]

// Provisioner Settings

@ -106,4 +105,4 @@ pre_final_scripts = [
]

// Kubernetes Settings
k8s_version = "1.25.3"
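Note: a quick way to exercise a variable definitions file like the one above is to pass it to Packer with -var-file. A sketch, assuming the sample values shipped in this diff; the variable file name used here is a placeholder, not a file in this repo:

#!/usr/bin/env bash
set -euo pipefail

# Confirm the local copy of the ISO matches the checksum declared in the vars file.
echo "5035be37a7e9abbdc09f0d257f3e33416c1a0fb322ba860d42d74aa75c3468d4  ubuntu-20.04.5-live-server-amd64.iso" | sha256sum --check

# Validate and build the template with the variable definitions file.
packer init .
packer validate -var-file=example.pkrvars.hcl .
packer build -var-file=example.pkrvars.hcl .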
@ -34,13 +34,13 @@ locals {
  build_tool        = "HashiCorp Packer ${packer.version}"
  build_date        = formatdate("YYYY-MM-DD hh:mm ZZZ", timestamp())
  build_description = "Kubernetes Ubuntu 20.04 Node template\nBuild date: ${local.build_date}\nBuild tool: ${local.build_tool}"
  shutdown_command  = "sudo /usr/sbin/shutdown -P now"
  iso_paths         = ["[${var.common_iso_datastore}] ${var.iso_path}/${var.iso_file}"]
  iso_checksum      = "${var.iso_checksum_type}:${var.iso_checksum_value}"
  data_source_content = {
    "/meta-data" = file("data/meta-data")
    "/user-data" = templatefile("data/user-data.pkrtpl.hcl", {
      build_username       = var.build_username
      build_password       = bcrypt(var.build_password)
      ssh_keys             = concat([local.ssh_public_key], var.ssh_keys)
      vm_guest_os_language = var.vm_guest_os_language
      vm_guest_os_keyboard = var.vm_guest_os_keyboard

@ -102,7 +102,7 @@ source "vsphere-iso" "ubuntu-k8s" {
  cd_content = local.data_source_content
  cd_label   = var.cd_label

  // Boot and Provisioning Settings
  boot_order   = var.vm_boot_order
  boot_wait    = var.vm_boot_wait
  boot_command = var.vm_boot_command

@ -138,7 +138,7 @@ source "vsphere-iso" "ubuntu-k8s" {
  // OVF Export Settings
  dynamic "export" {
    for_each = var.common_ovf_export_enabled == true ? [1] : []
    content {
      name    = var.vm_name
      force   = var.common_ovf_export_overwrite
      options = [
@ -15,7 +15,6 @@ variable "vsphere_endpoint" {
variable "vsphere_username" {
  type        = string
  description = "The username to login to the vCenter Server instance. ('packer')"
  sensitive   = true
}

variable "vsphere_password" {
@ -99,7 +99,7 @@ resource "vsphere_virtual_machine" "control" {
  template_uuid = data.vsphere_virtual_machine.template.id
  customize {
    timeout = 0

    linux_options {
      host_name = "${var.vm-control-name}-${count.index +1}"
      domain    = var.vm-domain

@ -180,7 +180,7 @@ resource "vsphere_virtual_machine" "worker" {
  template_uuid = data.vsphere_virtual_machine.template.id
  customize {
    timeout = 0

    linux_options {
      host_name = "${var.vm-worker-name}-${count.index +1}"
      domain    = var.vm-domain
@ -52,7 +52,7 @@ EOF
sudo chown "$(id -u):$(id -g)" "${HOME}"/.kube/config

echo ">> Applying Calico networking..."
-kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
+kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/master/manifests/calico.yaml

echo ">> Creating discovery config..."
kubectl -n kube-public get configmap cluster-info -o jsonpath='{.data.kubeconfig}' > discovery.yaml

@ -63,7 +63,7 @@ EOF
  fi
fi
echo ">> Waiting up to 10 minutes for all control-plane nodes to be Ready..."
-python3 -m http.server 2>/dev/null &
+python3 -m http.server &>/dev/null &
PROC_ID=$!
attempts_max=60
attempt=0
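Note: the script above waits up to 10 minutes with a counter loop. Where kubectl is already configured on the node (as it is at this point in the script), the same wait can be expressed declaratively; a sketch under that assumption:

#!/usr/bin/env bash
# Wait for the Calico daemonset to roll out, then for every node to report Ready.
kubectl -n kube-system rollout status daemonset/calico-node --timeout=10m
kubectl wait --for=condition=Ready node --all --timeout=10m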
@ -382,20 +382,25 @@ else
  sleep 10
done
echo ">> Continuing after $((attempt*10)) seconds."
-echo ">> Joining cluster..."
+echo ">> Retrieving cluster discovery config..."
attempts_max=6
attempt=0
-until [ -f /etc/kubernetes/discovery.yaml ]; do
-  wget "http://${K8S_CONTROLPLANE_VIP}:8000/discovery.yaml" 2>/dev/null
-  if ! sudo install -m 600 discovery.yaml /etc/kubernetes/discovery.yaml 2>/dev/null; then
-    if [ ${attempt} -eq ${attempts_max} ]; then
-      echo ">> [ERROR] Timeout waiting for discovery.yaml! <<"
-      exit 1
-    fi
-    sleep 10
+until [ -f ~/discovery.yaml ] || [ ${attempt} -eq ${attempts_max} ]; do
+  wget "http://${K8S_CONTROLPLANE_VIP}:8000/discovery.yaml"
+  sleep 2
+  if ! [ -f ~/discovery.yaml ]; then
+    echo ">> Unable to retrieve config..."
+    attempt=$((attempt+1))
+    sleep 8
  fi
done
+if ! [ -f ~/discovery.yaml ]; then
+  echo ">> Timeout reached while retrieving config!"
+  echo "Exiting."
+  exit 1
+fi
+sudo install -o root -g root -m 600 discovery.yaml /etc/kubernetes/discovery.yaml
echo ">> Successfully discovered cluster!"
cat << EOF > kubeadmjoin.yaml
apiVersion: kubeadm.k8s.io/v1beta3
caCertPath: /etc/kubernetes/pki/ca.crt
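Note: the retrieval logic above (and its twin in the worker script further down) repeats the same attempt/attempts_max/sleep pattern. A generic helper along these lines could factor it out; the function name and arguments are illustrative, not part of this repo:

#!/usr/bin/env bash
# retry_fetch URL DEST MAX_ATTEMPTS SLEEP_SECONDS
# Download URL to DEST, retrying until the file exists or the attempt budget is spent.
retry_fetch() {
  local url="$1" dest="$2" max="$3" pause="$4" attempt=0
  until [ -f "${dest}" ] || [ "${attempt}" -eq "${max}" ]; do
    wget -q -O "${dest}" "${url}" || rm -f "${dest}"
    [ -f "${dest}" ] || { attempt=$((attempt+1)); sleep "${pause}"; }
  done
  [ -f "${dest}" ]
}

# Example call matching the values used in the diff above:
# retry_fetch "http://${K8S_CONTROLPLANE_VIP}:8000/discovery.yaml" ~/discovery.yaml 6 8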
@ -411,17 +416,8 @@ nodeRegistration:
controlPlane:
  certificateKey: ${KUBEADM_CERTKEY}
EOF
-if sudo kubeadm join "${K8S_CONTROLPLANE_VIP}:6443" --config kubeadmjoin.yaml; then
-  if [ ! -f /etc/kubernetes/manifests/kube-vip.yaml ]; then
-    echo ">> Configuring kube-vip..."
-    sudo ctr image pull ghcr.io/kube-vip/kube-vip:"${KUBEVIP_VER}"
-    sudo ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:"${KUBEVIP_VER}" vip /kube-vip manifest pod \
-      --interface ens192 \
-      --vip "${K8S_CONTROLPLANE_VIP}" \
-      --controlplane \
-      --arp \
-      --leaderElection | sudo tee /etc/kubernetes/manifests/kube-vip.yaml
-  fi
+echo ">> Joining cluster..."
+if sudo kubeadm join "${K8S_CONTROLPLANE_VIP}":6443 --config kubeadmjoin.yaml; then
  echo ">> Node ${HOSTNAME} successfully initialized!"
  touch .k8s-node-success
  mkdir -p "${HOME}"/.kube
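Note: after the kubeadm join above, one way to confirm the node came up behind the kube-vip address is to probe the VIP and list nodes. A sketch, assuming anonymous access to /healthz is still permitted on the API server and kubectl is configured on the node:

#!/usr/bin/env bash
# Probe the API server through the control-plane VIP (self-signed cert, hence -k),
# then confirm the new node registered with the cluster.
curl -k "https://${K8S_CONTROLPLANE_VIP}:6443/healthz" && echo
kubectl get nodes -o wide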
@ -17,7 +17,7 @@ echo ">> Continuing after $((attempt*10)) seconds."
echo ">> Waiting up to 10 minutes for all control-plane nodes..."
attempts_max=60
attempt=0
-until wget "http://${K8S_CONTROLPLANE_VIP}:8000/.k8s-controlplane-success" 2>/dev/null; do
+until curl --fail "http://${K8S_CONTROLPLANE_VIP}:8000/.k8s-controlplane-success" 2>/dev/null; do
  if [ ${attempt} -eq ${attempts_max} ]; then
    echo ">> [ERROR] Timeout waiting for control-plane nodes! <<"
    exit 1

@ -26,18 +26,26 @@ until wget "http://${K8S_CONTROLPLANE_VIP}:8000/.k8s-controlplane-success" 2>/de
  sleep 10
done
echo ">> Continuing after $((attempt*10)) seconds."
-echo ">> Joining cluster..."
+echo ">> Retrieving cluster discovery config..."
attempts_max=6
attempt=0
-until [ -f /etc/kubernetes/discovery.yaml ]; do
-  wget "http://${K8S_CONTROLPLANE_VIP}:8000/discovery.yaml" 2>/dev/null
-  sudo install -m 600 discovery.yaml /etc/kubernetes/discovery.yaml 2>/dev/null
-  if [ ! -f /etc/kubernetes/discovery.yaml ]; then
+until [ -f ~/discovery.yaml ] || [ ${attempt} -eq ${attempts_max} ]; do
+  wget "http://${K8S_CONTROLPLANE_VIP}:8000/discovery.yaml"
+  sleep 2
+  if ! [ -f ~/discovery.yaml ]; then
+    echo ">> Unable to retrieve config..."
    attempt=$((attempt+1))
-    sleep 10
+    sleep 8
  fi
done
+
+if ! [ -f ~/discovery.yaml ]; then
+  echo ">> Timeout reached while retrieving config!"
+  echo "Exiting."
+  exit 1
+fi
+sudo install -o root -g root -m 600 discovery.yaml /etc/kubernetes/discovery.yaml
-echo ">> Successfully discovered cluster!"
+echo ">> Discovered cluster!"
cat << EOF > kubeadmjoin.yaml
apiVersion: kubeadm.k8s.io/v1beta3
caCertPath: /etc/kubernetes/pki/ca.crt
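Note: the switch from wget to curl --fail in the polling loop above matters because --fail makes curl exit non-zero on HTTP errors, so the until-loop keeps waiting instead of treating an error page as success. A small standalone check; the fallback IP is a documentation placeholder, the real value comes from the script's K8S_CONTROLPLANE_VIP environment:

#!/usr/bin/env bash
# curl --fail (-f) exits with code 22 when the server answers with HTTP >= 400,
# which makes it usable as a readiness probe inside an until-loop.
if curl --fail -s -o /dev/null "http://${K8S_CONTROLPLANE_VIP:-192.0.2.10}:8000/.k8s-controlplane-success"; then
  echo "flag file is being served"
else
  echo "not ready yet (curl exit code $?)"
fi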