locals {
  nodes = concat([
    openstack_compute_instance_v2.ingress,
    openstack_compute_instance_v2.nfs,
  ], openstack_compute_instance_v2.worker[*], openstack_compute_instance_v2.gpu[*])

  master_ip  = replace(openstack_compute_instance_v2.master.network[0].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
  ingress_ip = replace(openstack_compute_instance_v2.ingress.network[0].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
  nfs_ip     = replace(openstack_compute_instance_v2.nfs.network[0].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
  worker_ips = [for s in openstack_compute_instance_v2.worker[*].network[0].fixed_ip_v6 : replace(s, "/\\[(.*)\\]/", "$1")]
  gpu_ips    = [for s in openstack_compute_instance_v2.gpu[*].network[0].fixed_ip_v6 : replace(s, "/\\[(.*)\\]/", "$1")]
}

data "openstack_images_image_v2" "ubuntu" {
  name = "ubuntu-22.04"
}

data "openstack_compute_flavor_v2" "master-flavor" {
  name = var.master_flavor_name
}

data "openstack_compute_flavor_v2" "worker-flavor" {
  name = var.worker_flavor_name
}

data "openstack_compute_flavor_v2" "gpu-flavor" {
  name = var.gpu_flavor_name
}

resource "openstack_compute_instance_v2" "master" {
  name            = "k8s-${var.site_name}-master"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.master-flavor.id
  security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["master"]

  network {
    name = var.net_name
  }
}

resource "openstack_compute_instance_v2" "nfs" {
  name            = "k8s-${var.site_name}-nfs"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
  security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }
}

resource "openstack_compute_instance_v2" "ingress" {
  name            = "k8s-${var.site_name}-w-ingress"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
  security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name, openstack_networking_secgroup_v2.http.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }
}

resource "openstack_compute_instance_v2" "worker" {
  count           = var.extra_workers
  name            = "k8s-${var.site_name}-worker-${count.index}"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
  security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }
}

resource "openstack_compute_instance_v2" "gpu" {
  count           = var.gpu_workers
  name            = "k8s-${var.site_name}-gpu-${count.index}"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.gpu-flavor.id
  security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }
}

resource "openstack_blockstorage_volume_v3" "nfs-volume" {
  name        = "nfs"
  size        = var.nfs_volume_size
  volume_type = "large"
}

resource "openstack_compute_volume_attach_v2" "nfs-volume-attach" {
  instance_id = openstack_compute_instance_v2.nfs.id
  volume_id   = openstack_blockstorage_volume_v3.nfs-volume.id
}

resource "local_file" "volume-script" {
"nfs-volume.sh" file_permission = "0755" content = <<EOT #! /bin/bash -xe if ! dpkg-query -s xfsprogs >/dev/null 2>&1; then apt-get update apt-get install -y xfsprogs fi device="${openstack_compute_volume_attach_v2.nfs-volume-attach.device}" mkfs.xfs -L NFS "$device" || true grep -q 'LABEL=NFS' /etc/fstab || /bin/echo -e "LABEL=NFS\t/exports\txfs\tdefaults,uquota,pquota\t0\t0" | tee -a /etc/fstab mkdir /exports 2>/dev/null || true mount -a EOT } resource "openstack_blockstorage_volume_v3" "docker-volume" { count = var.extra_workers + var.gpu_workers + 2 name = format("docker-%s", local.nodes[count.index].name) size = var.docker_volumes_size } resource "openstack_compute_volume_attach_v2" "docker-volume-attach" { count = var.extra_workers + var.gpu_workers + 2 instance_id = local.nodes[count.index].id volume_id = openstack_blockstorage_volume_v3.docker-volume[count.index].id } resource "local_file" "docker-volume-script" { filename = "docker-volume.sh" file_permission = "0755" content = <<EOT #! /bin/bash -xe volumes="${join("\n", [for n, d in zipmap(tolist(local.nodes[*].name), tolist(openstack_compute_volume_attach_v2.docker-volume-attach[*].device)) : format("%s:%s", n, d)])}" volume=$(echo "$volumes" | grep "$(hostname):") device=$(echo "$volume" | cut -d: -f2) if ! dumpe2fs -h "$device" >/dev/null 2>&1; then mkfs.ext4 -L DOCKER "$device" grep -q 'LABEL=DOCKER' /etc/fstab || /bin/echo -e "LABEL=DOCKER\t/var/lib/docker/overlay2\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab mkdir -p /var/lib/docker/overlay2 2>/dev/null || true systemctl stop docker kubelet >/dev/null 2>&1 || true sleep 10 systemctl stop docker kubelet >/dev/null 2>&1 || true umount /var/lib/docker/overlay2 2>&1 || true mount "$device" /mnt mv /var/lib/docker/overlay2/* /mnt >/dev/null 2>&1 || true umount /mnt mount -a systemctl start docker kubelet >/dev/null 2>&1 || true fi EOT } resource "openstack_blockstorage_volume_v3" "squid-volume" { name = "squid" size = var.squid_volume_size volume_type = "fast" } resource "openstack_compute_volume_attach_v2" "squid-volume-attach" { instance_id = openstack_compute_instance_v2.ingress.id volume_id = openstack_blockstorage_volume_v3.squid-volume.id } resource "local_file" "squid-volume-script" { filename = "squid-volume.sh" file_permission = "0755" content = <<EOT #! /bin/bash -xe device="${openstack_compute_volume_attach_v2.squid-volume-attach.device}" if ! 
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
  mkfs.ext4 -L SQUID "$device"
fi
grep -q 'LABEL=SQUID' /etc/fstab || /bin/echo -e "LABEL=SQUID\t/var/spool/squid\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
mkdir /var/spool/squid 2>/dev/null || true
mount -a
EOT
}

resource "local_file" "inventory" {
  filename        = "inventory.yaml"
  file_permission = "0644"
  content         = <<EOT
---
fip:
  hosts:
    ${openstack_compute_instance_v2.ingress.network[0].fixed_ip_v4}
master:
  hosts:
    ${local.master_ip}:
      # must be IPv4 address or hostname
      kube_server: ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}
ingress:
  hosts:
    ${local.ingress_ip}:
nfs:
  hosts:
    ${local.nfs_ip}:
worker:
  hosts:
    ${join("\n    ", [for s in local.worker_ips : "${s}:"])}
gpu:
  hosts:
    ${join("\n    ", [for s in local.gpu_ips : "${s}:"])}
# using public IP of kube_server for ansible delegate_to
kube_server:
  hosts:
    ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}:
      ansible_host: ${local.master_ip}
EOT
}

resource "local_file" "fip" {
  filename        = "fip.txt"
  file_permission = "0644"
  content         = <<EOT
${openstack_compute_instance_v2.ingress.network[0].fixed_ip_v4}
EOT
}

resource "local_file" "hosts" {
  filename        = "hosts.txt"
  file_permission = "0644"
  content         = <<EOT
${local.master_ip}
${local.ingress_ip}
${local.nfs_ip}
${join("\n", concat(local.worker_ips, local.gpu_ips))}
EOT
}
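# The resources above reference a number of input variables (var.site_name,
# var.net_name, the flavor names, worker counts, and volume sizes) that are not
# declared in this file. A minimal sketch of the corresponding declarations,
# which would normally live in a separate variables.tf, is given below; the
# types follow from how the variables are used, but the defaults are
# assumptions, not values taken from the original configuration.

variable "site_name" {
  type        = string
  description = "Site identifier used in instance names"
}

variable "net_name" {
  type        = string
  description = "Name of the OpenStack network the instances attach to"
}

variable "master_flavor_name" {
  type = string
}

variable "worker_flavor_name" {
  type = string
}

variable "gpu_flavor_name" {
  type = string
}

variable "extra_workers" {
  type    = number
  default = 0 # assumed default
}

variable "gpu_workers" {
  type    = number
  default = 0 # assumed default
}

variable "nfs_volume_size" {
  type        = number
  description = "Size of the NFS volume in GB"
}

variable "docker_volumes_size" {
  type        = number
  description = "Size of each per-node Docker volume in GB"
}

variable "squid_volume_size" {
  type        = number
  description = "Size of the Squid cache volume in GB"
}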