provider "openstack" {}

locals {
  # Every node that receives a dedicated docker volume: ingress, nfs,
  # and each extra worker (the master is excluded).
  nodes = concat([
    openstack_compute_instance_v2.ingress,
    openstack_compute_instance_v2.nfs,
  ], openstack_compute_instance_v2.worker[*])

  # fixed_ip_v6 is reported wrapped in brackets ("[addr]"); strip them.
  master_ip  = replace(openstack_compute_instance_v2.master.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
  ingress_ip = replace(openstack_compute_instance_v2.ingress.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
  nfs_ip     = replace(openstack_compute_instance_v2.nfs.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
  worker_ips = [for s in openstack_compute_instance_v2.worker[*].network[1].fixed_ip_v6 : replace(s, "/\\[(.*)\\]/", "$1")]
}

# Security groups
#
# NOTE(review): openstack_compute_secgroup_v2 is deprecated in newer provider
# releases in favour of openstack_networking_secgroup_v2 — left unchanged here
# because switching resource types would recreate the groups. Confirm before
# migrating.

resource "openstack_compute_secgroup_v2" "ping" {
  name        = "ping"
  description = "ICMP for ping"

  # ICMP echo request (type 8).
  rule {
    from_port   = 8
    to_port     = 0
    ip_protocol = "icmp"
    cidr        = "0.0.0.0/0"
  }

  # ICMPv6 echo request (type 128).
  rule {
    from_port = 128
    to_port   = 0
    # initial installation (bug in terraform): ip_protocol = "icmp"
    ip_protocol = "ipv6-icmp"
    cidr        = "::/0"
  }
}

resource "openstack_compute_secgroup_v2" "ssh" {
  name        = "ssh"
  description = "ssh connection"

  # SSH over IPv4.
  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }

  # SSH over IPv6.
  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "::/0"
  }
}

resource "openstack_compute_secgroup_v2" "http" {
  name        = "http"
  description = "http/https"

  # HTTP, IPv4 and IPv6.
  rule {
    from_port   = 80
    to_port     = 80
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }

  rule {
    from_port   = 80
    to_port     = 80
    ip_protocol = "tcp"
    cidr        = "::/0"
  }

  # HTTPS, IPv4 and IPv6.
  rule {
    from_port   = 443
    to_port     = 443
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }

  rule {
    from_port   = 443
    to_port     = 443
    ip_protocol = "tcp"
    cidr        = "::/0"
  }
}

# Public floating IP, attached to the ingress node below.
resource "openstack_networking_floatingip_v2" "public_ip" {
  pool = var.ip_pool
}

data "openstack_images_image_v2" "ubuntu" {
  name = "ubuntu-jammy-x86_64"
}

data "openstack_compute_flavor_v2" "master-flavor" {
  vcpus = var.master_cpus
  ram   = var.master_ram
}

data "openstack_compute_flavor_v2" "worker-flavor" {
  vcpus = var.worker_cpus
  ram   = var.worker_ram
}

# Kubernetes control-plane node.
resource "openstack_compute_instance_v2" "master" {
  name     = "k8s-${var.site_name}-master"
  image_id = data.openstack_images_image_v2.ubuntu.id
  # 4 cores 4 GB RAM
  flavor_id       = data.openstack_compute_flavor_v2.master-flavor.id
  security_groups = ["default", "all"]
  user_data       = file("cloud-init.yaml")
  tags            = ["master"]

  network {
    name = var.net_name
  }

  network {
    name = var.net6_name
  }
}

# Worker node dedicated to serving NFS exports.
resource "openstack_compute_instance_v2" "nfs" {
  name            = "k8s-${var.site_name}-nfs"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }

  network {
    name = var.net6_name
  }
}

# Worker node that also carries the ingress (http/https) traffic.
resource "openstack_compute_instance_v2" "ingress" {
  name            = "k8s-${var.site_name}-w-ingress"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name, openstack_compute_secgroup_v2.http.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }

  network {
    name = var.net6_name
  }
}

# Additional plain worker nodes, count controlled by var.extra_workers.
resource "openstack_compute_instance_v2" "worker" {
  count           = var.extra_workers
  name            = "k8s-${var.site_name}-worker-${count.index}"
  image_id        = data.openstack_images_image_v2.ubuntu.id
  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
  user_data       = file("cloud-init.yaml")
  tags            = ["worker"]

  network {
    name = var.net_name
  }

  network {
    name = var.net6_name
  }
}

# The public floating IP lands on the ingress node.
resource "openstack_compute_floatingip_associate_v2" "fip" {
  floating_ip = openstack_networking_floatingip_v2.public_ip.address
  instance_id = openstack_compute_instance_v2.ingress.id
}

# Block storage backing the NFS exports.
resource "openstack_blockstorage_volume_v3" "nfs-volume" {
  name = "nfs"
  size = var.nfs_volume_size
}
resource "openstack_compute_volume_attach_v2" "nfs-volume-attach" {
  instance_id = openstack_compute_instance_v2.nfs.id
  volume_id   = openstack_blockstorage_volume_v3.nfs-volume.id
}

# Generated helper script: format (first run only) and mount the NFS
# volume on the nfs node. mkfs.xfs refuses to overwrite an existing
# filesystem, so "|| true" makes reruns harmless.
resource "local_file" "volume-script" {
  filename        = "nfs-volume.sh"
  file_permission = "0755"
  content         = <<EOT
#! /bin/bash -xe
if ! dpkg-query -s xfsprogs >/dev/null 2>&1; then
  apt-get update
  apt-get install -y xfsprogs
fi
device="${openstack_compute_volume_attach_v2.nfs-volume-attach.device}"
mkfs.xfs -L NFS "$device" || true
grep -q 'LABEL=NFS' /etc/fstab || /bin/echo -e "LABEL=NFS\t/exports\txfs\tdefaults,uquota,pquota\t0\t0" | tee -a /etc/fstab
mkdir /exports 2>/dev/null || true
mount -a
EOT
}

# One docker volume per non-master node (ingress + nfs + extra workers).
resource "openstack_blockstorage_volume_v3" "docker-volume" {
  count = var.extra_workers + 2
  name  = format("docker-%s", local.nodes[count.index].name)
  size  = var.docker_volumes_size
}

resource "openstack_compute_volume_attach_v2" "docker-volume-attach" {
  count       = var.extra_workers + 2
  instance_id = local.nodes[count.index].id
  volume_id   = openstack_blockstorage_volume_v3.docker-volume[count.index].id
}

# Generated helper script: each node looks up its own device in the
# hostname:device table, formats it once, and moves docker's overlay2
# directory onto it.
resource "local_file" "docker-volume-script" {
  filename        = "docker-volume.sh"
  file_permission = "0755"
  content         = <<EOT
#! /bin/bash -xe
volumes="${join("\n", [for n, d in zipmap(tolist(local.nodes[*].name), tolist(openstack_compute_volume_attach_v2.docker-volume-attach[*].device)) : format("%s:%s", n, d)])}"
volume=$(echo "$volumes" | grep "$(hostname):")
device=$(echo "$volume" | cut -d: -f2)
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
  mkfs.ext4 -L DOCKER "$device"
  grep -q 'LABEL=DOCKER' /etc/fstab || /bin/echo -e "LABEL=DOCKER\t/var/lib/docker/overlay2\text4\tdefaults\t0\t0" | tee -a /etc/fstab
  mkdir -p /var/lib/docker/overlay2 2>/dev/null || true
  service docker stop >/dev/null 2>&1 || true
  sleep 10
  mount "$device" /mnt
  mv /var/lib/docker/overlay2/* /mnt >/dev/null 2>&1 || true
  umount /mnt
  mount -a
fi
EOT
}

# Block storage for the squid proxy cache on the ingress node.
resource "openstack_blockstorage_volume_v3" "squid-volume" {
  name = "squid"
  size = var.squid_volume_size
}

resource "openstack_compute_volume_attach_v2" "squid-volume-attach" {
  instance_id = openstack_compute_instance_v2.ingress.id
  volume_id   = openstack_blockstorage_volume_v3.squid-volume.id
}

# Generated helper script: format (first run only) and mount the squid
# cache volume.
resource "local_file" "squid-volume-script" {
  filename        = "squid-volume.sh"
  file_permission = "0755"
  content         = <<EOT
#! /bin/bash -xe
device="${openstack_compute_volume_attach_v2.squid-volume-attach.device}"
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
  mkfs.ext4 -L SQUID "$device"
fi
grep -q 'LABEL=SQUID' /etc/fstab || /bin/echo -e "LABEL=SQUID\t/var/spool/squid\text4\tdefaults\t0\t0" | tee -a /etc/fstab
mkdir /var/spool/squid 2>/dev/null || true
mount -a
EOT
}

# Ansible inventory generated from the provisioned addresses.
# NOTE(review): heredoc indentation reconstructed — verify the produced
# inventory.yaml parses before relying on it.
resource "local_file" "inventory" {
  filename        = "inventory.yaml"
  file_permission = "0644"
  content         = <<EOT
---
fip:
  hosts:
    ${openstack_networking_floatingip_v2.public_ip.address}:
master:
  hosts:
    ${local.master_ip}:
      # must be IPv4 address or hostname
      kube_server: ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}
ingress:
  hosts:
    ${local.ingress_ip}:
nfs:
  hosts:
    ${local.nfs_ip}:
worker:
  hosts:
    ${join(":\n    ", local.worker_ips)}:
# using public IP of kube_server for ansible delegate_to
kube_server:
  hosts:
    ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}:
      ansible_host: ${local.master_ip}
EOT
}

# Plain-text record of the public floating IP.
resource "local_file" "fip" {
  filename        = "fip.txt"
  file_permission = "0644"
  content         = <<EOT
${openstack_networking_floatingip_v2.public_ip.address}
EOT
}

# One IPv6 address per line for every cluster node.
resource "local_file" "hosts" {
  filename        = "hosts.txt"
  file_permission = "0644"
  content         = <<EOT
${local.master_ip}
${local.ingress_ip}
${local.nfs_ip}
${join("\n", local.worker_ips)}
EOT
}