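# Node list and IPv6 addresses of the instances (surrounding brackets stripped)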
locals {
nodes = concat([
openstack_compute_instance_v2.ingress,
openstack_compute_instance_v2.nfs,
], openstack_compute_instance_v2.worker[*], openstack_compute_instance_v2.gpu[*])
master_ip = replace(openstack_compute_instance_v2.master.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
ingress_ip = replace(openstack_compute_instance_v2.ingress.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
nfs_ip = replace(openstack_compute_instance_v2.nfs.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
worker_ips = [for s in openstack_compute_instance_v2.worker[*].network[1].fixed_ip_v6 : replace(s, "/\\[(.*)\\]/", "$1")]
gpu_ips = [for s in openstack_compute_instance_v2.gpu[*].network[1].fixed_ip_v6 : replace(s, "/\\[(.*)\\]/", "$1")]
}
# Security groups
resource "openstack_compute_secgroup_v2" "ping" {
name = "ping"
description = "ICMP for ping"
rule {
from_port = 8
to_port = 0
ip_protocol = "icmp"
cidr = "0.0.0.0/0"
}
rule {
from_port = 128
to_port = 0
ip_protocol = "ipv6-icmp"
cidr = "::/0"
# initial installation (bug in terraform): ip_protocol = "icmp"
}
}
resource "openstack_compute_secgroup_v2" "ssh" {
name = "ssh"
description = "ssh connection"
rule {
from_port = 22
to_port = 22
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
rule {
from_port = 22
to_port = 22
ip_protocol = "tcp"
cidr = "::/0"
}
}
resource "openstack_compute_secgroup_v2" "http" {
name = "http"
description = "http/https"
rule {
from_port = 80
to_port = 80
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
rule {
from_port = 80
to_port = 80
ip_protocol = "tcp"
cidr = "::/0"
}
rule {
from_port = 443
to_port = 443
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
rule {
from_port = 443
to_port = 443
ip_protocol = "tcp"
cidr = "::/0"
}
}
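# Public floating IP, later associated with the ingress node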
resource "openstack_networking_floatingip_v2" "public_ip" {
pool = var.ip_pool
}
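# Image and flavor lookups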
data "openstack_images_image_v2" "ubuntu" {
name = "ubuntu-jammy-x86_64"
}
data "openstack_compute_flavor_v2" "master-flavor" {
# flavor name assumed to come from var.master_flavor_name, mirroring the worker and GPU flavors
name = var.master_flavor_name
}
data "openstack_compute_flavor_v2" "worker-flavor" {
name = var.worker_flavor_name
}
data "openstack_compute_flavor_v2" "gpu-flavor" {
name = var.gpu_flavor_name
}
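# Cluster instances, each connected to both the IPv4 and IPv6 networks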
resource "openstack_compute_instance_v2" "master" {
name = "k8s-${var.site_name}-master"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.master-flavor.id
security_groups = ["default", "all"]
user_data = file("cloud-init.yaml")
tags = ["master"]
network {
name = var.net_name
}
network {
name = var.net6_name
}
}
resource "openstack_compute_instance_v2" "nfs" {
name = "k8s-${var.site_name}-nfs"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.worker-flavor.id
security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
name = var.net_name
}
network {
name = var.net6_name
}
}
resource "openstack_compute_instance_v2" "ingress" {
name = "k8s-${var.site_name}-w-ingress"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.worker-flavor.id
security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name, openstack_compute_secgroup_v2.http.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
name = var.net_name
}
network {
name = var.net6_name
}
}
resource "openstack_compute_instance_v2" "worker" {
count = var.extra_workers
name = "k8s-${var.site_name}-worker-${count.index}"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.worker-flavor.id
security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
name = var.net_name
}
network {
name = var.net6_name
}
}
resource "openstack_compute_instance_v2" "gpu" {
count = var.gpu_workers
name = "k8s-${var.site_name}-gpu-${count.index}"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.gpu-flavor.id
security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
name = var.net_name
}
network {
name = var.net6_name
}
}
resource "openstack_compute_floatingip_associate_v2" "fip" {
floating_ip = openstack_networking_floatingip_v2.public_ip.address
instance_id = openstack_compute_instance_v2.ingress.id
}
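# NFS data volume for the NFS node, prepared by the generated nfs-volume.sh script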
resource "openstack_blockstorage_volume_v3" "nfs-volume" {
name = "nfs"
size = var.nfs_volume_size
}
resource "openstack_compute_volume_attach_v2" "nfs-volume-attach" {
instance_id = openstack_compute_instance_v2.nfs.id
volume_id = openstack_blockstorage_volume_v3.nfs-volume.id
}
resource "local_file" "volume-script" {
filename = "nfs-volume.sh"
file_permission = "0755"
content = <<EOT
#! /bin/bash -xe
if ! dpkg-query -s xfsprogs >/dev/null 2>&1; then
apt-get update
apt-get install -y xfsprogs
fi
device="${openstack_compute_volume_attach_v2.nfs-volume-attach.device}"
mkfs.xfs -L NFS "$device" || true
grep -q 'LABEL=NFS' /etc/fstab || /bin/echo -e "LABEL=NFS\t/exports\txfs\tdefaults,uquota,pquota\t0\t0" | tee -a /etc/fstab
mkdir /exports 2>/dev/null || true
mount -a
EOT
}
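# One Docker volume per node (ingress, NFS and all workers), prepared by docker-volume.sh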
resource "openstack_blockstorage_volume_v3" "docker-volume" {
count = var.extra_workers + var.gpu_workers + 2
name = format("docker-%s", local.nodes[count.index].name)
size = var.docker_volumes_size
}
resource "openstack_compute_volume_attach_v2" "docker-volume-attach" {
count = var.extra_workers + var.gpu_workers + 2
instance_id = local.nodes[count.index].id
volume_id = openstack_blockstorage_volume_v3.docker-volume[count.index].id
}
resource "local_file" "docker-volume-script" {
filename = "docker-volume.sh"
file_permission = "0755"
content = <<EOT
#! /bin/bash -xe
volumes="${join("\n", [for n, d in zipmap(tolist(local.nodes[*].name), tolist(openstack_compute_volume_attach_v2.docker-volume-attach[*].device)) : format("%s:%s", n, d)])}"
volume=$(echo "$volumes" | grep "$(hostname):")
device=$(echo "$volume" | cut -d: -f2)
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
mkfs.ext4 -L DOCKER "$device"
grep -q 'LABEL=DOCKER' /etc/fstab || /bin/echo -e "LABEL=DOCKER\t/var/lib/docker/overlay2\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
mkdir -p /var/lib/docker/overlay2 2>/dev/null || true
systemctl stop docker kubelet >/dev/null 2>&1 || true
umount /var/lib/docker/overlay2 2>&1 || true
mount "$device" /mnt
mv /var/lib/docker/overlay2/* /mnt >/dev/null 2>&1 || true
umount /mnt
mount -a
systemctl start docker kubelet >/dev/null 2>&1 || true
fi
EOT
}
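# Squid cache volume for the ingress node, prepared by squid-volume.sh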
resource "openstack_blockstorage_volume_v3" "squid-volume" {
name = "squid"
size = var.squid_volume_size
}
resource "openstack_compute_volume_attach_v2" "squid-volume-attach" {
instance_id = openstack_compute_instance_v2.ingress.id
volume_id = openstack_blockstorage_volume_v3.squid-volume.id
}
resource "local_file" "squid-volume-script" {
filename = "squid-volume.sh"
file_permission = "0755"
content = <<EOT
#! /bin/bash -xe
device="${openstack_compute_volume_attach_v2.squid-volume-attach.device}"
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
mkfs.ext4 -L SQUID "$device"
fi
grep -q 'LABEL=SQUID' /etc/fstab || /bin/echo -e "LABEL=SQUID\t/var/spool/squid\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
mkdir /var/spool/squid 2>/dev/null || true
mount -a
EOT
}
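# Generated Ansible inventory and helper files (floating IP, host list)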
resource "local_file" "inventory" {
filename = "inventory.yaml"
file_permission = "0644"
content = <<EOT
---
fip:
hosts:
${openstack_networking_floatingip_v2.public_ip.address}:
master:
hosts:
${local.master_ip}:
vars:
# must be IPv4 address or hostname
kube_server: ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}
ingress:
hosts:
${local.ingress_ip}:
nfs:
hosts:
${local.nfs_ip}:
worker:
hosts:
${join("\n ", [for s in local.worker_ips : "${s}:"])}
gpu:
hosts:
${join("\n ", [for s in local.gpu_ips : "${s}:"])}
# using public IP of kube_server for ansible delegate_to
kube_server:
hosts:
${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}:
ansible_host: ${local.master_ip}
EOT
}
resource "local_file" "fip" {
filename = "fip.txt"
file_permission = "0644"
content = <<EOT
${openstack_networking_floatingip_v2.public_ip.address}
EOT
}
resource "local_file" "hosts" {
filename = "hosts.txt"
file_permission = "0644"
content = <<EOT
${local.master_ip}
${local.ingress_ip}
${local.nfs_ip}
${join("\n", concat(local.worker_ips, local.gpu_ips))}