Commit 113d19ed authored by František Dvořák

New production1 deployment @ PSNC

* Tune OpenStack parameters
* Enlarge flavours on PSNC production
* Disable scratch volumes
* Fix production FW for admin machine
* Inventory file
* Site name
parent a53286dc
Showing changed files with 306 additions and 52 deletions
../../../../common/playbooks/templates/etc/exports.ipv4
\ No newline at end of file
../../../../common/playbooks/templates/etc/glpi-agent
\ No newline at end of file
../../../../common/playbooks/templates/etc/mailutils.conf
\ No newline at end of file
../../../../common/playbooks/templates/etc/squid
\ No newline at end of file
../../../common/playbooks/templates/fluent-bit-secrets.yaml.j2
\ No newline at end of file
../../../common/playbooks/templates/fluent-bit.yaml.j2
\ No newline at end of file
../../common/playbooks/upgrade.yaml
\ No newline at end of file
../../common/terraform/.gitignore
\ No newline at end of file
../../common/terraform/cloud-init.yaml
\ No newline at end of file
../../common/terraform/firewall.tf
\ No newline at end of file
../../common/terraform/network.tf
\ No newline at end of file
# These need to be defined for things to work
ip_pool = "PSNC-EXT-PUB1-EDU"
net_name = ""
net6_name = ""
site_name = "production1"
# These may need some adjustment for your provider
master_flavor_name = "M1-NET-2vCPU-8R"
worker_flavor_name = "M1-NVME-32vCPU-128R-300D"
# XXX: replace with the GPU flavor
gpu_flavor_name = "M1-NVME-32vCPU-128R-300D"
# Number of extra workers
extra_workers = 3
# Number of GPU workers
gpu_workers = 0
# volumes for docker
docker_volumes_size = 384
# NFS volume
nfs_volume_size = 256
# scratch volume
scratch_volumes_size = 0
# squid volume
squid_volume_size = 128
# global firewall rules - public and admin access
security_public_cidr = {
  "147.228.0.0/16": "University of West Bohemia in Pilsen",
  "2001:718:1801::/48": "University of West Bohemia in Pilsen",
  "78.128.246.160/32": "CESNET VPN",
  "78.128.247.175/32": "CESNET VPN",
  "2001:718:ff05:acb::/64": "CESNET VPN",
  "2001:718:ff05:acc::/64": "CESNET VPN",
  "147.251.21.79/32": "admin machine - OpenStack project router",
  "2001:718:801:432:f816:3eff:feab:fbc8/128": "admin machine",
  "145.90.225.224/27": "EGI VPN",
  "2001:610:450:80::/64": "EGI VPN",
  "78.128.247.55/32": "CESNET monitoring",
  "2001:718:ff05:205::55/128": "CESNET monitoring",
}
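
The flavour names, IP pool, and volume sizes above are specific to the PSNC production1 site. Before applying, it can be worth confirming that they exist in the target project; a minimal sketch, assuming the standard OpenStack CLI and Terraform workflow (names are taken from the tfvars above, nothing else is implied by this commit):

# check that the referenced flavours and floating-IP pool exist in the project
openstack flavor show M1-NET-2vCPU-8R
openstack flavor show M1-NVME-32vCPU-128R-300D
openstack network show PSNC-EXT-PUB1-EDU

# then the usual Terraform workflow from the deployment directory
terraform init
terraform plan
terraform apply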
../../common/terraform/vars.tf
\ No newline at end of file
../../common/terraform/versions.tf
\ No newline at end of file
locals {
  nodes = concat([
    openstack_compute_instance_v2.ingress,
    openstack_compute_instance_v2.nfs,
  ], openstack_compute_instance_v2.worker[*], openstack_compute_instance_v2.gpu[*])

  master_ip  = openstack_compute_instance_v2.master.network[0].fixed_ip_v4
  ingress_ip = openstack_compute_instance_v2.ingress.network[0].fixed_ip_v4
  nfs_ip     = openstack_compute_instance_v2.nfs.network[0].fixed_ip_v4
  worker_ips = [for s in openstack_compute_instance_v2.worker[*].network[0].fixed_ip_v4 : s]
  gpu_ips    = [for s in openstack_compute_instance_v2.gpu[*].network[0].fixed_ip_v4 : s]
}
resource "openstack_networking_floatingip_v2" "public_ip" {
pool = var.ip_pool
}
data "openstack_images_image_v2" "ubuntu" {
name = "ubuntu-22.04-x86_64-server-cloudimg-20240429"
}
data "openstack_compute_flavor_v2" "master-flavor" {
name = var.master_flavor_name
}
data "openstack_compute_flavor_v2" "worker-flavor" {
name = var.worker_flavor_name
}
data "openstack_compute_flavor_v2" "gpu-flavor" {
name = var.gpu_flavor_name
}
resource "openstack_compute_instance_v2" "master" {
name = "k8s-${var.site_name}-master"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.master-flavor.id
security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["master"]
network {
uuid = openstack_networking_network_v2.local-network.id
}
}
resource "openstack_compute_instance_v2" "nfs" {
name = "k8s-${var.site_name}-nfs"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.worker-flavor.id
security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
uuid = openstack_networking_network_v2.local-network.id
}
}
resource "openstack_compute_instance_v2" "ingress" {
name = "k8s-${var.site_name}-w-ingress"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.worker-flavor.id
security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name, openstack_networking_secgroup_v2.http.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
uuid = openstack_networking_network_v2.local-network.id
}
}
resource "openstack_compute_instance_v2" "worker" {
count = var.extra_workers
name = "k8s-${var.site_name}-worker-${count.index}"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.worker-flavor.id
security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
uuid = openstack_networking_network_v2.local-network.id
}
}
resource "openstack_compute_instance_v2" "gpu" {
count = var.gpu_workers
name = "k8s-${var.site_name}-gpu-${count.index}"
image_id = data.openstack_images_image_v2.ubuntu.id
flavor_id = data.openstack_compute_flavor_v2.gpu-flavor.id
security_groups = ["default", openstack_networking_secgroup_v2.ping.name, openstack_networking_secgroup_v2.ssh.name]
user_data = file("cloud-init.yaml")
tags = ["worker"]
network {
uuid = openstack_networking_network_v2.local-network.id
}
}
resource "openstack_compute_floatingip_associate_v2" "fip" {
floating_ip = openstack_networking_floatingip_v2.public_ip.address
instance_id = openstack_compute_instance_v2.ingress.id
}
resource "openstack_blockstorage_volume_v3" "nfs-volume" {
name = "nfs"
size = var.nfs_volume_size
}
resource "openstack_compute_volume_attach_v2" "nfs-volume-attach" {
instance_id = openstack_compute_instance_v2.nfs.id
volume_id = openstack_blockstorage_volume_v3.nfs-volume.id
}
resource "local_file" "volume-script" {
filename = "nfs-volume.sh"
file_permission = "0755"
content = <<EOT
#! /bin/bash -xe
if ! dpkg-query -s xfsprogs >/dev/null 2>&1; then
apt-get update
apt-get install -y xfsprogs
fi
device="${openstack_compute_volume_attach_v2.nfs-volume-attach.device}"
mkfs.xfs -L NFS "$device" || true
grep -q 'LABEL=NFS' /etc/fstab || /bin/echo -e "LABEL=NFS\t/exports\txfs\tdefaults,uquota,pquota\t0\t0" | tee -a /etc/fstab
mkdir /exports 2>/dev/null || true
mount -a
EOT
}
resource "openstack_blockstorage_volume_v3" "docker-volume" {
count = var.extra_workers + var.gpu_workers + 2
name = format("docker-%s", local.nodes[count.index].name)
size = var.docker_volumes_size
}
resource "openstack_compute_volume_attach_v2" "docker-volume-attach" {
count = var.extra_workers + var.gpu_workers + 2
instance_id = local.nodes[count.index].id
volume_id = openstack_blockstorage_volume_v3.docker-volume[count.index].id
}
resource "local_file" "docker-volume-script" {
filename = "docker-volume.sh"
file_permission = "0755"
content = <<EOT
#! /bin/bash -xe
volumes="${join("\n", [for n, d in zipmap(tolist(local.nodes[*].name), tolist(openstack_compute_volume_attach_v2.docker-volume-attach[*].device)) : format("%s:%s", n, d)])}"
volume=$(echo "$volumes" | grep "$(hostname):")
device=$(echo "$volume" | cut -d: -f2)
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
mkfs.ext4 -L DOCKER "$device"
grep -q 'LABEL=DOCKER' /etc/fstab || /bin/echo -e "LABEL=DOCKER\t/var/lib/docker/overlay2\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
mkdir -p /var/lib/docker/overlay2 2>/dev/null || true
systemctl stop docker kubelet >/dev/null 2>&1 || true
sleep 10
systemctl stop docker kubelet >/dev/null 2>&1 || true
umount /var/lib/docker/overlay2 2>&1 || true
mount "$device" /mnt
mv /var/lib/docker/overlay2/* /mnt >/dev/null 2>&1 || true
umount /mnt
mount -a
systemctl start docker kubelet >/dev/null 2>&1 || true
fi
EOT
}
resource "openstack_blockstorage_volume_v3" "squid-volume" {
name = "squid"
size = var.squid_volume_size
}
resource "openstack_compute_volume_attach_v2" "squid-volume-attach" {
instance_id = openstack_compute_instance_v2.ingress.id
volume_id = openstack_blockstorage_volume_v3.squid-volume.id
}
resource "local_file" "squid-volume-script" {
filename = "squid-volume.sh"
file_permission = "0755"
content = <<EOT
#! /bin/bash -xe
device="${openstack_compute_volume_attach_v2.squid-volume-attach.device}"
if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
mkfs.ext4 -L SQUID "$device"
fi
grep -q 'LABEL=SQUID' /etc/fstab || /bin/echo -e "LABEL=SQUID\t/var/spool/squid\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
mkdir /var/spool/squid 2>/dev/null || true
mount -a
EOT
}
resource "local_file" "inventory" {
filename = "inventory.yaml"
file_permission = "0644"
content = <<EOT
---
fip:
hosts:
${openstack_networking_floatingip_v2.public_ip.address}:
master:
hosts:
${local.master_ip}:
# must be IPv4 address or hostname
kube_server: ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}
ingress:
hosts:
${local.ingress_ip}:
nfs:
hosts:
${local.nfs_ip}:
worker:
hosts:
${join("\n ", [for s in local.worker_ips: "${s}:"])}
gpu:
hosts:
${join("\n ", [for s in local.gpu_ips : "${s}:"])}
# using public IP of kube_server for ansible delegate_to
kube_server:
hosts:
${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}:
ansible_host: ${local.master_ip}
EOT
}
resource "local_file" "fip" {
filename = "fip.txt"
file_permission = "0644"
content = <<EOT
${openstack_networking_floatingip_v2.public_ip.address}
EOT
}
resource "local_file" "hosts" {
filename = "hosts.txt"
file_permission = "0644"
content = <<EOT
${local.master_ip}
${local.ingress_ip}
${local.nfs_ip}
${join("\n", concat(local.worker_ips, local.gpu_ips))}
EOT
}
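
Besides the cloud resources, the plan above renders helper artefacts locally: nfs-volume.sh, docker-volume.sh, squid-volume.sh, inventory.yaml, fip.txt and hosts.txt. A rough usage sketch, assuming the generated inventory is consumed by the common playbooks in this repository (upgrade.yaml is the playbook symlinked earlier in this commit; running it against this inventory and the exact paths are assumptions, not something stated here):

# after terraform apply, the generated inventory can drive Ansible
ansible -i inventory.yaml all -m ping
# paths are illustrative; adjust to the deployment layout
ansible-playbook -i inventory.yaml upgrade.yaml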
@@ -39,9 +39,8 @@ security_public_cidr = {
   "78.128.247.175/32": "",
   "2001:718:ff05:acb::/64": "",
   "2001:718:ff05:acc::/64": "",
-  "147.251.21.79/32": "",
   # admin machine (OpenStack project router)
-  "147.228.21.79/32": "",
+  "147.251.21.79/32": "",
   # admin machine
   "2001:718:801:432:f816:3eff:feab:fbc8/128": "",
   # EGI VPN
data "openstack_networking_network_v2" "public-network" {
name = var.ip_pool
}
resource "openstack_networking_network_v2" "local-network" {
name = "local-network"
admin_state_up = "true"
}
resource "openstack_networking_subnet_v2" "local-network-subnet" {
name = "local-network-subnet"
network_id = openstack_networking_network_v2.local-network.id
cidr = "192.168.0.0/24"
}
resource "openstack_networking_router_v2" "local-router" {
name = "local-router"
admin_state_up = true
external_network_id = data.openstack_networking_network_v2.public-network.id
}
resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.local-router.id
subnet_id = openstack_networking_subnet_v2.local-network-subnet.id
}
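
Once applied, the private network plumbing defined here can be inspected from the OpenStack side; a quick check with the standard CLI (resource names as defined above):

openstack network show local-network
openstack subnet show local-network-subnet
openstack router show local-router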
../../common/terraform/network.tf
\ No newline at end of file