From a306145e7e62487340dc532cddd1036e3e614b43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franti=C5=A1ek=20Dvo=C5=99=C3=A1k?= <valtri@civ.zcu.cz>
Date: Fri, 30 Aug 2024 20:34:43 +0000
Subject: [PATCH] Kubernetes upgrade 1.30.2 -> 1.31.0

Components upgraded:

* k8s
* calico
* helm
* ingress
* cert-manager
* prometheus
* grafana
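
Existing clusters can be rolled to the new release with the upgrade
playbook; its usage comment (updated below) shows the invocation,
presumably run from the common/ directory:

  VERSION=1.31.0
  ansible-playbook playbooks/upgrade.yaml --extra-vars "version=$VERSION"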
---
 common/playbooks/files/calico.yaml | 13 ++++++++-----
 common/playbooks/k8s.yaml          | 20 ++++++++++++--------
 common/playbooks/upgrade.yaml      |  9 +++++----
 3 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/common/playbooks/files/calico.yaml b/common/playbooks/files/calico.yaml
index 7f4cb47..f7f2980 100644
--- a/common/playbooks/files/calico.yaml
+++ b/common/playbooks/files/calico.yaml
@@ -4796,7 +4796,7 @@ spec:
         # It can be deleted if this is a fresh installation, or if you have already
         # upgraded to use calico-ipam.
         - name: upgrade-ipam
-          image: docker.io/calico/cni:v3.28.0
+          image: docker.io/calico/cni:v3.28.1
           imagePullPolicy: IfNotPresent
           command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
           envFrom:
@@ -4824,7 +4824,7 @@ spec:
         # This container installs the CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: docker.io/calico/cni:v3.28.0
+          image: docker.io/calico/cni:v3.28.1
           imagePullPolicy: IfNotPresent
           command: ["/opt/cni/bin/install"]
           envFrom:
@@ -4867,7 +4867,7 @@ spec:
         # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
         # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
         - name: "mount-bpffs"
-          image: docker.io/calico/node:v3.28.0
+          image: docker.io/calico/node:v3.28.1
           imagePullPolicy: IfNotPresent
           command: ["calico-node", "-init", "-best-effort"]
           volumeMounts:
@@ -4893,7 +4893,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: docker.io/calico/node:v3.28.0
+          image: docker.io/calico/node:v3.28.1
           imagePullPolicy: IfNotPresent
           envFrom:
           - configMapRef:
@@ -5030,9 +5030,11 @@ spec:
         - name: var-run-calico
           hostPath:
             path: /var/run/calico
+            type: DirectoryOrCreate
         - name: var-lib-calico
           hostPath:
             path: /var/lib/calico
+            type: DirectoryOrCreate
         - name: xtables-lock
           hostPath:
             path: /run/xtables.lock
@@ -5053,6 +5055,7 @@ spec:
         - name: cni-bin-dir
           hostPath:
             path: /opt/cni/bin
+            type: DirectoryOrCreate
         - name: cni-net-dir
           hostPath:
             path: /etc/cni/net.d
@@ -5110,7 +5113,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: calico-kube-controllers
-          image: docker.io/calico/kube-controllers:v3.28.0
+          image: docker.io/calico/kube-controllers:v3.28.1
           imagePullPolicy: IfNotPresent
           env:
             # Choose which controllers to run.
diff --git a/common/playbooks/k8s.yaml b/common/playbooks/k8s.yaml
index ef46de4..31d9f19 100644
--- a/common/playbooks/k8s.yaml
+++ b/common/playbooks/k8s.yaml
@@ -155,10 +155,10 @@
         # kube_nvidia_device_plugin_version: "v0.12.2"
         # kube_nvidia_driver_version: "515" # "525"
         kube_nvidia_support: true
-        kube_version: 1.30.2
+        kube_version: 1.31.0
         kube_network: 'none'  # custom network installation
         kube_install_helm: true
-        kube_install_helm_version: 'v3.15.2'
+        kube_install_helm_version: 'v3.15.4'
         kube_install_metrics: true
   tasks:
     - name: Create kubectl config dir
@@ -183,11 +183,11 @@
 - name: K8s network deployment
   hosts: master
   vars:
-    calicoctl_version: 3.28.0
+    calicoctl_version: 3.28.1
   tasks:
     - name: Calico config
       copy:
-        # https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/calico.yaml
+        # https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml
         src: files/calico.yaml
         dest: /tmp/calico-net.yaml
         mode: 0644
@@ -218,7 +218,7 @@
         # must be IPv4 address or hostname
         kube_server: "{{ hostvars[groups['master'][0]].kube_server | default(groups['master'][0]) }}"
         kube_type_of_node: wn
-        kube_version: 1.30.2
+        kube_version: 1.31.0
         kubelet_extra_args: '--volume-stats-agg-period 0'
   tasks:
     - name: Overlay2 mountpoint workaround to docker.service unit
@@ -348,11 +348,13 @@
       when: "'ingress-nginx' not in ansible_local.helm_repos | map(attribute='name') | list"
     - name: Ingress
       vars:
+        version: 4.11.2 # app 1.11.2
         config: >-
           --set controller.service.type=NodePort
           --set controller.service.externalIPs={{ '{' + hostvars[groups['ingress'][0]].ansible_default_ipv4.address + '}' }}
           --set controller.config.proxy-body-size=0
           --set controller.allowSnippetAnnotations=false
+          --version={{ version }}
       shell: |-
         helm status --namespace kube-system cluster-ingress
         if [ $? -ne 0 ]; then
@@ -366,7 +368,7 @@
       when: true
     - name: Cert-manager
       vars:
-        version: 1.15.0
+        version: 1.15.3
         config: >-
           --version={{ version }}
           --set ingressShim.defaultIssuerName=letsencrypt-prod
@@ -477,8 +479,9 @@
                         description: "Disk is almost full {{ '{{ $value | printf \"%.2f\" }}' }}% (limit {{ limit_disk_warn }}%)"
     - name: Prometheus
       vars:
+        version: 25.27.0 # app v2.54.1
         config: >-
-          --version=25.21.0
+          --version={{ version }}
           -f /tmp/prometheus.yaml
       shell: |-
         helm status --namespace prometheus prometheus
@@ -525,8 +528,9 @@
               enabled: true
     - name: Grafana
       vars:
+        version: 8.5.0 # app 11.1.5
         config: >-
-          --version=8.0.2
+          --version={{ version }}
           -f /tmp/grafana.yaml
       shell: |-
         helm status --namespace grafana grafana
diff --git a/common/playbooks/upgrade.yaml b/common/playbooks/upgrade.yaml
index 2c76219..ff5546d 100644
--- a/common/playbooks/upgrade.yaml
+++ b/common/playbooks/upgrade.yaml
@@ -6,7 +6,7 @@
 #
 # Usage example:
 #
-# VERSION=1.30.2
+# VERSION=1.31.0
 # ansible-playbook playbooks/upgrade.yaml --extra-vars "version=$VERSION"
 #
 - name: Upgrade and hold kubeadm package
@@ -15,8 +15,9 @@
   tasks:
     - name: New k8s repository
       copy:
-        dest: /etc/apt/sources.list.d/pkgs_k8s_io_core_stable_v1_30_deb.list
-        content: deb https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /
+        dest: /etc/apt/sources.list.d/pkgs_k8s_io_core_stable_v1_31_deb.list
+        content: |
+          deb https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /
         mode: 0644
     - name: Upgrade packages
       apt:
@@ -72,7 +73,7 @@
         name: kubelet
     - name: Cleanup old k8s repository
       file:
-        path: /etc/apt/sources.list.d/pkgs_k8s_io_core_stable_v1_29_deb.list
+        path: /etc/apt/sources.list.d/pkgs_k8s_io_core_stable_v1_30_deb.list
         state: absent
 
 
-- 
GitLab