Skip to content
Snippets Groups Projects
Commit 2a0d4a21 authored by František Dvořák's avatar František Dvořák
Browse files

Moar refactoring and cleanups - common directory for ansible

parent 1a8f3ff2
No related branches found
No related tags found
No related merge requests found
Showing
with 5428 additions and 11 deletions
../../../cesnet-central/playbooks/files/etc ../../../common/playbooks/files/etc
\ No newline at end of file \ No newline at end of file
../../../cesnet-central/playbooks/files/usr ../../../common/playbooks/files/usr
\ No newline at end of file \ No newline at end of file
../../cesnet-central/playbooks/k8s.yaml ../../common/playbooks/k8s.yaml
\ No newline at end of file \ No newline at end of file
../../cesnet-central/playbooks/squid.yaml ../../common/playbooks/squid.yaml
\ No newline at end of file \ No newline at end of file
# export the NFS directory to all the cluster members (IPv4 and, where
# available, IPv6); the IPv6 entry is guarded because ansible_default_ipv6
# is an empty dict on hosts without a default IPv6 route, and dereferencing
# .address there would abort the whole template
/exports {% for host in groups['allnodes'] -%}{{ hostvars[host].ansible_default_ipv4.address }}(rw,async,no_root_squash,no_subtree_check) {% if hostvars[host].ansible_default_ipv6.address is defined -%}{{ hostvars[host].ansible_default_ipv6.address }}(rw,async,no_root_squash,no_subtree_check) {% endif -%}{% endfor -%}
../../../../common/playbooks/templates/etc/exports.ipv46
\ No newline at end of file
../../../../cesnet-central/playbooks/templates/etc/mailutils.conf ../../../../common/playbooks/templates/etc/mailutils.conf
\ No newline at end of file \ No newline at end of file
../../../../cesnet-central/playbooks/templates/etc/squid ../../../../common/playbooks/templates/etc/squid
\ No newline at end of file \ No newline at end of file
/home/valtri/notebooks-operations.eosc/cesnet-central/playbooks/upgrade.yaml ../../common/playbooks/upgrade.yaml
\ No newline at end of file \ No newline at end of file
---
# Install and configure the CVMFS client on all nodes that run user
# workloads, and mount the configured repositories.  The squid proxy on
# the first ingress node is used both as an apt proxy for the CVMFS
# package repository and as the CVMFS client HTTP proxy.
- name: CVMFS deployment
  hosts: ingress, nfs, worker, gpu
  vars:
    # EGI repositories: gridpp.egi.eu eosc.egi.eu pheno.egi.eu mice.egi.eu ghost.egi.eu wenmr.egi.eu neugrid.egi.eu auger.egi.eu dirac.egi.eu galdyn.egi.eu seadatanet.egi.eu ligo.egi.eu supernemo.egi.eu pravda.egi.eu chipster.egi.eu hyperk.egi.eu snoplus.egi.eu km3net.egi.eu t2k.egi.eu na62.egi.eu biomed.egi.eu eiscat.egi.eu comet.egi.eu notebooks.egi.eu
    cvmfs_repositories:
      - cvmfs-config.cern.ch # required
      - atlas.cern.ch
      - cms.cern.ch
      - grid.cern.ch
      - auger.egi.eu
      - biomed.egi.eu
      - dirac.egi.eu
      - eiscat.egi.eu
      - notebooks.egi.eu
  become: true
  tasks:
    # rc == 0: package installed, rc == 1: not installed; anything else is
    # a real dpkg failure
    - name: Check cvmfs apt repository
      command:
        cmd: dpkg-query -W cvmfs-release
      register: cvmfs_release_check_deb
      failed_when: cvmfs_release_check_deb.rc > 1
      changed_when: false
    # Avoid occasional network failures (partially)
    - name: Set cvmfs apt repository proxy cache
      copy:
        dest: /etc/apt/apt.conf.d/99cvmfs-proxy
        mode: "0644"
        content: |
          Acquire::http::Proxy {
            cvmrepo.web.cern.ch "http://{{ groups['ingress'][0] | ansible.utils.ipwrap }}:3128";
          };
    - name: Install and setup cvmfs apt repository
      vars:
        f: cvmfs-release-latest_all.deb
      # only when dpkg-query reported the package missing (rc == 1)
      when: cvmfs_release_check_deb.rc | default(0) == 1
      block:
        - name: Download cvmfs-release latest package
          get_url:
            url: https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/{{ f }}
            dest: /tmp/{{ f }}
            mode: "0644"
        - name: Install cvmfs-release latest package
          apt:
            deb: /tmp/{{ f }}
        - name: Update apt cache with cvmfs apt repository
          apt:
            update_cache: true
    - name: Install cvmfs
      package:
        name: cvmfs
        state: present
    # client-wide CVMFS configuration: route all repository traffic through
    # the cluster-local squid proxy
    - name: Config cvmfs
      copy:
        dest: /etc/cvmfs/default.local
        mode: "0644"
        content: |
          CVMFS_HTTP_PROXY=http://{{ groups['ingress'][0] | ansible.utils.ipwrap }}:3128
    - name: Setup and mount cvmfs repository {{ item }}
      ansible.posix.mount:
        path: /cvmfs/{{ item }}
        src: "{{ item }}"
        fstype: cvmfs
        opts: defaults,_netdev,nodev,x-systemd.requires-mounts-for=/cvmfs/config-egi.egi.eu
        state: mounted
      loop: "{{ cvmfs_repositories }}"
    # keep locate/updatedb from scanning the (huge, network-backed) /cvmfs tree
    - name: Check updatedb.conf existence
      stat:
        path: /etc/updatedb.conf
      register: register_updatedb
    - name: Tune updatedb.conf - ensure /cvmfs in PRUNEPATHS
      lineinfile:
        path: /etc/updatedb.conf
        backrefs: true
        regex: '^(\s*PRUNEPATHS\s*=\s*)"(.*?)\s*(/cvmfs\s*)?"\s*$'
        line: '\1"\2 /cvmfs"'
      when: register_updatedb.stat.exists
This diff is collapsed.
File moved
---
# Deploy/upgrade one JupyterHub instance (plus its monitor chart) per file
# found in ../deployments/*.yaml, on the k8s master node.  Per-deployment
# secrets come from Vault under <vault_mount_point>/deployment-<name>.
- name: Notebooks deployments
  hosts: master
  become: true
  tasks:
    - name: Configure helm repo
      shell: |-
        helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
        helm repo add eginotebooks https://egi-federation.github.io/egi-notebooks-chart/
        helm repo update
      # only touch helm when one of the two repos is not yet registered
      # (ansible_local.helm_repos is a custom local fact)
      when: "'jupyterhub' not in ansible_local.helm_repos | map(attribute='name') | list or
             'eginotebooks' not in ansible_local.helm_repos | map(attribute='name') | list"
    # build a dict {deployment_name: secret_data}, one entry per deployment file
    - name: Get Secrets from Vault for notebooks
      vars:
        name: "{{ item | basename | splitext | first }}"
      set_fact:
        secrets: "{{ secrets|default({}) | combine({name: lookup('community.hashi_vault.hashi_vault', vault_mount_point + '/deployment-' + name,
          token_validate=false)}) }}"
      with_fileglob:
        - "../deployments/*.yaml"
    - name: Debug Deployments Secrets
      debug:
        msg: "{{ item.key }} = {{ item.value }}"
      loop: "{{ secrets | dict2items }}"
    # render each deployment file (it is a template that may reference
    # 'secret') onto the master; 0600 because it carries credentials
    - name: Copy config file to master
      vars:
        name: "{{ item | basename | splitext | first }}"
        secret: "{{ secrets[name] }}"
      template:
        src: "{{ item }}"
        dest: "/tmp/{{ item | basename }}"
        mode: "0600"
      with_fileglob:
        - "../deployments/*.yaml"
    # install-or-upgrade pattern: "helm status" probes whether the release
    # exists, then either install or upgrade is performed
    - name: Deploy/upgrade notebook instance
      vars:
        name: "{{ item | basename | splitext | first }}"
        version: "3.2.1" # app 4.0.2 (2023-11-27)
        monitor_version: "0.3.1"
      shell: |-
        helm status --namespace {{ name }} {{ name }}
        if [ $? -ne 0 ]; then
          helm install --create-namespace --namespace {{ name }} \
            -f /tmp/{{ item | basename }} --version {{ version }} --timeout 2h \
            {{ name }} jupyterhub/jupyterhub
        else
          helm upgrade --version {{ version }} -f /tmp/{{ item | basename }} --timeout 2h \
            --namespace {{ name }} {{ name }} jupyterhub/jupyterhub
        fi
        helm status --namespace {{ name }} {{ name }}-monitor
        if [ $? -ne 0 ]; then
          helm install --namespace {{ name }} \
            -f /tmp/{{ item | basename }} --version {{ monitor_version }} \
            {{ name }}-monitor eginotebooks/notebooks-monitor
        else
          helm upgrade --version {{ monitor_version }} \
            -f /tmp/{{ item | basename }} --namespace {{ name }} \
            {{ name }}-monitor eginotebooks/notebooks-monitor
        fi
      environment:
        KUBECONFIG: /etc/kubernetes/admin.conf
        PATH: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
      with_fileglob:
        - "../deployments/*.yaml"
    # allow the hub service account to manage secrets in its own namespace
    - name: Configure secrets management for the hub
      vars:
        name: "{{ item | basename | splitext | first }}"
      shell: |-
        kubectl apply -f - << EOF
        ---
        kind: Role
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: hub-secrets
          namespace: {{ name }}
        rules:
          - apiGroups: [""] # "" indicates the core API group
            resources: ["secrets"]
            verbs: ["get", "watch", "list", "create", "delete", "patch", "update"]
        ---
        kind: RoleBinding
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: hub-secrets
          namespace: {{ name }}
        subjects:
          - kind: ServiceAccount
            name: hub
            namespace: {{ name }}
        roleRef:
          kind: Role
          name: hub-secrets
          apiGroup: rbac.authorization.k8s.io
        EOF
      environment:
        KUBECONFIG: /etc/kubernetes/admin.conf
        PATH: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
      with_fileglob:
        - "../deployments/*.yaml"
    # do the extra bits of configuration
    # here we should have all the namespaces, pre-requirements in place
    # XXX: this won't remove things that are delete from the directory
    - name: Copy extra configuration files
      copy:
        src: "{{ item }}"
        dest: "/tmp/{{ item | basename }}"
        mode: "0600"
      with_fileglob:
        - "../extra/*.yaml"
    - name: Extra configuration
      command: |-
        kubectl apply -f /tmp/{{ item | basename }}
      environment:
        KUBECONFIG: /etc/kubernetes/admin.conf
      with_fileglob:
        - "../extra/*.yaml"
    # Workaround for pods stuck in "Terminating" state
    - name: K8s pods cleaner script
      copy:
        dest: /usr/local/bin/k8s-pods-cleaner.sh
        src: files/usr/local/bin/k8s-pods-cleaner.sh
        mode: preserve
    # Workaround for pods stuck in "Terminating" state
    - name: Regular cleanup of failed user notebooks pods
      vars:
        name: "{{ item | basename | splitext | first }}"
      cron:
        cron_file: "notebooks-{{ name }}-cleaner"
        name: "Notebooks {{ name }} cleanup"
        minute: "*"
        hour: "*"
        job: "KUBECONFIG=$HOME/.kube/config /usr/local/bin/k8s-pods-cleaner.sh '{{ name }}' --yes >/dev/null 2>&1"
        user: egi
      with_fileglob:
        - "../deployments/*.yaml"
# Per-deployment disk quota enforcement on the NFS server: one cron entry
# per deployment file, running every two hours.
- hosts: nfs
  become: true
  tasks:
    - name: Quota settings
      vars:
        # deployment name = file name without directory and extension
        name: "{{ item | basename | splitext | first }}"
      cron:
        cron_file: notebook-quotas
        name: "{{ name }} quotas"
        minute: "0"
        hour: "*/2"
        # apply quotas to the deployment's export directories, except the
        # hub database volume
        job: "/usr/local/bin/xfs-quotas.sh --include ^/exports/{{ name }}- --exclude ^/exports/{{ name }}-hub-db-dir-"
        user: root
      with_fileglob:
        - "../deployments/*.yaml"
---
# Touch every node first so hostvars[*].ansible_default_ipv4/ipv6 are
# populated for templates used later (e.g. the NFS exports file); the
# debug task just surfaces the gathered addresses in the play output.
- name: Gather facts on all nodes
  hosts: allnodes
  become: true
  tasks:
    - name: Gather facts on the node
      debug:
        msg: "IPv4: {{ ansible_default_ipv4.address | default('') }}, IPv6: {{ ansible_default_ipv6.address | default('') }}"
# Install and configure a squid proxy on the first ingress node; it serves
# as the shared HTTP cache for CVMFS and the apt repositories.
- name: Squid proxy deployment
  hosts: ingress[0]
  become: true
  tasks:
    - name: Install squid
      package:
        name: squid
        state: present
      # full-fledge restart needed to build cache
      notify: Restart squid
    # https://cvmfs.readthedocs.io/en/stable/cpt-squid.html
    - name: Configure squid
      lineinfile:
        regexp: '^\s*{{ item.key }}\s+.*'
        line: "{{ item.key }} {{ item.value }}"
        path: /etc/squid/squid.conf
      loop: "{{ config | dict2items }}"
      vars:
        # cache sizing recommended for CVMFS squid frontends (see link above)
        config:
          collapsed_forwarding: "on"
          minimum_expiry_time: 0
          maximum_object_size: 1024 MB
          cache_mem: 128 MB
          maximum_object_size_in_memory: 128 KB
          cache_dir: ufs /var/spool/squid 81920 16 256
      notify: Reload squid
    - name: Configure squid - ACL allcluster
      template:
        src: templates/etc/squid/conf.d/allcluster.conf
        dest: /etc/squid/conf.d/allcluster.conf
        mode: "0644"
      notify: Reload squid
  handlers:
    - name: Restart squid
      service:
        name: squid
        state: restarted
    - name: Reload squid
      service:
        name: squid
        state: reloaded
# export the NFS directory to all the cluster members, addressed by their
# inventory host names (requires the NFS server to resolve those names)
/exports {% for host in groups['allnodes'] -%}{{ host }}(rw,async,no_root_squash,no_subtree_check) {% endfor -%}
# export the NFS directory to all the cluster members, addressed by their
# default IPv4 addresses as gathered by Ansible facts
/exports {% for host in groups['allnodes'] -%}{{ hostvars[host].ansible_default_ipv4.address }}(rw,async,no_root_squash,no_subtree_check) {% endfor -%}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment