From 91a2e4f40a424a02175432925a4052472ac10787 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franti=C5=A1ek=20Dvo=C5=99=C3=A1k?= <valtri@civ.zcu.cz>
Date: Thu, 2 May 2024 07:17:36 +0000
Subject: [PATCH] EOSC LOT3 third demo

---
 demo/ansible.cfg                     |   5 +
 demo/deploy.sh                       |  53 ++++
 demo/deployments/hub.yaml            | 405 +++++++++++++++++++++++++++
 demo/inventory/99-all.yaml           |  17 ++
 demo/playbooks/cvmfs.yaml            |   1 +
 demo/playbooks/files/calico.yaml     |   1 +
 demo/playbooks/files/helm_repos.fact |   1 +
 demo/playbooks/files/k8s-cheats.sh   |   1 +
 demo/playbooks/files/xfs-quotas.sh   |   1 +
 demo/playbooks/k8s.yaml              |   1 +
 demo/playbooks/notebooks.yaml        | 104 +++++++
 demo/playbooks/public_keys           |   1 +
 demo/playbooks/squid.yaml            |   1 +
 demo/playbooks/templates/etc/exports |   2 +
 demo/playbooks/templates/etc/squid   |   1 +
 demo/terraform/.gitignore            |   8 +
 demo/terraform/cloud-init.yaml       |  18 ++
 demo/terraform/terraform.tfvars      |  23 ++
 demo/terraform/vars.tf               |  64 +++++
 demo/terraform/versions.tf           |  10 +
 demo/terraform/vms.tf                | 335 ++++++++++++++++++++++
 21 files changed, 1053 insertions(+)
 create mode 100644 demo/ansible.cfg
 create mode 100755 demo/deploy.sh
 create mode 100644 demo/deployments/hub.yaml
 create mode 100644 demo/inventory/99-all.yaml
 create mode 120000 demo/playbooks/cvmfs.yaml
 create mode 120000 demo/playbooks/files/calico.yaml
 create mode 120000 demo/playbooks/files/helm_repos.fact
 create mode 120000 demo/playbooks/files/k8s-cheats.sh
 create mode 120000 demo/playbooks/files/xfs-quotas.sh
 create mode 120000 demo/playbooks/k8s.yaml
 create mode 100644 demo/playbooks/notebooks.yaml
 create mode 120000 demo/playbooks/public_keys
 create mode 120000 demo/playbooks/squid.yaml
 create mode 100644 demo/playbooks/templates/etc/exports
 create mode 120000 demo/playbooks/templates/etc/squid
 create mode 100644 demo/terraform/.gitignore
 create mode 100644 demo/terraform/cloud-init.yaml
 create mode 100644 demo/terraform/terraform.tfvars
 create mode 100644 demo/terraform/vars.tf
 create mode 100644 demo/terraform/versions.tf
 create mode 100644 demo/terraform/vms.tf

diff --git a/demo/ansible.cfg b/demo/ansible.cfg
new file mode 100644
index 0000000..c3a73be
--- /dev/null
+++ b/demo/ansible.cfg
@@ -0,0 +1,5 @@
+[defaults]
+inventory=inventory
+
+[diff]
+always=true
diff --git a/demo/deploy.sh b/demo/deploy.sh
new file mode 100755
index 0000000..b05f2f5
--- /dev/null
+++ b/demo/deploy.sh
@@ -0,0 +1,53 @@
+#! /bin/bash -xe
+
+#
+# EOSC LOT3 Jupyter Notebooks Demo
+#
+
+cd terraform && terraform init && terraform apply
+cd -
+cp -pv terraform/inventory.yaml inventory/1-cesnet.yaml
+
+# dynamic DNS
+ip="$(head -n 1 < terraform/fip.txt)"
+shellstate=$(shopt -po xtrace)
+set +o xtrace
+# https://nsupdate.fedcloud.eu
+vault_prefix=secrets/users/e1662e20-e34b-468c-b0ce-d899bc878364@egi.eu/eosc-demo
+FEDCLOUD_DYNAMIC_DNS=$(vault read -field data $vault_prefix/FEDCLOUD_DYNAMIC_DNS | grep ^map | head -n 1 | sed 's/map\[\(.*\)\]/\1/')
+for auth in $FEDCLOUD_DYNAMIC_DNS; do
+	echo "curl -i -X GET -u $(echo "$auth" | cut -d: -f1):XXX https://nsupdate.fedcloud.eu/nic/update?myip=$ip"
+	curl -i -X GET -u "$auth" https://nsupdate.fedcloud.eu/nic/update?myip="$ip"
+done
+eval "$shellstate"
+
+# wait for ping and ssh
+for ip in $(cat terraform/hosts.txt); do
+	while ! ping -c 1 "$ip"; do sleep 5; done
+	ssh-keygen -R "$ip"
+	while ! ssh egi@"$ip" -o ConnectTimeout=10 -o PreferredAuthentications=publickey -o StrictHostKeyChecking=no :; do sleep 10; done
+done
+
+# check ssh access
+ansible -m command -a 'uname -a' allnodes
+
+# wait cloud-init
+ansible -m shell -a 'while ! test -f /var/lib/cloud/instance/boot-finished; do sleep 2; done' allnodes
+
+# setup volumes
+ansible -m copy -a 'src=terraform/nfs-volume.sh dest=/root/ mode=preserve' nfs
+ansible -m command -a '/root/nfs-volume.sh' nfs
+ansible -m copy -a 'src=terraform/squid-volume.sh dest=/root/ mode=preserve' 'ingress[0]'
+ansible -m command -a '/root/squid-volume.sh' 'ingress[0]'
+ansible -m copy -a 'src=terraform/docker-volume.sh dest=/root/ mode=preserve' 'ingress nfs worker'
+ansible -m command -a '/root/docker-volume.sh' 'ingress nfs worker'
+ansible -m copy -a 'src=terraform/scratch-volume.sh dest=/root/ mode=preserve' 'ingress nfs worker'
+ansible -m command -a '/root/scratch-volume.sh' 'ingress nfs worker'
+
+# k8s + notebooks
+ansible-playbook playbooks/k8s.yaml
+ansible-playbook playbooks/squid.yaml
+ansible-playbook playbooks/cvmfs.yaml
+
+# wait for finish
+while ansible -m command -a 'kubectl get pods --all-namespaces' master | tail -n +3 | grep -v ' Running '; do sleep 5; done
diff --git a/demo/deployments/hub.yaml b/demo/deployments/hub.yaml
new file mode 100644
index 0000000..8c5798a
--- /dev/null
+++ b/demo/deployments/hub.yaml
@@ -0,0 +1,405 @@
+---
+proxy:
+  service:
+    type: NodePort
+
+ingress:
+  enabled: true
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    kubernetes.io/tls-acme: "true"
+  hosts:
+    - "{{ notebooks_hostname }}"
+  tls:
+    - hosts:
+        - "{{ notebooks_hostname }}"
+      secretName: acme-tls-hub
+
+singleuser:
+  # keep resource limits in sync with:
+  # - documentation/content/en/users/dev-env/notebooks/_index.md
+  # - documentation/content/en/users/dev-env/notebooks/data/_index.md
+  # - profileList
+  storage:
+    capacity: 20Gi
+    dynamic:
+      pvcNameTemplate: claim-{userid}{servername}
+      volumeNameTemplate: vol-{userid}{servername}
+      storageAccessModes: ["ReadWriteMany"]
+    extraVolumes:
+      - name: cvmfs-host
+        hostPath:
+          path: /cvmfs
+          type: Directory
+      - name: b2drop
+        # sizeLimit problematic in this environment,
+        # not needed for remote mounts
+        empty_dir:
+      - name: owncloud
+        # sizeLimit problematic in this environment,
+        # not needed for remote mounts
+        empty_dir:
+      - name: scratch
+        ephemeral:
+          volumeClaimTemplate:
+            spec:
+              accessModes: [ "ReadWriteOnce" ]
+              storageClassName: local-path
+              resources:
+                requests:
+                  storage: "10Gi"
+    extraVolumeMounts:
+      - name: cvmfs-host
+        mountPath: "/cvmfs:shared"
+      - name: b2drop
+        mountPath: '/mnt/b2drop:shared'
+      - name: owncloud
+        mountPath: '/mnt/owncloud:shared'
+      - name: scratch
+        mountPath: '/scratch'
+  lifecycleHooks:
+    postStart:
+      exec: { "command": ["/bin/sh", "-c", "ln -snf /mnt/b2drop $HOME/b2drop; ln -snf /mnt/owncloud $HOME/owncloud; for d in cvmfs scratch; do if ! test -e $HOME/$d; then ln -snf /$d $HOME/$d; fi; done; mkdir -p /home/jovyan/.notebookCheckpoints"] }
+  memory:
+    limit: 4G
+    guarantee: 512M
+  cpu:
+    limit: 2
+    guarantee: 0.2
+  defaultUrl: "/lab"
+  image:
+    name: valtri/single-user
+    tag: "jupyter-4"
+  profileList:
+    - display_name: Small Environment - 2 vCPU / 4 GB RAM
+      description: >
+        The notebook environment includes Python, R, Julia and Octave kernels.
+      default: true
+      kubespawner_override:
+        args:
+          - "--CondaKernelSpecManager.env_filter='/opt/conda$'"
+    - display_name: Medium Environment - 4 vCPU / 8 GB RAM
+      description: >
+        The notebook environment includes Python, R, Julia and Octave kernels.
+      kubespawner_override:
+        args:
+          - "--CondaKernelSpecManager.env_filter='/opt/conda$'"
+        cpu_guarantee: 0.4
+        cpu_limit: 4
+        mem_guarantee: 1G
+        mem_limit: 8G
+    - display_name: Large Environment - 8 vCPU / 16 GB RAM / GPU
+      description: >
+        The notebook environment includes Python, R, Julia and Octave kernels with GPU.
+      kubespawner_override:
+        args:
+          - "--CondaKernelSpecManager.env_filter='/opt/conda$'"
+        cpu_guarantee: 0.8
+        cpu_limit: 8
+        mem_guarantee: 2G
+        mem_limit: 16G
+        extra_resource_guarantees:
+          nvidia.com/gpu: 1
+        extra_resource_limits:
+          nvidia.com/gpu: 1
+
+hub:
+  # services:
+  #   status:
+  #     url: "http://status-web/"
+  #     admin: true
+  image:
+    name: valtri/hub
+    tag: "3.x-eosc2" # EOSC, jupyter 3.1.0
+  config:
+    Authenticator:
+      enable_auth_state: true
+      admin_users:
+        - 529a87e5ce04cd5ddd7161734d02df0e2199a11452430803e714cb1309cc3907@egi.eu
+        - 025166931789a0f57793a6092726c2ad89387a4cc167e7c63c5d85fc91021d18@egi.eu
+        - 7ce47695f1e7fc91a1156e672f4a47576559938cdbe840355e2429e3a05b4ff8@egi.eu
+        # fdvorak2 @ aai.egi.eu
+        - 52cc7599bd1553c9d63e34e4c90b7e84d44967490c28bb4c53fe97b0c881d677@egi.eu
+        # fdvorak2 @ aai-dev.egi.eu
+        - c481e0a85e1ae0a5a1480a63e62295ca2f9ac652244947995bd4a0210fbcb77c@egi.eu
+        # jhradil3 @ aai-dev.egi.eu
+        - 240c0594fe34ac26cffd82fd0ad85f29d9ad9dfbb46febb05ed42db0bff594d1@egi.eu
+      # keep in sync with:
+      # - cesnet/playbooks/templates/binder.yaml
+      # - documentation/content/en/users/dev-env/notebooks/_index.md
+      allowed_groups:
+        - urn:geant:eosc-federation.eu:testing:group:eosc#testing.eosc-federation.eu
+      auto_login: true
+      claim_groups_key: "entitlements"
+    EGICheckinAuthenticator:
+      checkin_host: "{{ secret['checkin_host'] }}"
+      authorize_url: "https://{{ secret['checkin_host'] }}/OIDC/authorization"
+      token_url: "https://{{ secret['checkin_host'] }}/OIDC/token"
+      userdata_url: "https://{{ secret['checkin_host'] }}/OIDC/userinfo"
+      client_id: "{{ secret['client_id'] }}"
+      client_secret: "{{ secret['client_secret'] }}"
+      oauth_callback_url: "https://{{ notebooks_hostname }}/hub/oauth_callback"
+      scope: ["openid", "profile", "email", "offline_access", "entitlements"]
+      username_key: "sub"
+      extra_authorize_params:
+        prompt: consent
+    JupyterHub:
+      admin_access: true
+      authenticate_prometheus: false
+      authenticator_class: egi_notebooks_hub.egiauthenticator.EGICheckinAuthenticator
+      # spawner_class: (in egi-notebooks-b2drop)
+  extraConfig:
+    egi-notebooks-welcome: |-
+      from egi_notebooks_hub.welcome import WelcomeHandler
+      c.JupyterHub.default_url = "/welcome"
+      c.JupyterHub.extra_handlers = [(r'/welcome', WelcomeHandler)]
+    egi-notebooks-b2drop: |-
+{% raw %}
+      import base64, os
+      from jinja2 import BaseLoader
+      from jinja2 import Environment
+      from egi_notebooks_hub.onedata import OnedataSpawner
+      from kubernetes_asyncio.client.rest import ApiException
+
+      class B2DropSpawner(OnedataSpawner):
+          async def auth_state_hook(self, spawner, auth_state):
+              await super().auth_state_hook(spawner, auth_state)
+              self.b2drop_ready = False
+              self.b2drop_user = ""
+              self.b2drop_pwd = ""
+              try:
+                  secret = await self.api.read_namespaced_secret(self.token_secret_name, self.namespace)
+              except ApiException:
+                  return
+              if secret and secret.data:
+                   self.b2drop_user = base64.b64decode(secret.data.get("b2drop-user", "")).decode()
+                   self.b2drop_pwd = base64.b64decode(secret.data.get("b2drop-pwd", "")).decode()
+                   self.b2drop_ready = (self.b2drop_user and self.b2drop_pwd)
+
+          def _render_options_form(self, profile_list):
+              # old:self._profile_list = self._init_profile_list(profile_list)
+              self._profile_list = self._get_initialized_profile_list(profile_list)
+
+              profile_form_template = Environment(loader=BaseLoader).from_string(
+                  self.profile_form_template
+              )
+              return profile_form_template.render(profile_list=self._profile_list, b2drop_ready=self.b2drop_ready, b2drop_user=self.b2drop_user, b2drop_pwd=self.b2drop_pwd)
+
+          async def pre_spawn_hook(self, spawner):
+              await super(B2DropSpawner, self).pre_spawn_hook(spawner)
+              b2drop_user = self.user_options.get("b2drop-user", "")
+              b2drop_pwd = self.user_options.get("b2drop-pwd", "")
+              b2drop_remember = self.user_options.get("b2drop-remember", None)
+              if not (b2drop_user and b2drop_pwd):
+                  secret = await self.api.read_namespaced_secret(self.token_secret_name, self.namespace)
+                  if secret and secret.data:
+                      b2drop_user = base64.b64decode(secret.data.get("b2drop-user", "")).decode()
+                      b2drop_pwd = base64.b64decode(secret.data.get("b2drop-pwd", "")).decode()
+              if b2drop_user and b2drop_pwd:
+                  volume_mounts = [
+                    {"mountPath": "/b2drop:shared", "name": "b2drop"},
+                  ]
+                  spawner.extra_containers.append(
+                    {
+                        "name": "b2drop",
+                        "image": "eginotebooks/webdav-sidecar:sha-e5e8df2",
+                        "env": [
+                            {"name": "WEBDAV_URL", "value": "https://b2drop.eudat.eu/remote.php/webdav"},
+                            {"name": "WEBDAV_PWD", "value": b2drop_pwd},
+                            {"name": "WEBDAV_USER", "value": b2drop_user},
+                            {"name": "MOUNT_PATH", "value": "/b2drop"},
+                        ],
+                        "resources": self.sidecar_resources,
+                        "securityContext": {
+                            "runAsUser": 0,
+                            "privileged": True,
+                            "capabilities": {"add": ["SYS_ADMIN"]},
+                        },
+                        "volumeMounts": volume_mounts,
+                        "lifecycle": {
+                            "preStop": {
+                                "exec": {"command": ["umount", "-l", "/b2drop"]}
+                            },
+                        },
+                    }
+                  )
+              if b2drop_remember:
+                 await self._update_secret({"b2drop-user": b2drop_user,
+                                            "b2drop-pwd":  b2drop_pwd})
+              else:
+                  await self._update_secret({"b2drop-user": "", "b2drop-pwd": ""})
+
+          def options_from_form(self, formdata):
+              data = super(B2DropSpawner, self)._options_from_form(formdata)
+              data.update({'b2drop-user': formdata.get('b2drop-user', [None])[0],
+                           'b2drop-remember': formdata.get('b2drop-remember', [None])[0],
+                           'b2drop-pwd': formdata.get('b2drop-pwd', [None])[0]})
+              return data
+
+      class WebDavOIDCSpawner(B2DropSpawner):
+          async def pre_spawn_hook(self, spawner):
+              await super(WebDavOIDCSpawner, self).pre_spawn_hook(spawner)
+              auth_state = await self.user.get_auth_state()
+              # volume name as in EGI spawner
+              token_secret_volume_name = self._expand_user_properties(
+                self.token_secret_volume_name_template
+              )
+              token_path = os.path.join(self.token_mount_path, "access_token")
+              volume_mounts = [
+                {"mountPath": "/owncloud:shared", "name": "owncloud"},
+                {"mountPath": self.token_mount_path, "name": token_secret_volume_name, "readOnly": True},
+              ]
+              spawner.extra_containers.append(
+                {
+                    "name": "owncloud",
+                    "image": "valtri/webdav-rclone-sidecar:sha-1e36c50",
+                    "args": ["bearer_token_command=cat " + token_path],
+                    "env": [
+                        {"name": "WEBDAV_URL", "value": "https://ocis.aaitest.owncloud.works/remote.php/webdav/"},
+                        {"name": "WEBDAV_VENDOR", "value": "other"},
+                        {"name": "MOUNT_PATH", "value": "/owncloud"},
+                    ],
+                    "resources": self.sidecar_resources,
+                    "securityContext": {
+                        "runAsUser": 1000,
+                        "fsUser": 1000,
+                        "fsGroup": 100,
+                        "privileged": True,
+                        "capabilities": {"add": ["SYS_ADMIN"]},
+                    },
+                    "volumeMounts": volume_mounts,
+                    "lifecycle": {
+                        "preStop": {
+                            "exec": {"command": ["fusermount", "-u", "-z", "/owncloud"]}
+                        },
+                    },
+                }
+              )
+
+
+      c.JupyterHub.spawner_class = WebDavOIDCSpawner
+      c.B2DropSpawner.http_timeout = 90
+      c.B2DropSpawner.args = ["--FileCheckpoints.checkpoint_dir='/home/jovyan/.notebookCheckpoints'"]
+      c.B2DropSpawner.profile_form_template = """
+        <style>
+            /*
+                .profile divs holds two div tags: one for a radio button, and one
+                for the profile's content.
+            */
+            #kubespawner-profiles-list .profile {
+                display: flex;
+                flex-direction: row;
+                font-weight: normal;
+                border-bottom: 1px solid #ccc;
+                padding-bottom: 12px;
+            }
+
+            #kubespawner-profiles-list .profile .radio {
+                padding: 12px;
+            }
+
+            /* .option divs holds a label and a select tag */
+            #kubespawner-profiles-list .profile .option {
+                display: flex;
+                flex-direction: row;
+                align-items: center;
+                padding-bottom: 12px;
+            }
+
+            #kubespawner-profiles-list .profile .option label {
+                font-weight: normal;
+                margin-right: 8px;
+                min-width: 96px;
+            }
+        </style>
+
+        <div class='form-group' id='kubespawner-profiles-list'>
+            {%- for profile in profile_list %}
+            {#- Wrap everything in a <label> so clicking anywhere selects the option #}
+            <label for='profile-item-{{ profile.slug }}' class='profile'>
+                <div class='radio'>
+                    <input type='radio' name='profile' id='profile-item-{{ profile.slug }}' value='{{ profile.slug }}' {% if profile.default %}checked{% endif %} />
+                </div>
+                <div>
+                    <h3>{{ profile.display_name }}</h3>
+
+                    {%- if profile.description %}
+                    <p>{{ profile.description }}</p>
+                    {%- endif %}
+
+                    {%- if profile.profile_options %}
+                    <div>
+                        {%- for k, option in profile.profile_options.items() %}
+                        <div class='option'>
+                            <label for='profile-option-{{profile.slug}}-{{k}}'>{{option.display_name}}</label>
+                            <select name="profile-option-{{profile.slug}}-{{k}}" class="form-control">
+                                {%- for k, choice in option['choices'].items() %}
+                                <option value="{{ k }}" {% if choice.default %}selected{%endif %}>{{ choice.display_name }}</option>
+                                {%- endfor %}
+                            </select>
+                        </div>
+                        {%- endfor %}
+                    </div>
+                    {%- endif %}
+                </div>
+            </label>
+            {%- endfor %}
+            <div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
+              <div class="panel panel-default">
+                <div class="panel-heading" role="tab" id="headingOne">
+                  <h4 class="panel-title">
+                    <a class="collapsed" role="button" data-toggle="collapse" data-parent="#accordion" href="#collapseOne" aria-expanded="false" aria-controls="collapseOne">
+                      B2DROP connection
+                    </a>
+                    {%if b2drop_ready %}<span class="label label-success">Already configured!</span>{% endif %}
+                  </h4>
+                </div>
+                <div id="collapseOne" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingOne">
+                  <div class="panel-body">
+                    <div class='form-group'>
+                      <label for="b2drop-user" class="form-label">B2DROP app Username</label>
+                      <input type="text" class="form-control" name="b2drop-user" id="b2drop-user" aria-describedby="b2drop-user-help" value="{{ b2drop_user }}">
+                      <div id="b2drop-user-help" class="form-text">Create new app password at <a href="https://b2drop.eudat.eu/settings/user/security">B2DROP security configuration</a></div>
+                    </div>
+                    <div class='form-group'>
+                        <label for="b2drop-pwd" class="form-label">B2DROP app Password</label>
+                        <input type="password" class="form-control" name="b2drop-pwd" id="b2drop-pwd" value="{{ b2drop_pwd }}">
+                    </div>
+                    <div class='form-group'>
+                        <input type="checkbox" id="b2drop-remember" name="b2drop-remember" {%if b2drop_ready %}checked{% endif %}>
+                        <label class="form-check-label" for="b2drop-remember">Remember B2DROP credentials</label>
+                    </div>
+                  </div>
+                </div>
+              </div>
+        </div>
+        """
+{% endraw %}
+  templatePaths:
+    - /egi-notebooks-hub/templates
+  extraFiles:
+    login.html:
+      mountPath: /egi-notebooks-hub/templates/login.html
+      stringData: |-
+{% raw %}
+        {% extends "egi-login.html" %}
+        {% block main_intro %}
+        <h1><img alt="Notebooks Logo" src="{{ static_url('images/egi-icon-notebooks.svg') }}"
+             height="100">Notebooks</h1>
+        <p>
+        Notebooks is an environment based on <a href="http://jupyter.org/">Jupyter</a> and
+        the <a href="https://www.egi.eu/services/cloud-compute/">EGI cloud service</a> that
+        offers a browser-based, scalable tool for interactive data analysis. The Notebooks
+        environment provides users with notebooks where they can combine text, mathematics,
+        computations and rich media output.
+        </p>
+        <p>
+        Access requires a valid <a href="https://docs.egi.eu/users/check-in/signup">EGI account</a>
+        and <a href="https://docs.egi.eu/users/dev-env/notebooks/#notebooks-for-researchers">
+        enrolling to one of the supported VOs</a>.
+        </p>
+        <p>
+        Default environment provides 2 CPU cores, 4 GB RAM and 20GB of personal storage space per user
+        </p>
+        {% endblock main_intro %}
+{% endraw %}
diff --git a/demo/inventory/99-all.yaml b/demo/inventory/99-all.yaml
new file mode 100644
index 0000000..3f8a001
--- /dev/null
+++ b/demo/inventory/99-all.yaml
@@ -0,0 +1,17 @@
+---
+allnodes:
+  children:
+    master:
+    ingress:
+    nfs:
+    worker:
+
+all:
+  vars:
+    ansible_become: true
+    ansible_user: egi
+    site_name: cesnet
+    vault_mount_point: secrets/users/e1662e20-e34b-468c-b0ce-d899bc878364@egi.eu/eosc-demo
+
+    notebooks_hostname: eosc-notebooks.vm.fedcloud.eu
+    grafana_hostname: grafana-eosc-notebooks.vm.fedcloud.eu
diff --git a/demo/playbooks/cvmfs.yaml b/demo/playbooks/cvmfs.yaml
new file mode 120000
index 0000000..b5dcdf0
--- /dev/null
+++ b/demo/playbooks/cvmfs.yaml
@@ -0,0 +1 @@
+../../cesnet-central/playbooks/cvmfs.yaml
\ No newline at end of file
diff --git a/demo/playbooks/files/calico.yaml b/demo/playbooks/files/calico.yaml
new file mode 120000
index 0000000..3d2b787
--- /dev/null
+++ b/demo/playbooks/files/calico.yaml
@@ -0,0 +1 @@
+../../../cesnet-central/playbooks/files/calico.yaml
\ No newline at end of file
diff --git a/demo/playbooks/files/helm_repos.fact b/demo/playbooks/files/helm_repos.fact
new file mode 120000
index 0000000..94d2856
--- /dev/null
+++ b/demo/playbooks/files/helm_repos.fact
@@ -0,0 +1 @@
+../../../cesnet-central/playbooks/files/helm_repos.fact
\ No newline at end of file
diff --git a/demo/playbooks/files/k8s-cheats.sh b/demo/playbooks/files/k8s-cheats.sh
new file mode 120000
index 0000000..d20f90f
--- /dev/null
+++ b/demo/playbooks/files/k8s-cheats.sh
@@ -0,0 +1 @@
+../../../cesnet-central/playbooks/files/k8s-cheats.sh
\ No newline at end of file
diff --git a/demo/playbooks/files/xfs-quotas.sh b/demo/playbooks/files/xfs-quotas.sh
new file mode 120000
index 0000000..5330cad
--- /dev/null
+++ b/demo/playbooks/files/xfs-quotas.sh
@@ -0,0 +1 @@
+../../../cesnet-central/playbooks/files/xfs-quotas.sh
\ No newline at end of file
diff --git a/demo/playbooks/k8s.yaml b/demo/playbooks/k8s.yaml
new file mode 120000
index 0000000..5e18112
--- /dev/null
+++ b/demo/playbooks/k8s.yaml
@@ -0,0 +1 @@
+../../cesnet-central/playbooks/k8s.yaml
\ No newline at end of file
diff --git a/demo/playbooks/notebooks.yaml b/demo/playbooks/notebooks.yaml
new file mode 100644
index 0000000..1e9eb41
--- /dev/null
+++ b/demo/playbooks/notebooks.yaml
@@ -0,0 +1,104 @@
+---
+- name: Notebooks deployments
+  hosts: master
+  become: true
+  tasks:
+    - name: Configure helm repo
+      shell: |-
+        helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
+        helm repo update
+      when: "'jupyterhub' not in ansible_local.helm_repos | map(attribute='name') | list"
+    - name: Get Secrets from Vault for notebooks
+      vars:
+        name: "{{ item | basename | splitext | first }}"
+      set_fact:
+        secrets: "{{ secrets|default({}) | combine({name: lookup('community.hashi_vault.hashi_vault', vault_mount_point + '/deployment-' + name,
+          token_validate=false)}) }}"
+      with_fileglob:
+        - "../deployments/*.yaml"
+    - name: Debug Deployments Secrets
+      debug:
+        msg: "{{ item.key }} = {{ item.value }}"
+      loop: "{{ secrets | dict2items }}"
+    - name: Copy config file to master
+      vars:
+        name: "{{ item | basename | splitext | first }}"
+        secret: "{{ secrets[item | basename | splitext | first] }}"
+      template:
+        src: "{{ item }}"
+        dest: "/tmp/{{ item | basename }}"
+        mode: 0600
+      with_fileglob:
+        - "../deployments/*.yaml"
+    - name: Deploy/upgrade notebook instance
+      vars:
+        name: "{{ item | basename | splitext | first }}"
+        version: "3.2.1" # app 4.0.2 (2023-11-27)
+      shell: |-
+        helm status --namespace {{ name }} {{ name }}
+        if [ $? -ne 0 ]; then
+            helm install --create-namespace --namespace {{ name }} \
+                -f /tmp/{{ item | basename }} --version {{ version }} --timeout 2h \
+                 {{ name }} jupyterhub/jupyterhub
+        else
+            helm upgrade --version {{ version }} -f /tmp/{{ item | basename }} --timeout 2h \
+                --namespace {{ name }} {{ name }} jupyterhub/jupyterhub
+        fi
+      environment:
+        KUBECONFIG: /etc/kubernetes/admin.conf
+        PATH: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
+      when: true
+      with_fileglob:
+        - "../deployments/*.yaml"
+
+    - name: Configure secrets management for the hub
+      vars:
+        name: "{{ item | basename | splitext | first }}"
+      shell: |-
+        kubectl apply -f - << EOF
+        ---
+        kind: Role
+        apiVersion: rbac.authorization.k8s.io/v1
+        metadata:
+          name: hub-secrets
+          namespace: {{ name }}
+        rules:
+          - apiGroups: [""]       # "" indicates the core API group
+            resources: ["secrets"]
+            verbs: ["get", "watch", "list", "create", "delete", "patch", "update"]
+        ---
+        kind: RoleBinding
+        apiVersion: rbac.authorization.k8s.io/v1
+        metadata:
+          name: hub-secrets
+          namespace: {{ name }}
+        subjects:
+          - kind: ServiceAccount
+            name: hub
+            namespace: {{ name }}
+        roleRef:
+          kind: Role
+          name: hub-secrets
+          apiGroup: rbac.authorization.k8s.io
+        EOF
+      environment:
+        KUBECONFIG: /etc/kubernetes/admin.conf
+        PATH: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
+      when: true
+      with_fileglob:
+        - "../deployments/*.yaml"
+- hosts: nfs
+  become: true
+  tasks:
+    - name: Quota settings
+      vars:
+        name: "{{ item | basename | splitext | first }}"
+      cron:
+        cron_file: notebook-quotas
+        name: "{{ name }} quotas"
+        minute: "0"
+        hour: "*/2"
+        job: "/usr/local/bin/xfs-quotas.sh --include ^/exports/{{ name }}- --exclude ^/exports/{{ name }}-hub-db-dir-"
+        user: root
+      with_fileglob:
+        - "../deployments/*.yaml"
diff --git a/demo/playbooks/public_keys b/demo/playbooks/public_keys
new file mode 120000
index 0000000..3022cbb
--- /dev/null
+++ b/demo/playbooks/public_keys
@@ -0,0 +1 @@
+../../cesnet-central/playbooks/public_keys
\ No newline at end of file
diff --git a/demo/playbooks/squid.yaml b/demo/playbooks/squid.yaml
new file mode 120000
index 0000000..408847e
--- /dev/null
+++ b/demo/playbooks/squid.yaml
@@ -0,0 +1 @@
+../../cesnet-central/playbooks/squid.yaml
\ No newline at end of file
diff --git a/demo/playbooks/templates/etc/exports b/demo/playbooks/templates/etc/exports
new file mode 100644
index 0000000..d00f3ed
--- /dev/null
+++ b/demo/playbooks/templates/etc/exports
@@ -0,0 +1,2 @@
+# export the NFS directory to all the cluster members
+/exports {% for host in groups['allnodes'] -%}{{ hostvars[host].ansible_default_ipv4.address }}(rw,async,no_root_squash,no_subtree_check) {{ hostvars[host].ansible_default_ipv6.address }}(rw,async,no_root_squash,no_subtree_check) {% endfor -%}
diff --git a/demo/playbooks/templates/etc/squid b/demo/playbooks/templates/etc/squid
new file mode 120000
index 0000000..a7a265f
--- /dev/null
+++ b/demo/playbooks/templates/etc/squid
@@ -0,0 +1 @@
+../../../../cesnet-central/playbooks/templates/etc/squid
\ No newline at end of file
diff --git a/demo/terraform/.gitignore b/demo/terraform/.gitignore
new file mode 100644
index 0000000..e15cf5f
--- /dev/null
+++ b/demo/terraform/.gitignore
@@ -0,0 +1,8 @@
+/.terraform/
+/.terraform.lock.hcl
+/fip.txt
+/hosts.txt
+/inventory.yaml
+/terraform.tfstate
+/terraform.tfstate.backup
+/*-volume.sh
diff --git a/demo/terraform/cloud-init.yaml b/demo/terraform/cloud-init.yaml
new file mode 100644
index 0000000..481d1f2
--- /dev/null
+++ b/demo/terraform/cloud-init.yaml
@@ -0,0 +1,18 @@
+#cloud-config
+---
+package_update: true
+
+package_upgrade: true
+
+users:
+  - default
+  - name: egi
+    gecos: EGI
+    primary_group: egi
+    groups: users
+    shell: /bin/bash
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    ssh_import_id:
+      - gh:enolfc
+      - gh:andrea-manzi
+      - gh:valtri
diff --git a/demo/terraform/terraform.tfvars b/demo/terraform/terraform.tfvars
new file mode 100644
index 0000000..3c7a61c
--- /dev/null
+++ b/demo/terraform/terraform.tfvars
@@ -0,0 +1,23 @@
+# These need to be defined for things to work
+ip_pool   = "public-muni-147-251-124-GROUP"
+net_name  = "group-project-network"
+net6_name = "public-muni-v6-432"
+site_name = "demo"
+
+# These may need some adjustment for your provider
+master_cpus = 2    # 2 CPUs to match existing flavours
+master_ram  = 4096
+worker_cpus = 4
+worker_ram  = 8192
+
+# Number of extra workers
+extra_workers = 2
+
+# volumes for docker
+docker_volumes_size = 384
+
+# NFS volume
+nfs_volume_size = 256
+
+# squid volume
+squid_volume_size = 128
diff --git a/demo/terraform/vars.tf b/demo/terraform/vars.tf
new file mode 100644
index 0000000..83658c0
--- /dev/null
+++ b/demo/terraform/vars.tf
@@ -0,0 +1,64 @@
+variable "ip_pool" {
+  type        = string
+  description = "The name of the public IP pool for the servers"
+}
+
+variable "net_name" {
+  type        = string
+  description = "The name of the IPv4 network"
+}
+
+variable "net6_name" {
+  type        = string
+  description = "The name of the IPv6 network"
+}
+
+variable "site_name" {
+  type        = string
+  description = "Site identifier for internal host names"
+}
+
+variable "master_cpus" {
+  type        = number
+  description = "Number of CPUs for the master"
+}
+
+variable "master_ram" {
+  type        = number
+  description = "RAM for the master"
+}
+
+variable "worker_cpus" {
+  type        = number
+  description = "Number of CPUs for the worker"
+}
+
+variable "worker_ram" {
+  type        = number
+  description = "RAM for the worker"
+}
+
+variable "extra_workers" {
+  type        = number
+  description = "Number of extra workers to create"
+}
+
+variable "docker_volumes_size" {
+  type        = number
+  description = "Size of volumes for docker (GB)"
+}
+
+variable "nfs_volume_size" {
+  type        = number
+  description = "Size of volume for NFS server (GB)"
+}
+
+variable "scratch_volume_size" {
+  type        = number
+  description = "Size of volume for ephemeral volumes (GB)"
+}
+
+variable "squid_volume_size" {
+  type        = number
+  description = "Size of volume for squid proxy, CVMFS cache (GB)"
+}
diff --git a/demo/terraform/versions.tf b/demo/terraform/versions.tf
new file mode 100644
index 0000000..43054dd
--- /dev/null
+++ b/demo/terraform/versions.tf
@@ -0,0 +1,13 @@
+terraform {
+  required_providers {
+    # Explicit source/version objects: the bare string form ("~> 2.0") is the
+    # deprecated pre-0.13 shorthand, and required_version below is >= 0.13.
+    local = {
+      source  = "hashicorp/local"
+      version = "~> 2.0"
+    }
+    openstack = {
+      source  = "terraform-provider-openstack/openstack"
+      version = ">= 1.38.0"
+    }
+  }
+  required_version = ">= 0.13"
+}
diff --git a/demo/terraform/vms.tf b/demo/terraform/vms.tf
new file mode 100644
index 0000000..6479ea0
--- /dev/null
+++ b/demo/terraform/vms.tf
@@ -0,0 +1,335 @@
+provider "openstack" {
+}
+
+locals {
+  nodes = concat([
+    openstack_compute_instance_v2.ingress,
+    openstack_compute_instance_v2.nfs,
+  ], openstack_compute_instance_v2.worker[*])
+  master_ip = replace(openstack_compute_instance_v2.master.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
+  ingress_ip = replace(openstack_compute_instance_v2.ingress.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
+  nfs_ip = replace(openstack_compute_instance_v2.nfs.network[1].fixed_ip_v6, "/\\[(.*)\\]/", "$1")
+  worker_ips = [for s in openstack_compute_instance_v2.worker[*].network[1].fixed_ip_v6 : replace(s, "/\\[(.*)\\]/", "$1")]
+}
+
+# Security groups
+
+resource "openstack_compute_secgroup_v2" "ping" {
+  name        = "ping"
+  description = "ICMP for ping"
+
+  rule {
+    from_port   = 8
+    to_port     = 0
+    ip_protocol = "icmp"
+    cidr        = "0.0.0.0/0"
+  }
+  rule {
+    from_port   = 128
+    to_port     = 0
+    # initial installation (bug in terraform): ip_protocol = "icmp"
+    ip_protocol = "ipv6-icmp"
+    cidr        = "::/0"
+  }
+}
+
+resource "openstack_compute_secgroup_v2" "ssh" {
+  name        = "ssh"
+  description = "ssh connection"
+
+  rule {
+    from_port   = 22
+    to_port     = 22
+    ip_protocol = "tcp"
+    cidr        = "0.0.0.0/0"
+  }
+  rule {
+    from_port   = 22
+    to_port     = 22
+    ip_protocol = "tcp"
+    cidr        = "::/0"
+  }
+}
+
+resource "openstack_compute_secgroup_v2" "http" {
+  name        = "http"
+  description = "http/https"
+
+  rule {
+    from_port   = 80
+    to_port     = 80
+    ip_protocol = "tcp"
+    cidr        = "0.0.0.0/0"
+  }
+  rule {
+    from_port   = 80
+    to_port     = 80
+    ip_protocol = "tcp"
+    cidr        = "::/0"
+  }
+  rule {
+    from_port   = 443
+    to_port     = 443
+    ip_protocol = "tcp"
+    cidr        = "0.0.0.0/0"
+  }
+  rule {
+    from_port   = 443
+    to_port     = 443
+    ip_protocol = "tcp"
+    cidr        = "::/0"
+  }
+}
+
+resource "openstack_networking_floatingip_v2" "public_ip" {
+  pool = var.ip_pool
+}
+
+data "openstack_images_image_v2" "ubuntu" {
+  name = "ubuntu-jammy-x86_64"
+}
+
+data "openstack_compute_flavor_v2" "master-flavor" {
+  vcpus = var.master_cpus
+  ram   = var.master_ram
+}
+
+data "openstack_compute_flavor_v2" "worker-flavor" {
+  vcpus = var.worker_cpus
+  ram   = var.worker_ram
+}
+
+resource "openstack_compute_instance_v2" "master" {
+  name     = "k8s-${var.site_name}-master"
+  image_id = data.openstack_images_image_v2.ubuntu.id
+  flavor_id       = data.openstack_compute_flavor_v2.master-flavor.id
+  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
+  user_data       = file("cloud-init.yaml")
+  tags            = ["master"]
+  network {
+    name = var.net_name
+  }
+  network {
+    name = var.net6_name
+  }
+}
+
+resource "openstack_compute_instance_v2" "nfs" {
+  name            = "k8s-${var.site_name}-nfs"
+  image_id        = data.openstack_images_image_v2.ubuntu.id
+  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
+  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
+  user_data       = file("cloud-init.yaml")
+  tags            = ["worker"]
+  network {
+    name = var.net_name
+  }
+  network {
+    name = var.net6_name
+  }
+}
+
+resource "openstack_compute_instance_v2" "ingress" {
+  name            = "k8s-${var.site_name}-w-ingress"
+  image_id        = data.openstack_images_image_v2.ubuntu.id
+  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
+  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name, openstack_compute_secgroup_v2.http.name]
+  user_data       = file("cloud-init.yaml")
+  tags            = ["worker"]
+  network {
+    name = var.net_name
+  }
+  network {
+    name = var.net6_name
+  }
+}
+
+resource "openstack_compute_instance_v2" "worker" {
+  count           = var.extra_workers
+  name            = "k8s-${var.site_name}-worker-${count.index}"
+  image_id        = data.openstack_images_image_v2.ubuntu.id
+  flavor_id       = data.openstack_compute_flavor_v2.worker-flavor.id
+  security_groups = ["default", openstack_compute_secgroup_v2.ping.name, openstack_compute_secgroup_v2.ssh.name]
+  user_data       = file("cloud-init.yaml")
+  tags            = ["worker"]
+  network {
+    name = var.net_name
+  }
+  network {
+    name = var.net6_name
+  }
+}
+
+resource "openstack_compute_floatingip_associate_v2" "fip" {
+  floating_ip = openstack_networking_floatingip_v2.public_ip.address
+  instance_id = openstack_compute_instance_v2.ingress.id
+}
+
+resource "openstack_blockstorage_volume_v3" "nfs-volume" {
+  name = "nfs"
+  size = var.nfs_volume_size
+}
+
+resource "openstack_compute_volume_attach_v2" "nfs-volume-attach" {
+  instance_id = openstack_compute_instance_v2.nfs.id
+  volume_id   = openstack_blockstorage_volume_v3.nfs-volume.id
+}
+
+resource "local_file" "volume-script" {
+  filename        = "nfs-volume.sh"
+  file_permission = "0755"
+  content         = <<EOT
+#! /bin/bash -xe
+if ! dpkg-query -s xfsprogs >/dev/null 2>&1; then
+	apt-get update
+	apt-get install -y xfsprogs
+fi
+device="${openstack_compute_volume_attach_v2.nfs-volume-attach.device}"
+mkfs.xfs -L NFS "$device" || true
+grep -q 'LABEL=NFS' /etc/fstab || /bin/echo -e "LABEL=NFS\t/exports\txfs\tdefaults,uquota,pquota\t0\t0" | tee -a /etc/fstab
+mkdir /exports 2>/dev/null || true
+mount -a
+EOT
+}
+
+resource "openstack_blockstorage_volume_v3" "docker-volume" {
+  count = var.extra_workers + 2
+  name  = format("docker-%s", local.nodes[count.index].name)
+  size  = var.docker_volumes_size
+}
+
+resource "openstack_compute_volume_attach_v2" "docker-volume-attach" {
+  count       = var.extra_workers + 2
+  instance_id = local.nodes[count.index].id
+  volume_id   = openstack_blockstorage_volume_v3.docker-volume[count.index].id
+}
+
+resource "local_file" "docker-volume-script" {
+  filename        = "docker-volume.sh"
+  file_permission = "0755"
+  content         = <<EOT
+#! /bin/bash -xe
+volumes="${join("\n", [for n, d in zipmap(tolist(local.nodes[*].name), tolist(openstack_compute_volume_attach_v2.docker-volume-attach[*].device)) : format("%s:%s", n, d)])}"
+volume=$(echo "$volumes" | grep "$(hostname):")
+device=$(echo "$volume" | cut -d: -f2)
+if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
+	mkfs.ext4 -L DOCKER "$device"
+	grep -q 'LABEL=DOCKER' /etc/fstab || /bin/echo -e "LABEL=DOCKER\t/var/lib/docker/overlay2\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
+	mkdir -p /var/lib/docker/overlay2 2>/dev/null || true
+	service docker stop >/dev/null 2>&1 || true
+	sleep 10
+	mount "$device" /mnt
+	mv /var/lib/docker/overlay2/* /mnt >/dev/null 2>&1 || true
+	umount /mnt
+	mount -a
+fi
+EOT
+}
+
+resource "openstack_blockstorage_volume_v3" "scratch-volume" {
+  count = var.extra_workers + 2
+  name  = format("scratch-%s", local.nodes[count.index].name)
+  size  = var.scratch_volume_size
+}
+
+resource "openstack_compute_volume_attach_v2" "scratch-volume-attach" {
+  count       = var.extra_workers + 2
+  instance_id = local.nodes[count.index].id
+  volume_id   = openstack_blockstorage_volume_v3.scratch-volume[count.index].id
+}
+
+resource "local_file" "scratch-volume-script" {
+  filename        = "scratch-volume.sh"
+  file_permission = "0755"
+  content         = <<EOT
+#! /bin/bash -xe
+volumes="${join("\n", [for n, d in zipmap(tolist(local.nodes[*].name), tolist(openstack_compute_volume_attach_v2.scratch-volume-attach[*].device)) : format("%s:%s", n, d)])}"
+volume=$(echo "$volumes" | grep "$(hostname):")
+device=$(echo "$volume" | cut -d: -f2)
+if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
+	mkfs.ext4 -L SCRATCH "$device"
+	grep -q 'LABEL=SCRATCH' /etc/fstab || /bin/echo -e "LABEL=SCRATCH\t/scratch\text4\tdefaults\t0\t0" | tee -a /etc/fstab
+	mkdir -p /scratch 2>/dev/null || true
+	mount -a
+fi
+EOT
+}
+
+resource "openstack_blockstorage_volume_v3" "squid-volume" {
+  name = "squid"
+  size = var.squid_volume_size
+}
+
+resource "openstack_compute_volume_attach_v2" "squid-volume-attach" {
+  instance_id = openstack_compute_instance_v2.ingress.id
+  volume_id   = openstack_blockstorage_volume_v3.squid-volume.id
+}
+
+resource "local_file" "squid-volume-script" {
+  filename        = "squid-volume.sh"
+  file_permission = "0755"
+  content         = <<EOT
+#! /bin/bash -xe
+device="${openstack_compute_volume_attach_v2.squid-volume-attach.device}"
+if ! dumpe2fs -h "$device" >/dev/null 2>&1; then
+	mkfs.ext4 -L SQUID "$device"
+fi
+grep -q 'LABEL=SQUID' /etc/fstab || /bin/echo -e "LABEL=SQUID\t/var/spool/squid\text4\tdefaults,x-systemd.before=local-fs.target\t0\t0" | tee -a /etc/fstab
+mkdir /var/spool/squid 2>/dev/null || true
+mount -a
+EOT
+}
+
+resource "local_file" "inventory" {
+  filename        = "inventory.yaml"
+  file_permission = "0644"
+  content         = <<EOT
+---
+fip:
+  hosts:
+    ${openstack_networking_floatingip_v2.public_ip.address}:
+
+master:
+  hosts:
+    ${local.master_ip}:
+      # must be IPv4 address or hostname
+      kube_server: ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}
+
+ingress:
+  hosts:
+    ${local.ingress_ip}:
+
+nfs:
+  hosts:
+    ${local.nfs_ip}:
+
+worker:
+  hosts:
+    ${join(":\n    ", local.worker_ips)}:
+
+# using public IP of kube_server for ansible delegate_to
+kube_server:
+  hosts:
+    ${openstack_compute_instance_v2.master.network[0].fixed_ip_v4}:
+      ansible_host: ${local.master_ip}
+EOT
+}
+
+resource "local_file" "fip" {
+  filename        = "fip.txt"
+  file_permission = "0644"
+  content         = <<EOT
+${openstack_networking_floatingip_v2.public_ip.address}
+EOT
+}
+
+resource "local_file" "hosts" {
+  filename        = "hosts.txt"
+  file_permission = "0644"
+  content         = <<EOT
+${local.master_ip}
+${local.ingress_ip}
+${local.nfs_ip}
+${join("\n", local.worker_ips)}
+EOT
+}
-- 
GitLab