Skip to content
Snippets Groups Projects
Commit 91a2e4f4 authored by František Dvořák's avatar František Dvořák
Browse files

EOSC LOT3 third demo

parent a86d3642
No related branches found
No related tags found
No related merge requests found
Showing
with 718 additions and 0 deletions
# Ansible configuration for the demo deployment
[defaults]
# static groups + terraform-generated host files live in ./inventory
inventory=inventory
[diff]
# always show what changed when a task modifies a file
always=true
#! /bin/bash -xe
#
# EOSC LOT3 Jupyter Notebooks Demo
#
# Provisions the OpenStack infrastructure with Terraform, registers the
# floating IP in dynamic DNS, waits for all nodes to become reachable, and
# then deploys Kubernetes + JupyterHub notebooks with Ansible.
#
# terraform writes inventory.yaml, fip.txt and hosts.txt used below
cd terraform && terraform init && terraform apply
cd -
cp -pv terraform/inventory.yaml inventory/1-cesnet.yaml
# dynamic DNS
ip="$(head -n 1 < terraform/fip.txt)"
# temporarily disable xtrace so the DNS credentials are not echoed to the log
shellstate=$(shopt -po xtrace)
set +o xtrace
# https://nsupdate.fedcloud.eu
vault_prefix=secrets/users/e1662e20-e34b-468c-b0ce-d899bc878364@egi.eu/eosc-demo
# presumably the Vault value renders as "map[host1:pass1 host2:pass2]";
# the sed strips the map[...] wrapper leaving space-separated user:pass pairs
FEDCLOUD_DYNAMIC_DNS=$(vault read -field data $vault_prefix/FEDCLOUD_DYNAMIC_DNS | grep ^map | head -n 1 | sed 's/map\[\(.*\)\]/\1/')
for auth in $FEDCLOUD_DYNAMIC_DNS; do
# log the update call with the password masked
echo "curl -i -X GET -u $(echo "$auth" | cut -d: -f1):XXX https://nsupdate.fedcloud.eu/nic/update?myip=$ip"
curl -i -X GET -u "$auth" https://nsupdate.fedcloud.eu/nic/update?myip="$ip"
done
# restore the original xtrace setting
eval "$shellstate"
# wait for ping and ssh
for ip in $(cat terraform/hosts.txt); do
while ! ping -c 1 "$ip"; do sleep 5; done
# drop any stale host key left over from a previous deployment
ssh-keygen -R "$ip"
while ! ssh egi@"$ip" -o ConnectTimeout=10 -o PreferredAuthentications=publickey -o StrictHostKeyChecking=no :; do sleep 10; done
done
# check ssh access
ansible -m command -a 'uname -a' allnodes
# wait cloud-init
ansible -m shell -a 'while ! test -f /var/lib/cloud/instance/boot-finished; do sleep 2; done' allnodes
# setup volumes
ansible -m copy -a 'src=terraform/nfs-volume.sh dest=/root/ mode=preserve' nfs
ansible -m command -a '/root/nfs-volume.sh' nfs
ansible -m copy -a 'src=terraform/squid-volume.sh dest=/root/ mode=preserve' 'ingress[0]'
ansible -m command -a '/root/squid-volume.sh' 'ingress[0]'
ansible -m copy -a 'src=terraform/docker-volume.sh dest=/root/ mode=preserve' 'ingress nfs worker'
ansible -m command -a '/root/docker-volume.sh' 'ingress nfs worker'
ansible -m copy -a 'src=terraform/scratch-volume.sh dest=/root/ mode=preserve' 'ingress nfs worker'
ansible -m command -a '/root/scratch-volume.sh' 'ingress nfs worker'
# k8s + notebooks
ansible-playbook playbooks/k8s.yaml
ansible-playbook playbooks/squid.yaml
ansible-playbook playbooks/cvmfs.yaml
# wait for finish
# poll until every pod in the cluster reports the Running state
while ansible -m command -a 'kubectl get pods --all-namespaces' master | tail -n +3 | grep -v ' Running '; do sleep 5; done
---
# JupyterHub Helm chart values (proxy section): expose the hub through the
# cluster nginx ingress controller with Let's Encrypt (ACME) TLS.
proxy:
  service:
    type: NodePort
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: "nginx"
      kubernetes.io/tls-acme: "true"
    hosts:
      - "{{ notebooks_hostname }}"
    tls:
      - hosts:
          - "{{ notebooks_hostname }}"
        secretName: acme-tls-hub
# Per-user notebook pod configuration.
singleuser:
  # keep resource limits in sync with:
  # - documentation/content/en/users/dev-env/notebooks/_index.md
  # - documentation/content/en/users/dev-env/notebooks/data/_index.md
  # - profileList
  storage:
    capacity: 20Gi
    dynamic:
      pvcNameTemplate: claim-{userid}{servername}
      volumeNameTemplate: vol-{userid}{servername}
      storageAccessModes: ["ReadWriteMany"]
    extraVolumes:
      # CVMFS repositories mounted on the host, shared into the notebook pod
      - name: cvmfs-host
        hostPath:
          path: /cvmfs
          type: Directory
      - name: b2drop
        # sizeLimit problematic in this environment,
        # not needed for remote mounts
        # NOTE(review): kubernetes manifests spell this `emptyDir`, and the
        # bare key yields null rather than {} -- confirm kubespawner accepts
        # this snake_case/null form
        empty_dir:
      - name: owncloud
        # sizeLimit problematic in this environment,
        # not needed for remote mounts
        empty_dir:
      # per-session scratch space backed by the local-path provisioner
      - name: scratch
        ephemeral:
          volumeClaimTemplate:
            spec:
              accessModes: [ "ReadWriteOnce" ]
              storageClassName: local-path
              resources:
                requests:
                  storage: "10Gi"
    extraVolumeMounts:
      # the ":shared" suffix requests mount propagation for sidecar FUSE mounts
      - name: cvmfs-host
        mountPath: "/cvmfs:shared"
      - name: b2drop
        mountPath: '/mnt/b2drop:shared'
      - name: owncloud
        mountPath: '/mnt/owncloud:shared'
      - name: scratch
        mountPath: '/scratch'
  lifecycleHooks:
    postStart:
      # link the remote mounts into $HOME and create the checkpoints directory
      exec: { "command": ["/bin/sh", "-c", "ln -snf /mnt/b2drop $HOME/b2drop; ln -snf /mnt/owncloud $HOME/owncloud; for d in cvmfs scratch; do if ! test -e $HOME/$d; then ln -snf /$d $HOME/$d; fi; done; mkdir -p /home/jovyan/.notebookCheckpoints"] }
  memory:
    limit: 4G
    guarantee: 512M
  cpu:
    limit: 2
    guarantee: .2
  defaultUrl: "/lab"
  image:
    name: valtri/single-user
    tag: "jupyter-4"
  # selectable resource profiles; kubespawner_override values replace the
  # defaults above for the chosen profile
  profileList:
    - display_name: Small Environment - 2 vCPU / 4 GB RAM
      description: >
        The notebook environment includes Python, R, Julia and Octave kernels.
      default: true
      kubespawner_override:
        args:
          - "--CondaKernelSpecManager.env_filter='/opt/conda$'"
    - display_name: Medium Environment - 4 vCPU / 8 GB RAM
      description: >
        The notebook environment includes Python, R, Julia and Octave kernels.
      kubespawner_override:
        args:
          - "--CondaKernelSpecManager.env_filter='/opt/conda$'"
        cpu_guarantee: 0.4
        cpu_limit: 4
        mem_guarantee: 1G
        mem_limit: 8G
    - display_name: Large Environment - 8 vCPU / 16 GB RAM / GPU
      description: >
        The notebook environment includes Python, R, Julia and Octave kernels with GPU.
      kubespawner_override:
        args:
          - "--CondaKernelSpecManager.env_filter='/opt/conda$'"
        cpu_guarantee: 0.8
        cpu_limit: 8
        mem_guarantee: 2G
        mem_limit: 16G
        extra_resource_guarantees:
          nvidia.com/gpu: 1
        extra_resource_limits:
          nvidia.com/gpu: 1
# Hub configuration: custom EGI hub image, Check-in (OIDC) authentication,
# and the welcome-page handler.
hub:
  # services:
  #   status:
  #     url: "http://status-web/"
  #     admin: true
  image:
    name: valtri/hub
    tag: "3.x-eosc2" # EOSC, jupyter 3.1.0
  config:
    Authenticator:
      # keep auth state (tokens) available to the spawner hooks below
      enable_auth_state: true
      admin_users:
        - 529a87e5ce04cd5ddd7161734d02df0e2199a11452430803e714cb1309cc3907@egi.eu
        - 025166931789a0f57793a6092726c2ad89387a4cc167e7c63c5d85fc91021d18@egi.eu
        - 7ce47695f1e7fc91a1156e672f4a47576559938cdbe840355e2429e3a05b4ff8@egi.eu
        # fdvorak2 @ aai.egi.eu
        - 52cc7599bd1553c9d63e34e4c90b7e84d44967490c28bb4c53fe97b0c881d677@egi.eu
        # fdvorak2 @ aai-dev.egi.eu
        - c481e0a85e1ae0a5a1480a63e62295ca2f9ac652244947995bd4a0210fbcb77c@egi.eu
        # jhradil3 @ aai-dev.egi.eu
        - 240c0594fe34ac26cffd82fd0ad85f29d9ad9dfbb46febb05ed42db0bff594d1@egi.eu
      # keep in sync with:
      # - cesnet/playbooks/templates/binder.yaml
      # - documentation/content/en/users/dev-env/notebooks/_index.md
      allowed_groups:
        - urn:geant:eosc-federation.eu:testing:group:eosc#testing.eosc-federation.eu
      auto_login: true
      claim_groups_key: "entitlements"
    EGICheckinAuthenticator:
      # Check-in endpoints and OAuth client credentials come from Vault via
      # the "secret" template variable rendered by the deployment playbook
      checkin_host: "{{ secret['checkin_host'] }}"
      authorize_url: "https://{{ secret['checkin_host'] }}/OIDC/authorization"
      token_url: "https://{{ secret['checkin_host'] }}/OIDC/token"
      userdata_url: "https://{{ secret['checkin_host'] }}/OIDC/userinfo"
      client_id: "{{ secret['client_id'] }}"
      client_secret: "{{ secret['client_secret'] }}"
      oauth_callback_url: "https://{{ notebooks_hostname }}/hub/oauth_callback"
      scope: ["openid", "profile", "email", "offline_access", "entitlements"]
      username_key: "sub"
      extra_authorize_params:
        prompt: consent
    JupyterHub:
      admin_access: true
      authenticate_prometheus: false
      authenticator_class: egi_notebooks_hub.egiauthenticator.EGICheckinAuthenticator
      # spawner_class: (in egi-notebooks-b2drop)
  extraConfig:
    # serve a custom welcome page as the hub's default landing URL
    egi-notebooks-welcome: |-
      from egi_notebooks_hub.welcome import WelcomeHandler
      c.JupyterHub.default_url = "/welcome"
      c.JupyterHub.extra_handlers = [(r'/welcome', WelcomeHandler)]
egi-notebooks-b2drop: |-
{% raw %}
import base64
from jinja2 import BaseLoader
from jinja2 import Environment
from egi_notebooks_hub.onedata import OnedataSpawner
from kubernetes_asyncio.client.rest import ApiException
class B2DropSpawner(OnedataSpawner):
async def auth_state_hook(self, spawner, auth_state):
await super().auth_state_hook(spawner, auth_state)
self.b2drop_ready = False
self.b2drop_user = ""
self.b2drop_pwd = ""
try:
secret = await self.api.read_namespaced_secret(self.token_secret_name, self.namespace)
except ApiException:
return
if secret and secret.data:
self.b2drop_user = base64.b64decode(secret.data.get("b2drop-user", "")).decode()
self.b2drop_pwd = base64.b64decode(secret.data.get("b2drop-pwd", "")).decode()
self.b2drop_ready = (self.b2drop_user and self.b2drop_pwd)
def _render_options_form(self, profile_list):
# old:self._profile_list = self._init_profile_list(profile_list)
self._profile_list = self._get_initialized_profile_list(profile_list)
profile_form_template = Environment(loader=BaseLoader).from_string(
self.profile_form_template
)
return profile_form_template.render(profile_list=self._profile_list, b2drop_ready=self.b2drop_ready, b2drop_user=self.b2drop_user, b2drop_pwd=self.b2drop_pwd)
async def pre_spawn_hook(self, spawner):
await super(B2DropSpawner, self).pre_spawn_hook(spawner)
b2drop_user = self.user_options.get("b2drop-user", "")
b2drop_pwd = self.user_options.get("b2drop-pwd", "")
b2drop_remember = self.user_options.get("b2drop-remember", None)
if not (b2drop_user and b2drop_pwd):
secret = await self.api.read_namespaced_secret(self.token_secret_name, self.namespace)
if secret and secret.data:
b2drop_user = base64.b64decode(secret.data.get("b2drop-user", "")).decode()
b2drop_pwd = base64.b64decode(secret.data.get("b2drop-pwd", "")).decode()
if b2drop_user and b2drop_pwd:
volume_mounts = [
{"mountPath": "/b2drop:shared", "name": "b2drop"},
]
spawner.extra_containers.append(
{
"name": "b2drop",
"image": "eginotebooks/webdav-sidecar:sha-e5e8df2",
"env": [
{"name": "WEBDAV_URL", "value": "https://b2drop.eudat.eu/remote.php/webdav"},
{"name": "WEBDAV_PWD", "value": b2drop_pwd},
{"name": "WEBDAV_USER", "value": b2drop_user},
{"name": "MOUNT_PATH", "value": "/b2drop"},
],
"resources": self.sidecar_resources,
"securityContext": {
"runAsUser": 0,
"privileged": True,
"capabilities": {"add": ["SYS_ADMIN"]},
},
"volumeMounts": volume_mounts,
"lifecycle": {
"preStop": {
"exec": {"command": ["umount", "-l", "/b2drop"]}
},
},
}
)
if b2drop_remember:
await self._update_secret({"b2drop-user": b2drop_user,
"b2drop-pwd": b2drop_pwd})
else:
await self._update_secret({"b2drop-user": "", "b2drop-pwd": ""})
def options_from_form(self, formdata):
data = super(B2DropSpawner, self)._options_from_form(formdata)
data.update({'b2drop-user': formdata.get('b2drop-user', [None])[0],
'b2drop-remember': formdata.get('b2drop-remember', [None])[0],
'b2drop-pwd': formdata.get('b2drop-pwd', [None])[0]})
return data
class WebDavOIDCSpawner(B2DropSpawner):
async def pre_spawn_hook(self, spawner):
await super(WebDavOIDCSpawner, self).pre_spawn_hook(spawner)
auth_state = await self.user.get_auth_state()
# volume name as in EGI spawner
token_secret_volume_name = self._expand_user_properties(
self.token_secret_volume_name_template
)
token_path = os.path.join(self.token_mount_path, "access_token")
volume_mounts = [
{"mountPath": "/owncloud:shared", "name": "owncloud"},
{"mountPath": self.token_mount_path, "name": token_secret_volume_name, "readOnly": True},
]
spawner.extra_containers.append(
{
"name": "owncloud",
"image": "valtri/webdav-rclone-sidecar:sha-1e36c50",
"args": ["bearer_token_command=cat " + token_path],
"env": [
{"name": "WEBDAV_URL", "value": "https://ocis.aaitest.owncloud.works/remote.php/webdav/"},
{"name": "WEBDAV_VENDOR", "value": "other"},
{"name": "MOUNT_PATH", "value": "/owncloud"},
],
"resources": self.sidecar_resources,
"securityContext": {
"runAsUser": 1000,
"fsUser": 1000,
"fsGroup": 100,
"privileged": True,
"capabilities": {"add": ["SYS_ADMIN"]},
},
"volumeMounts": volume_mounts,
"lifecycle": {
"preStop": {
"exec": {"command": ["fusermount", "-u", "-z", "/owncloud"]}
},
},
}
)
c.JupyterHub.spawner_class = WebDavOIDCSpawner
c.B2DropSpawner.http_timeout = 90
c.B2DropSpawner.args = ["--FileCheckpoints.checkpoint_dir='/home/jovyan/.notebookCheckpoints'"]
c.B2DropSpawner.profile_form_template = """
<style>
/*
.profile divs holds two div tags: one for a radio button, and one
for the profile's content.
*/
#kubespawner-profiles-list .profile {
display: flex;
flex-direction: row;
font-weight: normal;
border-bottom: 1px solid #ccc;
padding-bottom: 12px;
}
#kubespawner-profiles-list .profile .radio {
padding: 12px;
}
/* .option divs holds a label and a select tag */
#kubespawner-profiles-list .profile .option {
display: flex;
flex-direction: row;
align-items: center;
padding-bottom: 12px;
}
#kubespawner-profiles-list .profile .option label {
font-weight: normal;
margin-right: 8px;
min-width: 96px;
}
</style>
<div class='form-group' id='kubespawner-profiles-list'>
{%- for profile in profile_list %}
{#- Wrap everything in a <label> so clicking anywhere selects the option #}
<label for='profile-item-{{ profile.slug }}' class='profile'>
<div class='radio'>
<input type='radio' name='profile' id='profile-item-{{ profile.slug }}' value='{{ profile.slug }}' {% if profile.default %}checked{% endif %} />
</div>
<div>
<h3>{{ profile.display_name }}</h3>
{%- if profile.description %}
<p>{{ profile.description }}</p>
{%- endif %}
{%- if profile.profile_options %}
<div>
{%- for k, option in profile.profile_options.items() %}
<div class='option'>
<label for='profile-option-{{profile.slug}}-{{k}}'>{{option.display_name}}</label>
<select name="profile-option-{{profile.slug}}-{{k}}" class="form-control">
{%- for k, choice in option['choices'].items() %}
<option value="{{ k }}" {% if choice.default %}selected{%endif %}>{{ choice.display_name }}</option>
{%- endfor %}
</select>
</div>
{%- endfor %}
</div>
{%- endif %}
</div>
</label>
{%- endfor %}
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
<div class="panel panel-default">
<div class="panel-heading" role="tab" id="headingOne">
<h4 class="panel-title">
<a class="collabpsed" role="button" data-toggle="collapse" data-parent="#accordion" href="#collapseOne" aria-expanded="false" aria-controls="collapseOne">
B2DROP connection
</a>
{%if b2drop_ready %}<span class="label label-success">Already configured!</span>{% endif %}
</h4>
</div>
<div id="collapseOne" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingOne">
<div class="panel-body">
<div class='form-group'>
<label for="b2drop-user" class="form-label">B2DROP app Username</label>
<input type="text" class="form-control" name="b2drop-user" id="b2drop-user" aria-describedby="b2drop-user-help" value="{{ b2drop_user }}">
<div id="b2drop-user-help" class="form-text">Create new app password at <a href="https://b2drop.eudat.eu/settings/user/security">B2DROP security configuration</a></div>
</div>
<div class='form-group'>
<label for="b2drop-pwd" class="form-label">B2DROP app Password</label>
<input type="password" class="form-control" name="b2drop-pwd" id="b2drop-pwd" value="{{ b2drop_pwd }}">
</div>
<div class='form-group'>
<input type="checkbox" id="b2drop-remember" name="b2drop-remember" {%if b2drop_ready %}checked{% endif %}>
<label class="form-check-label" for="from-check-input">Remember B2DROP credentials</label>
</div>
</div>
</div>
</div>
</div>
"""
{% endraw %}
  templatePaths:
    - /egi-notebooks-hub/templates
  extraFiles:
    # pre-login landing page, mounted over the hub image's login template
    login.html:
      mountPath: /egi-notebooks-hub/templates/login.html
      stringData: |-
        {% raw %}
        {% extends "egi-login.html" %}
        {% block main_intro %}
        <h1><img alt="Notebooks Logo" src="{{ static_url('images/egi-icon-notebooks.svg') }}"
                 height="100">Notebooks</h1>
        <p>
        Notebooks is an environment based on <a href="http://jupyter.org/">Jupyter</a> and
        the <a href="https://www.egi.eu/services/cloud-compute/">EGI cloud service</a> that
        offers a browser-based, scalable tool for interactive data analysis. The Notebooks
        environment provides users with notebooks where they can combine text, mathematics,
        computations and rich media output.
        </p>
        <p>
        Access requires a valid <a href="https://docs.egi.eu/users/check-in/signup">EGI account</a>
        and <a href="https://docs.egi.eu/users/dev-env/notebooks/#notebooks-for-researchers">
        enrolling to one of the supported VOs</a>.
        </p>
        <p>
        Default environment provides 2 CPU cores, 4 GB RAM and 20GB of personal storage space per user
        </p>
        {% endblock main_intro %}
        {% endraw %}
---
# Static Ansible inventory groups; the actual host entries are generated by
# Terraform into inventory/1-cesnet.yaml.
allnodes:
  children:
    master:
    ingress:
    nfs:
    worker:
all:
  vars:
    # canonical boolean (was "yes" -- YAML 1.1 truthy form flagged by yamllint)
    ansible_become: true
    ansible_user: egi
    site_name: cesnet
    # Vault path holding the deployment secrets (Check-in OAuth client etc.)
    vault_mount_point: secrets/users/e1662e20-e34b-468c-b0ce-d899bc878364@egi.eu/eosc-demo
    notebooks_hostname: eosc-notebooks.vm.fedcloud.eu
    grafana_hostname: grafana-eosc-notebooks.vm.fedcloud.eu
../../cesnet-central/playbooks/cvmfs.yaml
\ No newline at end of file
../../../cesnet-central/playbooks/files/calico.yaml
\ No newline at end of file
../../../cesnet-central/playbooks/files/helm_repos.fact
\ No newline at end of file
../../../cesnet-central/playbooks/files/k8s-cheats.sh
\ No newline at end of file
../../../cesnet-central/playbooks/files/xfs-quotas.sh
\ No newline at end of file
../../cesnet-central/playbooks/k8s.yaml
\ No newline at end of file
---
# Deploy one JupyterHub instance per values file in ../deployments; the file
# basename is used as both the helm release name and its namespace.
- name: Notebooks deployments
  hosts: master
  become: true
  tasks:
    - name: Configure helm repo
      shell: |-
        helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
        helm repo update
      # the helm_repos local fact is populated by the k8s playbook
      when: "'jupyterhub' not in ansible_local.helm_repos | map(attribute='name') | list"

    - name: Get Secrets from Vault for notebooks
      vars:
        name: "{{ item | basename | splitext | first }}"
      set_fact:
        secrets: "{{ secrets|default({}) | combine({name: lookup('community.hashi_vault.hashi_vault', vault_mount_point + '/deployment-' + name,
          token_validate=false)}) }}"
      with_fileglob:
        - "../deployments/*.yaml"

    - name: Debug Deployments Secrets
      debug:
        msg: "{{ item.key }} = {{ item.value }}"
      loop: "{{ secrets | dict2items }}"

    - name: Copy config file to master
      vars:
        name: "{{ item | basename | splitext | first }}"
        secret: "{{ secrets[item | basename | splitext | first] }}"
      template:
        src: "{{ item }}"
        dest: "/tmp/{{ item | basename }}"
        # quoted: an unquoted 0600 is parsed as a YAML octal integer
        mode: "0600"
      with_fileglob:
        - "../deployments/*.yaml"

    - name: Deploy/upgrade notebook instance
      vars:
        name: "{{ item | basename | splitext | first }}"
        version: "3.2.1" # app 4.0.2 (2023-11-27)
      # install if the release does not exist yet, upgrade otherwise
      shell: |-
        helm status --namespace {{ name }} {{ name }}
        if [ $? -ne 0 ]; then
          helm install --create-namespace --namespace {{ name }} \
            -f /tmp/{{ item | basename }} --version {{ version }} --timeout 2h \
            {{ name }} jupyterhub/jupyterhub
        else
          helm upgrade --version {{ version }} -f /tmp/{{ item | basename }} --timeout 2h \
            --namespace {{ name }} {{ name }} jupyterhub/jupyterhub
        fi
      environment:
        KUBECONFIG: /etc/kubernetes/admin.conf
        PATH: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
      # placeholder toggle kept from the original to ease skipping during demos
      when: true
      with_fileglob:
        - "../deployments/*.yaml"

    - name: Configure secrets management for the hub
      vars:
        name: "{{ item | basename | splitext | first }}"
      # RBAC so the hub service account can manage per-user credential secrets
      shell: |-
        kubectl apply -f - << EOF
        ---
        kind: Role
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: hub-secrets
          namespace: {{ name }}
        rules:
          - apiGroups: [""] # "" indicates the core API group
            resources: ["secrets"]
            verbs: ["get", "watch", "list", "create", "delete", "patch", "update"]
        ---
        kind: RoleBinding
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: hub-secrets
          namespace: {{ name }}
        subjects:
          - kind: ServiceAccount
            name: hub
            namespace: {{ name }}
        roleRef:
          kind: Role
          name: hub-secrets
          apiGroup: rbac.authorization.k8s.io
        EOF
      environment:
        KUBECONFIG: /etc/kubernetes/admin.conf
        PATH: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
      when: true
      with_fileglob:
        - "../deployments/*.yaml"

- hosts: nfs
  become: true
  tasks:
    # refresh XFS quotas for each deployment's exported user volumes every 2h
    - name: Quota settings
      vars:
        name: "{{ item | basename | splitext | first }}"
      cron:
        cron_file: notebook-quotas
        name: "{{ name }} quotas"
        minute: "0"
        hour: "*/2"
        job: "/usr/local/bin/xfs-quotas.sh --include ^/exports/{{ name }}- --exclude ^/exports/{{ name }}-hub-db-dir-"
        user: root
      with_fileglob:
        - "../deployments/*.yaml"
../../cesnet-central/playbooks/public_keys
\ No newline at end of file
../../cesnet-central/playbooks/squid.yaml
\ No newline at end of file
{# Jinja2 template for /etc/exports on the NFS server; the "#" line below is
   an exports-file comment in the rendered output, not a template comment. #}
{# NOTE(review): assumes every host has both ansible_default_ipv4 and
   ansible_default_ipv6 facts -- a host without an IPv6 default route would
   break the render; confirm against the target cloud. #}
# export the NFS directory to all the cluster members
/exports {% for host in groups['allnodes'] -%}{{ hostvars[host].ansible_default_ipv4.address }}(rw,async,no_root_squash,no_subtree_check) {{ hostvars[host].ansible_default_ipv6.address }}(rw,async,no_root_squash,no_subtree_check) {% endfor -%}
../../../../cesnet-central/playbooks/templates/etc/squid
\ No newline at end of file
# Terraform working directory and generated outputs -- never commit state,
# lock data or rendered inventories/scripts
/.terraform/
/.terraform.lock.hcl
/fip.txt
/hosts.txt
/inventory.yaml
/terraform.tfstate
/terraform.tfstate.backup
/*-volume.sh
#cloud-config
---
# Common cloud-init for all cluster nodes: update/upgrade packages and create
# the "egi" management user with passwordless sudo and the admins' GitHub keys.
package_update: true
package_upgrade: true
users:
  - default
  - name: egi
    gecos: EGI
    primary_group: egi
    groups: users
    shell: /bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    # public SSH keys imported from GitHub accounts
    ssh_import_id:
      - gh:enolfc
      - gh:andrea-manzi
      - gh:valtri
# These need to be defined for things to work
ip_pool = "public-muni-147-251-124-GROUP"
net_name = "group-project-network"
net6_name = "public-muni-v6-432"
site_name = "demo"
# These may need some adjustment for your provider
master_cpus = 2 # 2 CPUs to match existing flavours
master_ram = 4096
worker_cpus = 4
worker_ram = 8192
# Number of extra workers
extra_workers = 2
# volumes for docker
docker_volumes_size = 384
# NFS volume
nfs_volume_size = 256
# squid volume
squid_volume_size = 128
# NOTE(review): variables.tf declares scratch_volume_size with no default but
# no value is set here -- confirm it is supplied elsewhere (CLI / extra tfvars
# file), otherwise terraform will prompt for it on every run
# Input variables for the demo cluster; values are supplied in terraform.tfvars.
variable "ip_pool" {
  type        = string
  description = "The name of the public IP pool for the servers"
}
variable "net_name" {
  type        = string
  description = "The name of the IPv4 network"
}
variable "net6_name" {
  type        = string
  description = "The name of the IPv6 network"
}
variable "site_name" {
  type        = string
  description = "Site identifier for internal host names"
}
variable "master_cpus" {
  type        = number
  description = "Number of CPUs for the master"
}
variable "master_ram" {
  type        = number
  description = "RAM for the master"
}
variable "worker_cpus" {
  type        = number
  description = "Number of CPUs for the worker"
}
variable "worker_ram" {
  type        = number
  description = "RAM for the worker"
}
variable "extra_workers" {
  type        = number
  description = "Number of extra workers to create"
}
variable "docker_volumes_size" {
  type        = number
  description = "Size of volumes for docker (GB)"
}
variable "nfs_volume_size" {
  type        = number
  description = "Size of volume for NFS server (GB)"
}
variable "scratch_volume_size" {
  type        = number
  description = "Size of volume for ephemeral volumes (GB)"
}
variable "squid_volume_size" {
  type        = number
  description = "Size of volume for squid proxy, CVMFS cache (GB)"
}
terraform {
  required_providers {
    # explicit source/version object form; the legacy string shorthand
    # (local = "~> 2.0") is deprecated since Terraform 0.13
    local = {
      source  = "hashicorp/local"
      version = "~> 2.0"
    }
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = ">= 1.38.0"
    }
  }
  required_version = ">= 0.13"
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment