Skip to content

Commit

Permalink
Merge pull request #506 from BiBiServ/improving-master-mount
Browse files Browse the repository at this point in the history
DRAFT: replace autoMount by mountPoints
  • Loading branch information
XaverStiensmeier authored Jun 6, 2024
2 parents 4480f01 + 2b0b5b9 commit 6eafed2
Show file tree
Hide file tree
Showing 9 changed files with 38 additions and 22 deletions.
6 changes: 3 additions & 3 deletions bibigrid.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@
# - [public key one]

## Volumes and snapshots that will be mounted to master
# autoMount: False # WARNING: will overwrite unidentified filesystems
#masterMounts:
# - [mount one]
#masterMounts: (optional) # WARNING: will overwrite unidentified filesystems
# - name: [volume name]
# mountPoint: [where to mount to] # (optional)

#nfsShares: /vol/spool/ is automatically created as a nfs
# - [nfsShare one]
Expand Down
8 changes: 6 additions & 2 deletions bibigrid/core/actions/create.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,10 @@ def start_vpn_or_master(self, configuration, provider):
elif identifier == MASTER_IDENTIFIER:
configuration["floating_ip"] = server["private_v4"] # pylint: enable=comparison-with-callable
configuration["volumes"] = provider.get_mount_info_from_server(server)
for volume in configuration["volumes"]:
mount = next((mount for mount in configuration.get("masterMounts", []) if mount["name"] == volume["name"]), None)
if mount and mount.get("mountPoint"):
volume["mount_point"] = mount["mountPoint"]

def start_workers(self, worker, worker_count, configuration, provider):
name = WORKER_IDENTIFIER(cluster_id=self.cluster_id, additional=worker_count)
Expand Down Expand Up @@ -252,8 +256,8 @@ def prepare_vpn_or_master_args(self, configuration, provider):
if configuration.get("masterInstance"):
instance_type = configuration["masterInstance"]
identifier = MASTER_IDENTIFIER
master_mounts = configuration.get("masterMounts", [])
volumes = self.prepare_volumes(provider, master_mounts)
master_mounts_src = [master_mount["name"] for master_mount in configuration.get("masterMounts", [])]
volumes = self.prepare_volumes(provider, master_mounts_src)
elif configuration.get("vpnInstance"):
instance_type = configuration["vpnInstance"]
identifier = VPN_WORKER_IDENTIFIER
Expand Down
2 changes: 1 addition & 1 deletion bibigrid/core/utility/ansible_configurator.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ def generate_common_configuration_yaml(cidrs, configurations, cluster_id, ssh_us
master_configuration = configurations[0]
log.info("Generating common configuration file...")
common_configuration_yaml = {"bibigrid_version": __version__,
"auto_mount": master_configuration.get("autoMount", False), "cluster_id": cluster_id,
"cluster_id": cluster_id,
"cluster_cidrs": cidrs, "default_user": default_user,
"local_fs": master_configuration.get("localFS", False),
"local_dns_lookup": master_configuration.get("localDNSlookup", False),
Expand Down
2 changes: 1 addition & 1 deletion bibigrid/core/utility/validate_configuration.py
Original file line number Diff line number Diff line change
Expand Up @@ -333,7 +333,7 @@ def check_volumes(self):
self.log.info("Checking volumes...")
success = True
for configuration, provider in zip(self.configurations, self.providers):
volume_identifiers = configuration.get("masterMounts")
volume_identifiers = [masterMount["name"] for masterMount in configuration.get("masterMounts", [])]
if volume_identifiers:
# check individually if volumes exist
for volume_identifier in volume_identifiers:
Expand Down
2 changes: 1 addition & 1 deletion bibigrid/core/utility/validate_schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
{'infrastructure': str, 'cloud': str, 'sshUser': str, Or('subnet', 'network'): str, 'cloud_identifier': str,
Optional('sshPublicKeyFiles'): [str], Optional('sshTimeout'): int,
Optional('cloudScheduling'): {Optional('sshTimeout'): int}, Optional('autoMount'): bool,
Optional('masterMounts'): [str], Optional('nfsShares'): [str],
Optional('masterMounts'): [{'name': str, Optional('mountPoint'): str}], Optional('nfsShares'): [str],
Optional('userRoles'): [{'hosts': [str], 'roles': [{'name': str, Optional('tags'): [str]}]}],
Optional('localFS'): bool, Optional('localDNSlookup'): bool, Optional('slurm'): bool,
Optional('slurmConf'): {Optional('db'): str, Optional('db_user'): str, Optional('db_password'): str,
Expand Down
26 changes: 19 additions & 7 deletions documentation/markdown/features/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -70,18 +70,30 @@ cloudScheduling:
sshTimeout: 4
```

#### autoMount (optional:False)
> **Warning:** If a volume has an obscure filesystem, this might overwrite your data!

If `True` all [masterMounts](#mastermounts-optional) will be automatically mounted by BiBiGrid if possible.
If a volume is not formatted or has an unknown filesystem, it will be formatted to `ext4`.
Default `False`.

#### masterMounts (optional)

`masterMounts` expects a list of volumes and snapshots. Those will be attached to the master. If any snapshots are
given, volumes are first created from them. Volumes are not deleted after Cluster termination.

```yaml
masterMounts:
- name: test # name of the volume to be attached
mountPoint: /vol/spool2 # where the attached volume is to be mounted (optional)
```

`masterMounts` can be combined with [nfsShares](#nfsshares-optional).
The following example attaches the volume `test` to our master instance and mounts it to `/vol/spool2`.
Then it creates an nfsShare on `/vol/spool2`, allowing workers to access the volume `test`.

```yaml
masterMounts:
- name: test # name of the volume to be attached
mountPoint: /vol/spool2 # where the attached volume is to be mounted (optional)
nfsShares:
- /vol/spool2
```

<details>
<summary>
What is mounting?
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
- block:
- when: item.mount_point is defined
block:
- name: Make sure disks are available
failed_when: false
filesystem:
Expand All @@ -18,15 +19,15 @@

- name: Create mount folders if they don't exist
file:
path: "/vol/{{ item.name }}"
path: "{{ item.mount_point }}"
state: directory
mode: '0755'
owner: root
group: '{{ ansible_distribution | lower }}'

- name: Mount disks
mount:
path: "/vol/{{ item.name }}"
path: "{{ item.mount_point }}"
src: "{{ item.device }}"
state: mounted
fstype: "{{ filesystem_type.stdout }}"
4 changes: 2 additions & 2 deletions resources/playbook/roles/bibigrid/tasks/020-disk-server.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,6 @@
when: master.disks is defined

- name: Automount
when: volumes is defined and auto_mount
when: volumes is defined
include_tasks: 020-disk-server-automount.yml
with_items: "{{ volumes }}"
with_items: "{{ volumes }}"
3 changes: 1 addition & 2 deletions resources/playbook/roles/bibigrid/tasks/025-nfs-server.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,7 @@
lineinfile:
path: /etc/exports
state: present
regexp: '^{{ item.src }}'

regexp: '^{{ item.src }} '
line: "{{ item.src }}
{{cluster_cidrs|map(attribute='provider_cidrs')|flatten|join('(rw,nohide,insecure,no_subtree_check,async) ')}}\
(rw,nohide,insecure,no_subtree_check,async)
Expand Down

0 comments on commit 6eafed2

Please sign in to comment.