Commit d99f67f3 authored by Chris Hines
Browse files

fix up the slurm plugins for private tmpdir, it's different on 20.02.7

parent d653f8d0
......@@ -60,8 +60,11 @@ def make_vars_filesystems(choices, inventory):
# ATM we have only one NFS server, and its hostname ends in -sql because it does double duty as the SQL server for slurm accounting logs
nfsserver = "{}-sql0".format(choices['clustername'])
for vol in ['userdata','slurm_state']:
data['{}_disk'.format(vol)] = inventory['all']['children']['hostvars']['hosts'][nfsserver]['ansible_host_volumes']["{}_{}".format(clustername,vol)]['dev']
for vol in ['userdata','slurm_state', 'userdata2']:
try:
data['{}_disk'.format(vol)] = inventory['all']['children']['hostvars']['hosts'][nfsserver]['ansible_host_volumes']["{}_{}".format(clustername,vol)]['dev']
except:
pass
with open('pre_templates/filesystems_yml.j2') as f:
template = jinja2.Template(f.read())
......
......@@ -6,12 +6,16 @@ volumes:
- host: {{ clustername }}-sql0
dev: {{ userdata_disk }}
mnt: /mnt/userdata
- host: {{ clustername }}-sql0
dev: {{ userdata2_disk }}
mnt: /mnt/nfs01
nfsexports:
- host: {{ clustername }}-sql0
exportList:
- src: /mnt/userdata
- src: /mnt/slurm_state
- src: /home
- src: /mnt/nfs01
nfsmounts:
- group: LoginNodes
nfsMounts:
......@@ -20,6 +24,11 @@ nfsmounts:
src: /mnt/userdata
fstype: nfs4
opts: "defaults,rw"
- name: /mnt/nfs01
ipv4: {{ clustername }}-sql0
src: /mnt/nfs01
fstype: nfs4
opts: "defaults,rw"
- name: /home
ipv4: {{ clustername }}-sql0
src: /home
......@@ -32,6 +41,11 @@ nfsmounts:
src: /mnt/userdata
fstype: nfs4
opts: "defaults,rw"
- name: /mnt/nfs01
ipv4: {{ clustername }}-sql0
src: /mnt/nfs01
fstype: nfs4
opts: "defaults,rw"
- name: /home
ipv4: {{ clustername }}-sql0
src: /home
......
......@@ -48,10 +48,10 @@ RebootProgram=/sbin/reboot
#SrunEpilog=
#TaskProlog=
#TaskEpilog=
TaskPlugin=task/cgroup
TaskPlugin=task/affinity,task/cgroup
#TaskPlugin=task/affinity
#TaskPlugin=task/affinity,task/cgroup
JobSubmitPlugins=lua
#JobSubmitPlugins=lua
OverTimeLimit=1
CompleteWait=10
......@@ -73,7 +73,9 @@ SchedulerType="sched/backfill"
#SchedulerAuth=
#SchedulerPort=
#SchedulerRootFilter=
SelectType="select/linear"
SelectType="select/cons_tres"
SelectTypeParameters=CR_Core_Memory
JobContainerType=job_container/tmpfs
{% if slurmselecttype.find("cons_tres") > 0 %}
SelectTypeParameters=CR_Core_Memory
{% endif %}
......@@ -120,9 +122,10 @@ Epilog={{ slurmjob.epilog }}
Prolog=/opt/slurm/etc/slurm.prolog
Epilog=/opt/slurm/etc/slurm.epilog
{% endif %}
PrologFlags=contain
#
# ACCOUNTING
JobAcctGatherType=jobacct_gather/linux
JobAcctGatherType=jobacct_gather/cgroup
JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
......
......@@ -173,3 +173,4 @@
- include: installCgroup.yml
- include: spankprivatetmpdir.yml
when: slurm_version == '20.02.7'
......@@ -19,12 +19,14 @@
# work.
#
# Example configuration:
required private-tmpdir.so base={{ spankbase }} mount=/var/tmp mount=/tmp
#required private-tmpdir.so base={{ spankbase }} mount=/var/tmp mount=/tmp
#-------------------------------------------------------------------------------
# required private-tmpdir.so base=/tmp/slurm mount=/var/tmp mount=/tmp
#-------------------------------------------------------------------------------
#
# Example configuration with multiple base parameters:
#-------------------------------------------------------------------------------
# required private-tmpdir.so base=/dev/shm/slurm mount=/dev/shm base=/tmp/slurm mount=/var/tmp mount=/tmp
{% if slurm_version == '20.02.7' %}
required private-tmpdir.so base=/dev/shm/slurm mount=/dev/shm base=/tmp/slurm mount=/var/tmp mount=/tmp
{% endif %}
#-------------------------------------------------------------------------------
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment