Commit 71a2c080 authored by Chris Hines

a new slurm role. Slurm currently communicates on the default interface; slurm.conf.j2 needs to be altered to use the VPN interface.
parent 062b6f55
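As the commit message notes, slurm.conf.j2 still lets each daemon bind to whatever address its hostname resolves to. One possible change (a sketch only, not part of this commit; it assumes the OpenVPN interface is named tun0 and that Ansible gathers facts for that interface on every host) is to point the controller and node addresses at the VPN interface explicitly:

ControlMachine={{ slurmctrl }}
ControlAddr={{ hostvars[slurmctrl]['ansible_tun0']['ipv4']['address'] }}
# (rest of slurm.conf.j2 unchanged)
{% for queue in slurmqueues %}
{% for node in groups[queue.group] %}
NodeName={{ node }} NodeAddr={{ hostvars[node]['ansible_tun0']['ipv4']['address'] }} Procs={{ hostvars[node]['ansible_processor_cores'] }} State=UNKNOWN
{% endfor %}
{% endfor %}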
---
- name: restart munge
  service: name=munge state=restarted
  sudo: true
- name: restart slurm
  service: name=slurm state=restarted
  sudo: true
---
- name: copy rpms
  copy: src=/tmp/rpmbuild dest=/tmp/
- name: install munge rpms
  shell: "rpm --install /tmp/rpmbuild/RPMS/x86_64/munge*{{ munge_version }}*rpm"
  sudo: true
  ignore_errors: true
- name: install perl
  yum: name={{ item }} state=latest
  with_items:
    - perl
    - perl-DBI
  sudo: true
- name: create slurm group
  group: name=slurm
  sudo: true
- name: create slurm user
  user: name=slurm group=slurm
  sudo: true
- name: install slurm rpms
  shell: "rpm --install /tmp/rpmbuild/RPMS/x86_64/slurm*{{ slurm_version }}*rpm"
  sudo: true
  ignore_errors: true
- name: load munge key
  include_vars: passwords.yml
- name: install munge key
  template: src=munge_key.j2 dest=/etc/munge/munge.key
  sudo: true
  notify: restart munge
- name: start munge
  service: name=munge state=started
  sudo: true
- name: install slurm.conf
  template: src=slurm.conf.j2 dest=/etc/slurm/slurm.conf
  sudo: true
  notify: restart slurm
- name: start slurm
  service: name=slurm state=started
  sudo: true
{{ mungekey }}
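The munge_key.j2 template above is just the single {{ mungekey }} expression; the value comes from the passwords.yml vars file pulled in by the "load munge key" task. A minimal sketch of that file, with a placeholder value (the real key should be generated randomly and the file kept encrypted, e.g. with ansible-vault):

---
mungekey: "REPLACE_WITH_GENERATED_MUNGE_KEY"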
#
# Example slurm.conf file. Please run configurator.html
# (in doc/html) to build a configuration file customized
# for your environment.
#
#
# slurm.conf file generated by configurator.html.
#
# See the slurm.conf man page for more information.
#
ClusterName=CIAB
ControlMachine={{ slurmctrl }}
#ControlAddr=
#BackupController=
#BackupAddr=
#
SlurmUser=slurm
#SlurmdUser=root
SlurmctldPort=6817
SlurmdPort=6818
AuthType=auth/munge
#JobCredentialPrivateKey=
#JobCredentialPublicCertificate=
StateSaveLocation=/tmp
SlurmdSpoolDir=/tmp/slurmd
SwitchType=switch/none
MpiDefault=none
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmdPidFile=/var/run/slurmd.pid
ProctrackType=proctrack/pgid
#PluginDir=
CacheGroups=0
#FirstJobId=
ReturnToService=0
#MaxJobCount=
#PlugStackConfig=
#PropagatePrioProcess=
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
#Prolog=
#Epilog=
#SrunProlog=
#SrunEpilog=
#TaskProlog=
#TaskEpilog=
#TaskPlugin=
#TrackWCKey=no
#TreeWidth=50
#TmpFS=
#UsePAM=
#
# TIMERS
SlurmctldTimeout=300
SlurmdTimeout=300
InactiveLimit=0
MinJobAge=300
KillWait=30
Waittime=0
#
# SCHEDULING
SchedulerType=sched/backfill
#SchedulerAuth=
#SchedulerPort=
#SchedulerRootFilter=
SelectType=select/linear
FastSchedule=1
#PriorityType=priority/multifactor
#PriorityDecayHalfLife=14-0
#PriorityUsageResetPeriod=14-0
#PriorityWeightFairshare=100000
#PriorityWeightAge=1000
#PriorityWeightPartition=10000
#PriorityWeightJobSize=1000
#PriorityMaxAge=1-0
#
# LOGGING
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
JobCompType=jobcomp/none
#JobCompLoc=
#
# ACCOUNTING
#JobAcctGatherType=jobacct_gather/linux
#JobAcctGatherFrequency=30
#
#AccountingStorageType=accounting_storage/slurmdbd
#AccountingStorageHost=
#AccountingStorageLoc=
#AccountingStoragePass=
#AccountingStorageUser=
#
# COMPUTE NODES
{% for queue in slurmqueues %}
{% for node in groups[queue.group] %}
NodeName={{ node }} Procs={{ hostvars[node]['ansible_processor_cores'] }} State=UNKNOWN
{% endfor %}
{% endfor %}
{% for queue in slurmqueues %}
PartitionName={{ queue.name }} Nodes={{ groups[queue.group]|join(',') }}
{% endfor %}
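For reference, with a hypothetical inventory in which the ComputeNode group contains node01 and node02, each reporting two processor cores, the loops above render to something like:

NodeName=node01 Procs=2 State=UNKNOWN
NodeName=node02 Procs=2 State=UNKNOWN
PartitionName=DEFAULT Nodes=node01,node02
PartitionName=batch Nodes=node01,node02
PartitionName=gpu Nodes=node01,node02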
@@ -21,3 +21,22 @@
     openvpn_servers: "{{ groups['OpenVPN-Server'] }}"
   roles:
     - { role: OpenVPN-Client }
- hosts: 'SubmitHost'
  roles:
    - { role: slurm-build, slurm_version: 14.11.0, munge_version: 0.5.11 }
- hosts: '*'
  vars:
    slurm_version: 14.11.0
    munge_version: 0.5.11
    slurmqueues:
      - {name: DEFAULT, group: ComputeNode}
      - {name: batch, group: ComputeNode}
      - {name: gpu, group: ComputeNode}
    slurmctrl: "{{ groups['SubmitHost'][0] }}"
  roles:
    - { role: slurm }
    - { role: test_user }
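These plays reference the SubmitHost, ComputeNode, and OpenVPN-Server groups, so the inventory has to define them; an illustrative minimal inventory (host names are placeholders, not part of this commit):

[SubmitHost]
submit01

[ComputeNode]
node01
node02

[OpenVPN-Server]
vpn01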