From 29d5e407046094ea1e823df2d036332b67900ecd Mon Sep 17 00:00:00 2001
From: Andreas Hamacher <andreas.hamacher@monash.edu>
Date: Wed, 13 Nov 2019 15:54:56 +1100
Subject: [PATCH] Role updates

---
 roles/make_filesystems/tasks/main.yml         |  6 +-
 roles/rsyslog_client/tasks/main.yml           |  2 +-
 roles/slurm_config/tasks/main.yml             | 11 ++-
 .../slurm_config/templates/job_submit.lua.j2  | 70 +++++++++++++++++++
 4 files changed, 85 insertions(+), 4 deletions(-)
 create mode 100644 roles/slurm_config/templates/job_submit.lua.j2

diff --git a/roles/make_filesystems/tasks/main.yml b/roles/make_filesystems/tasks/main.yml
index deab4d7b..31c9b40b 100644
--- a/roles/make_filesystems/tasks/main.yml
+++ b/roles/make_filesystems/tasks/main.yml
@@ -10,13 +10,15 @@
 - name: format volumes
   filesystem: fstype={{ item.fstype }} dev={{ hostvars[inventory_hostname]['ansible_host_volumes'][item.name]['dev'] }}
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
+  become_user: root
   when: cinder
 
 - name: format volumes
   mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[inventory_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
+  become_user: root
   when: cinder
 
 - name: format volumes
diff --git a/roles/rsyslog_client/tasks/main.yml b/roles/rsyslog_client/tasks/main.yml
index 9b087381..eed6753a 100644
--- a/roles/rsyslog_client/tasks/main.yml
+++ b/roles/rsyslog_client/tasks/main.yml
@@ -7,7 +7,7 @@
   when: ansible_os_family == 'RedHat'
 
 - name: install rsyslog
-  yum: name=rsyslog state=installed
+  apt: name=rsyslog state=installed
   become: true
   become_user: root
   when: ansible_os_family == 'Debian'
diff --git a/roles/slurm_config/tasks/main.yml b/roles/slurm_config/tasks/main.yml
index 8a6768ab..93912a85 100644
--- a/roles/slurm_config/tasks/main.yml
+++ b/roles/slurm_config/tasks/main.yml
@@ -1,4 +1,13 @@
 ---
 - name: install slurm.conf
   copy: src=files/slurm.conf dest={{ slurm_dir }}/etc/slurm.conf
-  sudo: true
+  become: true
+  become_user: root
+
+- name: setup plugin
+  template: src=job_submit.lua.j2 dest={{ slurm_dir }}/etc/job_submit.lua mode=0755
+  run_once: true
+  become: true
+  become_user: root
+  when: slurm_lua is defined
+
diff --git a/roles/slurm_config/templates/job_submit.lua.j2 b/roles/slurm_config/templates/job_submit.lua.j2
new file mode 100644
index 00000000..22b05df7
--- /dev/null
+++ b/roles/slurm_config/templates/job_submit.lua.j2
@@ -0,0 +1,70 @@
+--[[
+
+ Example lua script demonstrating the SLURM job_submit/lua interface.
+ This is only an example, not meant for use in its current form.
+
+ Leave the function names, arguments, local variables and setmetatable
+ set up logic in each function unchanged. Change only the logic after
+ the line containing "*** YOUR LOGIC GOES BELOW ***".
+
+ For use, this script should be copied into a file named "job_submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
+
+
+--]]
+
+function slurm_job_submit(job_desc, part_list, submit_uid)
+
+
+-- Check no default account
+
+if job_desc.account == "default" then
+   slurm.log_user("You have to specify your project ID as part of your job submission. The account=default is now deprecated on M3 job scheduler.")
+   return slurm.ERROR
+end
+
+
+-- Check Desktop requests with more than one node
+
+if ((job_desc.name == "desktop") and (job_desc.min_nodes > 1 )) then
+   slurm.log_user("The current M3 Desktop applications are unable to utilise more than one node, please select one node instead")
+   return slurm.ERROR
+end
+
+
+
+-- Check for gres.gpu requirements in m3c, m3h and m3g, else move job to comp
+
+if ((job_desc.partition == "m3c" ) or (job_desc.partition == "m3h" ) or (job_desc.partition == "m3g" ))  then
+   local partition = ""
+   if (job_desc.gres == nil) then
+      partition = "comp"
+      slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
+      job_desc.partition = partition
+   end
+   return slurm.SUCCESS
+end
+
+
+-- Check for QOS rtq in m3c, m3h , m3g and partition=nil, then forward job to rtqp,comp,m3g
+
+if ((job_desc.qos == "rtq") and (job_desc.partition == nil)) then
+   local partition = ""
+   partition = "rtqp,comp,m3g"
+   slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
+   job_desc.partition = partition
+   return slurm.SUCCESS
+end
+
+
+
+end
+
+
+
+function slurm_job_modify(job_desc, job_rec, part_list, modify_uid)
+       return slurm.SUCCESS
+end
+
+slurm.log_info("initialized")
+return slurm.SUCCESS
-- 
GitLab