diff --git a/roles/make_filesystems/tasks/main.yml b/roles/make_filesystems/tasks/main.yml
index deab4d7bd64144ab6ae1ab53736a0632fab155bd..31c9b40b8c18045f7fbc25b8bd4453168f8c397f 100644
--- a/roles/make_filesystems/tasks/main.yml
+++ b/roles/make_filesystems/tasks/main.yml
@@ -10,13 +10,15 @@
 - name: format volumes
   filesystem: fstype={{ item.fstype }} dev={{ hostvars[inventory_hostname]['ansible_host_volumes'][item.name]['dev'] }}
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
+  become_user: root
   when: cinder
 
-- name: format volumes
+- name: mount volumes
   mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[inventory_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
+  become_user: root
   when: cinder
 
 - name: format volumes
diff --git a/roles/rsyslog_client/tasks/main.yml b/roles/rsyslog_client/tasks/main.yml
index 9b087381192f7818bd9a61467dea29614dab0ac7..eed6753ae301b63d5b9236c1b889f122e31b6cc4 100644
--- a/roles/rsyslog_client/tasks/main.yml
+++ b/roles/rsyslog_client/tasks/main.yml
@@ -7,7 +7,7 @@
   when: ansible_os_family == 'RedHat'
 
 - name: install rsyslog
-  yum: name=rsyslog state=installed
+  apt: name=rsyslog state=present
   become: true
   become_user: root
   when: ansible_os_family == 'Debian'
diff --git a/roles/slurm_config/tasks/main.yml b/roles/slurm_config/tasks/main.yml
index 8a6768ab999e1b30bade948452f4e5f5f2f9b6f0..93912a851dda2ccb18c18cb26b6c84b2f684c481 100644
--- a/roles/slurm_config/tasks/main.yml
+++ b/roles/slurm_config/tasks/main.yml
@@ -1,4 +1,15 @@
 ---
 - name: install slurm.conf
   copy: src=files/slurm.conf dest={{ slurm_dir }}/etc/slurm.conf
-  sudo: true
+  become: true
+  become_user: root
+
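+# job_submit.lua is only deployed when slurm_lua is defined (e.g.
+# "slurm_lua: true" in group_vars for clusters enabling the lua plugin).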
+- name: setup plugin
+  template: src=job_submit.lua.j2 dest={{ slurm_dir }}/etc/job_submit.lua mode=0755
+  run_once: true
+  become: true
+  become_user: root
+  when: slurm_lua is defined
+
diff --git a/roles/slurm_config/templates/job_submit.lua.j2 b/roles/slurm_config/templates/job_submit.lua.j2
new file mode 100644
index 0000000000000000000000000000000000000000..22b05df79c76d4e33a0aae386ac6f5102454ee32
--- /dev/null
+++ b/roles/slurm_config/templates/job_submit.lua.j2
@@ -0,0 +1,75 @@
+--[[
+
+ Site job_submit/lua script, adapted from the example distributed with
+ SLURM that demonstrates the job_submit/lua interface.
+
+ Leave the function names, arguments, local variables and setmetatable
+ setup logic in each function unchanged; change only the policy logic
+ inside the function bodies.
+
+ For use, this script should be copied into a file named "job_submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
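+ To enable the plugin, set "JobSubmitPlugins=lua" in slurm.conf and
+ restart slurmctld.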
+
+
+--]]
+
+function slurm_job_submit(job_desc, part_list, submit_uid)
+
+
+-- Check no default account
+
+if job_desc.account == "default" then
+   slurm.log_user("You have to specify your project ID as part of your job submission. The account=default is now deprecated on M3 job scheduler.")
+   return slurm.ERROR
+end
+
+
+-- Check Desktop requests with more than one node
+
+if ((job_desc.name == "desktop") and (job_desc.min_nodes > 1 )) then
+   slurm.log_user("The current M3 Desktop applications are unable to utilise more than one node, please select one node instead")
+   return slurm.ERROR
+end
+
+
+
+-- Check for gres.gpu requirements in m3c, m3h and m3g, else move job to comp
+
+if ((job_desc.partition == "m3c" ) or (job_desc.partition == "m3h" ) or (job_desc.partition == "m3g" ))  then
+   local partition = ""
+   if (job_desc.gres == nil) then
+      partition = "comp"
+      slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
+      job_desc.partition = partition
+   end
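+   -- Jobs that do request a gres keep their requested partition.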
+   return slurm.SUCCESS
+end
+
+
+-- If QOS rtq is requested and no partition was given, forward the job to rtqp,comp,m3g
+
+if ((job_desc.qos == "rtq") and (job_desc.partition == nil)) then
+   local partition = ""
+   partition = "rtqp,comp,m3g"
+   slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
+   job_desc.partition = partition
+   return slurm.SUCCESS
+end
+
+-- No rule matched above: accept the job as submitted.
+return slurm.SUCCESS
+
+end
+
+
+
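+-- Accept all job modification requests unchanged.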
+function slurm_job_modify(job_desc, job_rec, part_list, modify_uid)
+       return slurm.SUCCESS
+end
+
+slurm.log_info("initialized")
+return slurm.SUCCESS