Commit 6b660f16 authored by Andreas Hamacher

Merge branch 'remDuplicatejobsubmission' into 'master'

removing jobsubmit.lua because it also exists in slurm_config. also REM SPANK tag

See merge request !411
parents c34d10cc 45bdcd56
@@ -3,7 +3,6 @@
   stat:
     path: /raid
   register: raiddir
-  tags: [SPANK]
 - name: Set /mnt/nvme as spankprivatetmpdir if present
   file:
@@ -15,7 +14,6 @@
     state: link
   become: true
   when: hostvars[inventory_hostname]['ansible_devices']['nvme0n1'] is defined
-  tags: [SPANK]
 - name: Link /raid as spankprivatetmpdir if present
   file:
@@ -27,7 +25,6 @@
     state: link
   become: true
   when: raiddir.stat.isdir is defined and raiddir.stat.isdir == True
-  tags: [SPANK]
 - name: create spankprivatetmpdir as directory if there is not a fast drive present
   file:
@@ -38,7 +35,6 @@
     state: directory
   become: true
   when: hostvars[inventory_hostname]['ansible_devices']['nvme0n1'] is not defined and raiddir.stat.isdir is not defined
-  tags: [SPANK]
 - name: create munge group
   group: name=munge system=yes gid=498
@@ -172,22 +168,10 @@
   become: true
   when: slurm_use_vpn==True
-#- name: install job_submit.lua
-#  copy: src=files/job_submit.lua dest={{ slurm_dir }}/etc/job_submit.lua
-#  become: true
-#  when: slurm_use_vpn==False
 - name: setup environment variables
   template: src=slurm_setup.sh.j2 dest=/etc/profile.d/slurm_setup.sh
   become: true
-- name: setup plugin
-  template: src=job_submit.lua.j2 dest={{ slurm_dir }}/etc/job_submit.lua mode=755
-  #delegate_to: "{{ slurmctrl }}"
-  #run_once: true
-  become: true
-  when: slurm_lua==True
 - include: installCgroup.yml
 - include: spankprivatetmpdir.yml
job_submit.lua (deleted)
--[[
 Example lua script demonstrating the SLURM job_submit/lua interface.
 This is only an example, not meant for use in its current form.
 Leave the function names, arguments, local variables and setmetatable
 set up logic in each function unchanged. Change only the logic after
 the line containing "*** YOUR LOGIC GOES BELOW ***".
 For use, this script should be copied into a file named "job_submit.lua"
 in the same directory as the SLURM configuration file, slurm.conf.
--]]
function slurm_job_submit(job_desc, part_list, submit_uid)
    -- Reject jobs submitted against the deprecated "default" account
    if job_desc.account == "default" then
        slurm.log_user("You have to specify your project ID as part of your job submission. The account=default is now deprecated on the M3 job scheduler.")
        return slurm.ERROR
    end
    -- Reject desktop requests asking for more than one node
    if ((job_desc.name == "desktop") and (job_desc.min_nodes > 1)) then
        slurm.log_user("The current M3 Desktop applications are unable to utilise more than one node; please select one node instead.")
        return slurm.ERROR
    end
    -- Check for gres.gpu requirements in m3c, m3h and m3g; otherwise move the job to comp
    if ((job_desc.partition == "m3c") or (job_desc.partition == "m3h") or (job_desc.partition == "m3g")) then
        if (job_desc.gres == nil) then
            local partition = "comp"
            slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
            job_desc.partition = partition
        end
        return slurm.SUCCESS
    end
    -- Forward QOS rtq jobs with no explicit partition to rtqp,comp,m3g
    if ((job_desc.qos == "rtq") and (job_desc.partition == nil)) then
        local partition = "rtqp,comp,m3g"
        slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
        job_desc.partition = partition
        return slurm.SUCCESS
    end
    -- Fall through: accept jobs that match none of the checks above
    return slurm.SUCCESS
end
function slurm_job_modify(job_desc, job_rec, part_list, modify_uid)
    return slurm.SUCCESS
end

slurm.log_info("initialized")

return slurm.SUCCESS
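
For reference, slurmctld only loads this script when slurm.conf sets JobSubmitPlugins=lua and the file sits next to slurm.conf as job_submit.lua, as the header comment above notes. The routing logic can also be exercised outside of Slurm: the harness below is a minimal sketch, not part of this repository; it assumes plain Lua 5.x, and the slurm stub, the file name test_job_submit.lua, and the job fields and project ID are hypothetical stand-ins.

-- test_job_submit.lua: hypothetical stand-alone harness (not part of this
-- repository), assuming plain Lua 5.x. Inside slurmctld the real `slurm`
-- table is injected by the job_submit/lua plugin; this stub mimics only
-- the fields the script above touches.
slurm = {
    SUCCESS = 0,
    ERROR   = -1,
    log_info = function(...) print("info:", ...) end,
    log_user = function(...) print("user:", ...) end,
}

dofile("job_submit.lua")  -- load the script shown above; logs "initialized"

-- A job aimed at m3g with no GPU gres should be rerouted to comp.
local job = { account = "proj0001", name = "batch", min_nodes = 1,
              partition = "m3g", gres = nil, qos = nil }
assert(slurm_job_submit(job, nil, 10001) == slurm.SUCCESS)
assert(job.partition == "comp")

-- An rtq job with no explicit partition should fan out to rtqp,comp,m3g.
local rtjob = { account = "proj0001", name = "rt", min_nodes = 1,
                partition = nil, qos = "rtq" }
assert(slurm_job_submit(rtjob, nil, 10001) == slurm.SUCCESS)
assert(rtjob.partition == "rtqp,comp,m3g")

Run with `lua test_job_submit.lua` from the directory containing job_submit.lua; it prints the two log lines and exits quietly if both routing checks hold.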