diff --git a/roles/enable_root/templates/authorized_keys.j2 b/roles/enable_root/templates/authorized_keys.j2
index 05cb91b928ab0b196cdf2c28c2617b74be9422ec..5ee0159b9ffdf0b9c7959ba0cb5830511a11f31b 100644
--- a/roles/enable_root/templates/authorized_keys.j2
+++ b/roles/enable_root/templates/authorized_keys.j2
@@ -1,4 +1,4 @@
-no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"ec2-user\" rather than the user \"root\".';echo;sleep 10" ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAt/cu47VucrAcmE8T+W4IiBQhZvzSQWaG8ORmEx+OV6d7JmeHtdpLrducKywIkdRJAh2KFsXawewFcYCoBgWRzBon7vZqcECY0Lu6uuQiPtfdAHDwBZpt7WD67UGU9m4eJXTOfVc0T9W/igwiofSt7XAlSrOSpuxyrwaNiFmZhWXOy61AGioSVCuOijzVUZHTJ2gIBx433+Y7TzCSP0LXlEypYxAsYnIeRtiiINRRCHSehUBmi9mnp+Y6J/LUg0CN2bJaoq4Zj/elGCflh4s/69w8vH7JpzqP1yzH0AFXS2C8l1Hs9SXTA2a4XrqJYg/JqP8j8lJXy+b28y6WgBBvVQ== jupiter@cvlproject.monash.edu
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2xrAkFRdYBpYs14AYSzdPFcIOt2zKXIgjPpyj/6eg/yl3y8N84T9VNw9ATRzb3+PJEw1lOfah6xLkFl7FueT6359y14c7wkNByGHgcL022SludkhM2zBe/3ebhcBs11L4Z725rqVnGDSKdKuwZjbCmUtu/nHwGYU/BnLKbQXMVyq53L5cbIyWGfvItPnwCF2ZMy1v0lmnFs1O3qDK9U/qcwc/77MTB0Z/ey0zsoXvmxjkdYr+zgQLRNm2+fkCXn+ZorbeDwWjhHE21arhMym5x3VG0XU2Ob9nL1Z2xEGQVSnBVWeadTMNzkfM8U07Md2tSOIC5B3ePETxk97puxbEQ== root@m2-m
 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPijQ597uLqEPAvVZXQlSjrUfFl2h7SRBTCRhH4hQJMVu55dhFYiojJZ0tjjV3jTcgWs1AsyRp3wDtNp8iQxbwEY2JPxCOjNuH0et4I/y3y6VUjcVWanSaIkdPf5AFNb9KIXo3Hvdyvav8SfFpioRQ0FKp8SZs1JYXpuQ0mZY26oKCKcNsWXv9ZN7knUN0xvYNMycpCnI2Nl666Zrs0gGyJ6e+Xq5bpk1lm8nuK9q52bTRjxqtdEBuSGwkZea+NBJzpYw5rEucteQI66y6tzFuYJk2WC4bUifffIxnkQXKYVynJg1MJ2CGI69r9hXt9eUtH3WrDxrJGmCau8jD3lib hines@sparge
 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAnakq6Lgq2n6yjcMaC7xQXMDMRdN33T6mPCqRy+TPdu0aPvVty0UFeAWsCyTxHeVfst9Vr0HwRRBvNihp1CJuOWGbk0H5a8yALDhLqoHazv2jlMQcLDgTktw0Jgo38+tcBShJyey1iHh8X5WgsS5/hgxR3OzoNBEzqzHUidMO/EI0ahNlM60l8EYL8Ww799NmPgqdPbwxK9nHsoFmx/NKhnUdronSg33L0CJZT3t2fccXAq+4Pbm7uYEkL3T/NgMdgpG5mKS3mKDtKyyKm2gOf3fVzExFew2etBxB3ANPEWvSuJ2XwXQv8sFE1722XQVR4RFgilCWUqXSN7EmqoHkNQ== jupiter@cvlproject
 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAsBtPRJtDJzyW+Utu0v03wklUpvzS5c1E34ysGDMepGU8VT1phJQ2EwRPWVLdRjVHnuhrEeeUHMyQwOtLEdvTPFnw5u/4bHQ+37iwtAeTV6oyPARJVzJLRGuDUuFdkQbXN7xxi/0KUljWgswLN34UV+p5PL79kQlErh1QCN06z5k=
diff --git a/roles/extra_rpms/vars/main.yml b/roles/extra_rpms/vars/main.yml
index a8a78add72c210a0c7d2140a7ce5eef32577b980..8aa77e17fc7123b11d7f5ff4d9799e9d0f61b129 100644
--- a/roles/extra_rpms/vars/main.yml
+++ b/roles/extra_rpms/vars/main.yml
@@ -121,6 +121,7 @@ pkgs:
  - libXpm
  - libXt
  - libXtst
+ - mailx
  - man
  - mod_ssl
  - mysql-server
diff --git a/roles/nfs-client/handlers/main.yml b/roles/nfs-client/handlers/main.yml
index 15acb0658980b561ad5a3b6243b65f2815bea721..d03ebd197c4a7e0160cf74834b78a625b71aefda 100644
--- a/roles/nfs-client/handlers/main.yml
+++ b/roles/nfs-client/handlers/main.yml
@@ -7,4 +7,3 @@
   service: name=rpcidmapd state=restarted
   sudo: true
   when: ansible_os_family == "RedHat"
-
diff --git a/roles/nfs-client/tasks/mountFileSystem.yml b/roles/nfs-client/tasks/mountFileSystem.yml
index 53abd33de55622d44c001ab2f186a0335198bc50..70cc74b6ec75619c5fd1352e79c9b21d9b99686a 100644
--- a/roles/nfs-client/tasks/mountFileSystem.yml
+++ b/roles/nfs-client/tasks/mountFileSystem.yml
@@ -6,7 +6,7 @@
 - name: "Mounting NFS mounts"
   mount: name={{ item.name }} src=" {{ item.ipv4 }}:{{ item.src }} " fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
   with_items: nfsMounts
-  notify: "restart authentication"
-  notify: "restart rpcbind"
-  notify: "restart idmap"
+  notify:
+    - "restart rpcbind"
+    - "restart idmap"
   sudo: true 
@@ -22,7 +22,7 @@
 - name: "Mounting NFS mounts"
   mount: name={{ item.name }} src=" {{ item.ipv4 }}:{{ item.src }} " fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
   with_items: nfsMounts
-  notify: "restart authentication"
-  notify: "restart idmap"
-  notify: "restart rpcbind"
+  notify:
+    - "restart idmap"
+    - "restart rpcbind"
   sudo: true 
diff --git a/roles/provision/tasks/main.yml b/roles/provision/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3b71e4d40f287e6fefd0be5763aa05e8ec406b1a
--- /dev/null
+++ b/roles/provision/tasks/main.yml
@@ -0,0 +1,9 @@
+---
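+# Deploy the provisioning script to {{ provision }} and run it from root's crontab every five minutes.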
+- name: copy provision template 
+  template: src=provision.sh.j2 dest={{ provision }} mode=755 owner=root
+  sudo: true
+
+- name: provision cron job 
+  cron: name=provision job={{ provision }} user=root minute=*/5 state=present
+  sudo: true
diff --git a/roles/slurm-provision/templates/slurm_provision.sh.j2 b/roles/provision/templates/provision.sh.j2
similarity index 97%
rename from roles/slurm-provision/templates/slurm_provision.sh.j2
rename to roles/provision/templates/provision.sh.j2
index 6da159b8f9638a0e9397ab653ffc96c806bc4800..69483a265968419c02dc1715e55caa51821a54fe 100644
--- a/roles/slurm-provision/templates/slurm_provision.sh.j2
+++ b/roles/provision/templates/provision.sh.j2
@@ -1,6 +1,6 @@
-#!/bin/sh
+#!/bin/bash
 
-HOME_DIR="/cvl/home"
+HOME_DIR="{{ home_dir }}"
 user_list=($(getent passwd | cut -d ":" -f1))
 log_file="/root/slurm.log"
 
diff --git a/roles/slurm-provision/vars/main.yml b/roles/provision/vars/main.yml
similarity index 100%
rename from roles/slurm-provision/vars/main.yml
rename to roles/provision/vars/main.yml
diff --git a/roles/slurm-provision/vars/readme.txt b/roles/provision/vars/readme.txt
similarity index 75%
rename from roles/slurm-provision/vars/readme.txt
rename to roles/provision/vars/readme.txt
index 5c0294c54811c2bc37e62769496f9d2d3bca4c5e..435fdf189ae7e5a8fa8c03eefe796c72e510df4c 100644
--- a/roles/slurm-provision/vars/readme.txt
+++ b/roles/provision/vars/readme.txt
@@ -1 +1,2 @@
 slurm_provision: "/usr/local/sbin/slurmu_provision.sh"
+home_dir: "/home"
diff --git a/roles/slurm-from-source/tasks/installCgroup.yml b/roles/slurm-from-source/tasks/installCgroup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..247983df5f642e58d329495f24e58fef58c0f850
--- /dev/null
+++ b/roles/slurm-from-source/tasks/installCgroup.yml
@@ -0,0 +1,24 @@
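+# Install cgroup support (libcgroup on RedHat, cgmanager on Debian) and deploy Slurm's cgroup.conf and cgroup_allowed_devices.conf.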
+- name: yum install cgroup  
+  yum: name={{ item }} state=installed
+  with_items:
+    - libcgroup
+  sudo: true
+  when: ansible_os_family == "RedHat"
+
+- name: apt install cgroup
+  apt: name={{ item }} state=installed update_cache=yes
+  with_items:
+    - cgmanager
+    - cgmanager-utils
+    - libcgmanager0
+  sudo: true
+  when: ansible_os_family == "Debian"
+
+- name: config cgroup.conf file
+  template: dest={{ slurm_dir }}/etc/cgroup.conf src=cgroup.conf.j2 mode=644
+  sudo: true
+
+- name: config cgroup_allowed_devices.conf file
+  template: dest={{ slurm_dir }}/etc/cgroup_allowed_devices.conf src=cgroup_allowed_devices.conf.j2 mode=644
+  sudo: true
diff --git a/roles/slurm-from-source/tasks/installNhc.yml b/roles/slurm-from-source/tasks/installNhc.yml
new file mode 100644
index 0000000000000000000000000000000000000000..628b10a9706a75c8947fa8bf434cec0c42735c26
--- /dev/null
+++ b/roles/slurm-from-source/tasks/installNhc.yml
@@ -0,0 +1,56 @@
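+# Build and install Warewulf NHC (node health check) from source, generate its config, and schedule the nhc_cron wrapper from root's crontab every five minutes.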
+- name: Download nhc source  
+  shell: wget https://cvl.massive.org.au/warewulf-nhc-{{ nhc_version }}.tar.gz
+#  shell: wget http://warewulf.lbl.gov/downloads/releases/warewulf-nhc/warewulf-nhc-{{ nhc_version }}.tar.gz
+  args:
+    chdir: /tmp
+    creates: /tmp/warewulf-nhc-{{ nhc_version }}.tar.gz
+
+- name: untar nhc 
+  shell: tar zxf /tmp/warewulf-nhc-{{ nhc_version }}.tar.gz 
+  args:
+    chdir: /tmp
+
+- name: build nhc 
+  shell: ./configure --prefix={{ nhc_dir }} && make
+  args:
+    chdir: /tmp/warewulf-nhc-{{ nhc_version }} 
+
+- name: install nhc 
+  shell: make install
+  sudo: true
+  args:
+    chdir: /tmp/warewulf-nhc-{{ nhc_version }} 
+
+- name: copy nhc sysconfig script
+  template: dest=/etc/sysconfig/nhc src=nhc.sysconfig.j2 mode=644
+  sudo: true
+
+- name: copy cron script
+  template: dest={{ nhc_dir }}/sbin/nhc_cron src=nhc_cron.j2 mode=755
+  sudo: true
+
+- name: copy nhc log rotate script
+  template: dest=/etc/logrotate.d/nhc src=nhclog.j2 mode=644
+  sudo: true
+
+- name: check whether nhc config file exists
+  shell: ls {{ nhc_dir }}/etc/nhc/{{ nhc_config_file }}
+  ignore_errors: true
+  register: generate_nhc_config_file
+
+- name: generate config file
+  shell: "{{ nhc_dir }}/sbin/nhc-genconf"
+  sudo: true
+  when: generate_nhc_config_file|failed
+
+- name: append user-defined checks to config file
+  lineinfile: dest="{{ nhc_dir }}/etc/nhc/{{ nhc_config_file }}" line="{{ item }}"
+  with_items: nhc_user_conf
+  sudo: true
+  when: nhc_user_conf is defined
+
+- name: start cron job 
+  cron: name=nhc_monitor job={{ nhc_dir }}/sbin/nhc_cron user=root minute=*/5 state=present 
+  sudo: true
+
diff --git a/roles/slurm-from-source/tasks/main.yml b/roles/slurm-from-source/tasks/main.yml
index e4b53d7532641b83c0a0f2a95093463f93128a6a..c44fb08a7b4f4c6bfa005530d34fb49b3830f9ce 100644
--- a/roles/slurm-from-source/tasks/main.yml
+++ b/roles/slurm-from-source/tasks/main.yml
@@ -24,7 +24,7 @@
   sudo: true
 
 - name: create data directory
-  file: path={{ slurmdatadir }} state=directory owner=slurm group=slurm mode=750
+  file: path={{ slurmdatadir }} state=directory owner=slurm group=slurm mode=755
   sudo: true
   when: slurmdatadir is defined 
 
@@ -59,6 +59,8 @@
     - pam-devel
     - perl-ExtUtils-MakeMaker
     - bzip2-devel
+    - hwloc
+    - hwloc-devel
   sudo: true
   when: ansible_os_family == "RedHat"
 
@@ -108,6 +110,7 @@
 - name: install slurm.conf
   template: src=slurm.conf.j2 dest={{ slurm_dir }}/etc/slurm.conf
   sudo: true
+  notify: restart slurm
   when: slurm_use_vpn==False
 
 - name: install slurm.conf
@@ -120,3 +123,6 @@
   sudo: true
   when: ansible_os_family == 'RedHat' 
 
+- include: installCgroup.yml
+- include: installNhc.yml
+
diff --git a/roles/slurm-from-source/templates/cgroup.conf.j2 b/roles/slurm-from-source/templates/cgroup.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c19d5c5f41e16f4187fe5cf1a889a7e796806497
--- /dev/null
+++ b/roles/slurm-from-source/templates/cgroup.conf.j2
@@ -0,0 +1,7 @@
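+# Slurm cgroup plugin settings: confine jobs to their allocated cores and to the whitelisted devices.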
+CgroupAutomount=yes
+ConstrainDevices=yes
+TaskAffinity=yes
+ConstrainCores=yes
+AllowedDevicesFile={{ slurm_dir }}/etc/cgroup_allowed_devices.conf
+
diff --git a/roles/slurm-from-source/templates/cgroup_allowed_devices.conf.j2 b/roles/slurm-from-source/templates/cgroup_allowed_devices.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ed829ec80e683f56e733e9a287be395acf2a6e18
--- /dev/null
+++ b/roles/slurm-from-source/templates/cgroup_allowed_devices.conf.j2
@@ -0,0 +1,7 @@
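+{# Devices every job may access when ConstrainDevices=yes; anything not listed is blocked by the cgroup devices controller. #}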
+/dev/vd*
+/dev/null
+/dev/zero
+/dev/urandom
+/dev/cpu/*/*
+
diff --git a/roles/slurm-from-source/templates/nhc.sysconfig.j2 b/roles/slurm-from-source/templates/nhc.sysconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..f58b7ee412c650d1dfd74baa1b222224b77ad719
--- /dev/null
+++ b/roles/slurm-from-source/templates/nhc.sysconfig.j2
@@ -0,0 +1,12 @@
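+# Sourced by nhc and by the nhc_cron wrapper (nhc_cron.j2); the NHC_EMAIL_* and NHC_LOOP_TIME settings are consumed by the wrapper.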
+CONFDIR={{ nhc_dir }}/etc/nhc
+NHC_RM=slurm
+HELPERDIR={{ nhc_dir }}/libexec/nhc
+PATH={{ slurm_dir }}/bin:{{ nhc_dir }}/sbin:$PATH
+LOGFILE=/var/log/nhc.log
+CONFFILE="{{ nhc_dir }}/etc/nhc/{{ nhc_config_file }}"
+NHC_EMAIL_TO="jupiter.hu@monash.edu,chris.hines@monash.edu"
+NHC_EMAIL_SUBJECT="Node failure"
+NHC_LOOP_TIME="300"
+
+
diff --git a/roles/slurm-from-source/templates/nhc_cron.j2 b/roles/slurm-from-source/templates/nhc_cron.j2
new file mode 100755
index 0000000000000000000000000000000000000000..6b683da3b06c1ba922223c5f4b28585c751e0066
--- /dev/null
+++ b/roles/slurm-from-source/templates/nhc_cron.j2
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+error=""
+state_change="1"
+current_state="0"
+previous_state="0"
+
+[ -e /etc/sysconfig/nhc ] && . /etc/sysconfig/nhc
+config_file=${CONFFILE-/etc/nhc/nhc.conf}
+loop=${NHC_LOOP_TIME-300}
+email_subject=${NHC_EMAIL_SUBJECT-notification}
+logfile=${LOGFILE-/var/log/nhc.log}
+email_to=${NHC_EMAIL_TO-root}
+
+function log() {
+    message="$1"
+    echo "$(date): ${message}" >> ${logfile}
+}
+
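+# Send at most one email per streak of failures: the sentinel file is emptied after a healthy run and set to "1" once a notification has gone out, so repeated failures do not re-send.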
+function email() {
+    post=$(cat /root/nhc_cron_sendemail.txt 2>/dev/null)
+    if [ -z "${post}" ]; then
+        message=$(cat /root/nhc.log)
+        echo "${message}" | mail -s "${email_subject}" "${email_to}"
+        log "Node error, send email to ${email_to}"
+        echo "1" >| /root/nhc_cron_sendemail.txt
+    fi
+}
+
+log "Start health check ${config_file}"
+
+nhc -c "${config_file}" > /root/nhc.log 2>&1  || { log "$(cat /root/nhc.log)" && email && exit 1; }
+
+echo -n "" >| /root/nhc_cron_sendemail.txt
+
+
diff --git a/roles/slurm-from-source/templates/nhclog.j2 b/roles/slurm-from-source/templates/nhclog.j2
new file mode 100644
index 0000000000000000000000000000000000000000..edba9b8564ed6dbc1d51ff3eb93edf6bceaed860
--- /dev/null
+++ b/roles/slurm-from-source/templates/nhclog.j2
@@ -0,0 +1,9 @@
+/var/log/nhc.log
+/root/nhc.log
+{
+   missingok
+   notifempty
+   weekly
+   size 200k
+}
+
diff --git a/roles/slurm-from-source/templates/slurm.conf.j2 b/roles/slurm-from-source/templates/slurm.conf.j2
index e45f1318d036f2bdab9a5c588907f2d1fc35077b..20cb0cd8b5dea984ee6ef1904169cbedfd4fb833 100644
--- a/roles/slurm-from-source/templates/slurm.conf.j2
+++ b/roles/slurm-from-source/templates/slurm.conf.j2
@@ -24,14 +24,14 @@ AuthType=auth/munge
 StateSaveLocation={{ slurmstatedir }}
 SlurmdSpoolDir={{ slurmdatadir }}
 SwitchType=switch/none
-MpiDefault=none
+MpiDefault=pmi2
 SlurmctldPidFile={{ slurmpiddir }}/slurmctld.pid
 SlurmdPidFile={{ slurmpiddir }}/slurmd.pid
-ProctrackType=proctrack/pgid
+ProctrackType=proctrack/linuxproc
 #PluginDir=
 CacheGroups=0
 #FirstJobId=
-ReturnToService=0
+ReturnToService=1
 #MaxJobCount=
 #PlugStackConfig=
 #PropagatePrioProcess=
@@ -43,19 +43,25 @@ ReturnToService=0
 #SrunEpilog=
 #TaskProlog=
 #TaskEpilog=
-#TaskPlugin=
+TaskPlugin=task/cgroup
+#TaskPlugin=task/affinity
+#TaskPlugin=task/affinity,task/cgroup
+#JobSubmitPlugins=lua
+OverTimeLimit=1
+CompleteWait=10
+
 #TrackWCKey=no
 #TreeWidth=50
 #TmpFS=
 #UsePAM=
 #
 # TIMERS
-SlurmctldTimeout=300
-SlurmdTimeout=300
-InactiveLimit=0
-MinJobAge=300
-KillWait=30
-Waittime=0
+#SlurmctldTimeout=300
+#SlurmdTimeout=300
+#InactiveLimit=0
+#MinJobAge=300
+KillWait=10
+#Waittime=0
 #
 # SCHEDULING
 SchedulerType={{ slurmschedulertype }}
@@ -65,32 +71,34 @@ SchedulerType={{ slurmschedulertype }}
 SelectType={{ slurmselecttype }}
 FastSchedule={{ slurmfastschedule }}
 #PriorityType=priority/multifactor
-#PriorityDecayHalfLife=14-0
+#PriorityFlags=Ticket_Based
+#PriorityCalcPeriod=5
+#PriorityDecayHalfLife=0
 #PriorityUsageResetPeriod=14-0
-#PriorityWeightFairshare=100000
-#PriorityWeightAge=1000
+#PriorityWeightFairshare=10000
+#PriorityWeightAge=10000
 #PriorityWeightPartition=10000
-#PriorityWeightJobSize=1000
-#PriorityMaxAge=1-0
+#PriorityWeightJobSize=10000
+#PriorityMaxAge=14-0
 #
 # LOGGING
 {% if slurmctlddebug %}
 SlurmctldDebug={{ slurmctlddebug.level }}
-SlurmctldLogFile={{ slurm_dir }}{{ slurmctlddebug.log }}
+SlurmctldLogFile={{ slurmctlddebug.log }}
 {% else %}
 #SlurmctldDebug=
 #SlurmctldLogFile=
 {% endif %}
 {% if slurmddebug %}
 SlurmdDebug={{ slurmddebug.level }}
-SlurmdLogFile={{ slurm_dir }}{{ slurmddebug.log }}
+SlurmdLogFile={{ slurmddebug.log }}
 {% else %}
 #SlurmdDebug=
 #SlurmdLogFile=
 {% endif %}
 {% if slurmschedlog %}
 SlurmSchedlogLevel={{ slurmschedlog.level }}
-SlurmSchedLogFile={{ slurm_dir }}{{ slurmschedlog.log }}
+SlurmSchedLogFile={{ slurmschedlog.log }}
 {% else %}
 #SlurmSchedlogLevel=
 #SlurmSchedLogFile=
@@ -115,7 +123,7 @@ AccountingStorageHost={{ slurmctrl }}
 #AccountingStorageUser=
 #
 #GRES
-#GresTypes=gpu
+GresTypes=gpu
 
 # Fair share
 {% if slurmfairshare.def %}
@@ -133,9 +141,10 @@ MpiParams=ports=12000-12999
 {% endfor %}
 {% endfor %}
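+{# Node weights scale with core count so lower-weight (fewer-core) nodes are allocated first; hostnames containing "vis" advertise a gpu GRES, typed k1m/k2m when the hostname contains k1/k2. #}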
 {% for node in nodelist|unique %}
-NodeName={{ node }} Procs={{ hostvars[node]['ansible_processor_vcpus'] }} 
+NodeName={{ node }} Procs={{ hostvars[node]['ansible_processor_vcpus'] }} RealMemory={{ hostvars[node].ansible_memory_mb.real.total }} Sockets={{ hostvars[node]['ansible_processor_vcpus'] }} CoresPerSocket=1 ThreadsPerCore={{ hostvars[node].ansible_processor_threads_per_core }} {% if hostvars[node].ansible_hostname.find('vis') != -1 %}Gres=gpu{% if hostvars[node].ansible_hostname.find('k1') > 0 %}:k1m{% endif %}{% if hostvars[node].ansible_hostname.find('k2') > 0 %}:k2m{% endif %}:1{% endif %} {% if hostvars[node]['ansible_processor_vcpus'] == 1 %}Weight=1{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 1 and hostvars[node]['ansible_processor_vcpus'] <= 16 %}Weight=3{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 16 and hostvars[node]['ansible_processor_vcpus'] <= 20 %}Weight=5{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 20 and hostvars[node]['ansible_processor_vcpus'] <= 40 %}Weight=7{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 40 and hostvars[node]['ansible_processor_vcpus'] <= 64 %}Weight=8{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 64 and hostvars[node]['ansible_processor_vcpus'] <= 128 %}Weight=9{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 128 %}Weight=10{% endif %} Feature=stage1 State=UNKNOWN  
 {% endfor %}
 
 {% for queue in slurmqueues %}
-PartitionName={{ queue.name }} {% if queue.default %}Default=yes{% endif %} Nodes={{ groups[queue.group]|join(',') }}
+PartitionName={{ queue.name }} {% if queue.default %}Default=yes{% endif %} Nodes={{ groups[queue.group]|join(',') }} DefMemPerCPU=1024 DefaultTime=24:00:00 State=UP 
 {% endfor %}
diff --git a/roles/slurm-from-source/templates/slurm_setup.sh.j2 b/roles/slurm-from-source/templates/slurm_setup.sh.j2
index e999952f38698d24b8039fe3bc1715bbc5a976f7..5ef2bf7313341eb20b4bf984bf04e3f09e7114ca 100644
--- a/roles/slurm-from-source/templates/slurm_setup.sh.j2
+++ b/roles/slurm-from-source/templates/slurm_setup.sh.j2
@@ -1,5 +1,6 @@
 
-export PATH={{ munge_dir }}/bin:{{ slurm_dir }}/bin:{{ slurm_dir }}/sbin:$PATH
+export PATH={{ munge_dir }}/bin:{{ slurm_dir }}/bin:{{ slurm_dir }}/sbin:{{ nhc_dir }}/sbin:$PATH
 
 export LD_LIBRARY_PATH={{ munge_dir }}/lib:{{ slurm_dir }}/lib:{{ slurm_dir }}/lib/slurm:$LD_LIBRARY_PATH
 
+export SLURM_SERVER_HOME={{ slurm_dir }}
diff --git a/roles/slurm-provision/tasks/main.yml b/roles/slurm-provision/tasks/main.yml
deleted file mode 100644
index 4ed1e2c67327e88758d27869cb5e8ed9152aa1fe..0000000000000000000000000000000000000000
--- a/roles/slurm-provision/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: copy slurm provision template 
-  template: src=slurm_provision.sh.j2 dest={{ slurm_provision }} mode=755 owner=root
-  sudo: true
-
-- name: slurm cron job 
-  cron: name=slurm-provision job={{ slurm_provision }} user=root minute=*/5 state=present
-  sudo: true