diff --git a/roles/config_repos/files/epel.repo b/roles/config_repos/files/epel.repo
new file mode 100644
index 0000000000000000000000000000000000000000..053ed43c62542e860cdb16660bdab1918b90fd7d
--- /dev/null
+++ b/roles/config_repos/files/epel.repo
@@ -0,0 +1,8 @@
+# Place this file in your /etc/yum.repos.d/ directory
+
+[epel]
+name=Extra Packages for Enterprise Linux 7 - $basearch
+baseurl=https://consistency0/epel/$releasever/$basearch/
+enabled=0
+gpgcheck=0
+sslverify=false
diff --git a/roles/config_repos/tasks/main.yml b/roles/config_repos/tasks/main.yml
index fe8f56f6c46f9f755625d3ca32fc0f855d6ce2a8..f397640fc77541b7daf56e35a499bef69e561ba8 100644
--- a/roles/config_repos/tasks/main.yml
+++ b/roles/config_repos/tasks/main.yml
@@ -35,12 +35,15 @@
   with_items:
   - monashhpc_base.repo
   - monashhpc_others.repo
+  - epel.repo
 
 - name: get enabled repos
 #shell: yum repolist | grep -v "repo id" | grep -v "Loaded plugins" | head -n -1 | cut -f 1 -d '/' | sed -s 's/\!//'
   shell: yum repolist all | grep enabled | cut -f 1 -d '/' | sed -s 's/\!//'
   register: repolist
   check_mode: no
+  args:
+    warn: False
 
 - name: disable unwanted repos
   shell: yum-config-manager --disable "{{ item }}"
diff --git a/roles/deploy-xorg/tasks/main.yml b/roles/deploy-xorg/tasks/main.yml
index 32afb6d4c07a79ebc436c4559fa1d3fcf87cf581..c1e2d8af8b3c3ff82b848ec5a1b182251788208c 100644
--- a/roles/deploy-xorg/tasks/main.yml
+++ b/roles/deploy-xorg/tasks/main.yml
@@ -9,8 +9,15 @@
     nvidiacardslist: "{{ nvidiacards.stdout | from_json }}"
 
 - name: generate nvidia-xorg-conf
-  sudo: true
+  become: yes
+  become_user: root
   template:
     src: xorg.conf.j2
     dest: "{{ item['filename'] }}"
   with_items: "{{ nvidiacardslist }}"
+
+- name: copy xorg.conf.10 to xorg.conf for m3f nodes
+  become: yes
+  become_user: root
+  command: cp /etc/X11/xorg.conf.10 /etc/X11/xorg.conf
+  when: "'m3f' in ansible_hostname"
diff --git a/roles/deploy-xorg/templates/xorg.conf.j2 b/roles/deploy-xorg/templates/xorg.conf.j2
index 2fc5f043c03e710be306d3b740cb6f5963216860..7f8b0f82076f6cfa81b34c4dd00a0d460cbe81a6 100644
--- a/roles/deploy-xorg/templates/xorg.conf.j2
+++ b/roles/deploy-xorg/templates/xorg.conf.j2
@@ -66,8 +66,10 @@ Section "Screen"
     Device         "Device{{item.screens.index(screen)}}"
     Monitor        "Monitor{{item.screens.index(screen)}}"
     DefaultDepth    24
-    Option         "UseDisplayDevice" "None"
     Option         "ProbeAllGpus" "false"
+{% if item.boardname == 'GRID K1' %}
+    Option         "UseDisplayDevice" "None"
+{% endif %}
     SubSection     "Display"
         Virtual     1920 1200
         Depth       24
diff --git a/roles/ec2-user/tasks/main.yml b/roles/ec2-user/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0c147a5ccde699030781a99d9f341be54ce1133a
--- /dev/null
+++ b/roles/ec2-user/tasks/main.yml
@@ -0,0 +1,7 @@
+- name: Add ec2-user to systems group locally
+  user:
+    name: ec2-user
+    groups: systems
+    append: yes
+  become: true
+  become_user: root
diff --git a/roles/gluster_server/tasks/main.yml b/roles/gluster_server/tasks/main.yml
index b0193747ac3071fa6ae357287c33d73052dc1148..c137dde113f5bdd38ea7eaa99896303c7c6f5d6f 100644
--- a/roles/gluster_server/tasks/main.yml
+++ b/roles/gluster_server/tasks/main.yml
@@ -6,37 +6,47 @@
   with_items:
   - glusterfs
   - glusterfs-server
-
-  sudo: true
+  become: true
+  become_user: root
 
 - name: install gluster
   apt: name=glusterfs-server state='latest'
   when: ansible_os_family == 'Debian'
-  sudo: true
+  become: true
+  become_user: root
 
 - name: start daemon
   service: name=glusterd enabled=yes state=started
-  sudo: true
+  become: true
+  become_user: root
   when: ansible_os_family == 'RedHat'
 
 - name: start daemon
   service: name=glusterfs-server enabled=yes state=started
-  sudo: true
+  become: true
+  become_user: root
   when: ansible_os_family == 'Debian'
 
 - name: make brick dir
   file: state=directory path="{{ brickmnt }}/brick"
-  sudo: true
+  become: true
+  become_user: root
 
 - name: set quorum ratio
   command: "gluster volume set all cluster.server-quorum-ratio 51%"
-  sudo: true
+  become: true
+  become_user: root
+  ignore_errors: yes
 
 - name: set quorum type
   command: "gluster volume set all cluster.server-quorum-type server"
-  sudo: true
+  become: true
+  become_user: root
+  ignore_errors: true
 
 - name: set quorum type
   command: "gluster volume set all cluster.quorum-type auto"
-  sudo: true
+  become: true
+  become_user: root
+  ignore_errors: true
 
diff --git a/roles/gpu/files/scripts/nvidia-xconf-gen.py b/roles/gpu/files/scripts/nvidia-xconf-gen.py
index 7cd9cb551f348d608b583466322b1acc137e9b8d..6993d3339bd57d42fb2860dc3a7ac87a79c9e71b 100755
--- a/roles/gpu/files/scripts/nvidia-xconf-gen.py
+++ b/roles/gpu/files/scripts/nvidia-xconf-gen.py
@@ -29,8 +29,18 @@ def grab_card_ids():
     cards = []
 
     for line in p.stdout.readlines():
-        line = line.rstrip().split(":")[2]
-        pcibus_num = int(re.sub('[.:]', '', line).rstrip("0"),16)
+        stripped_line = line.rstrip().split(":")[2]
+        # check for a different pcibus_id format; this happens on bare metal
+        # i.e. 00000000:06:00.0 rather than 00000000:00:06.0
+        pcibus_id = re.sub('[.:]', '', stripped_line).rstrip("0")
+        if not pcibus_id:  # empty string, try the other field
+            stripped_line = line.rstrip().split(":")[1]
+            pcibus_id = re.sub('[.:]', '', stripped_line).rstrip("0")
+            if not pcibus_id:
+                print("Error in grab_card_ids: cannot parse the line {}".format(line))
+                print("Command that generated it is: {}".format(cmd))
+                raise SystemExit(1)
+        pcibus_num = int(pcibus_id, 16)
         card = "PCI:0:{}:0".format(str(pcibus_num))
         cards.append(card)
     return cards
diff --git a/roles/gpu/tasks/main.yml b/roles/gpu/tasks/main.yml
index fc72958177e1f349dd8748e494c0762c5f47e00b..bbaf99596489f7e1b62634f99a41c4386957e79e 100644
--- a/roles/gpu/tasks/main.yml
+++ b/roles/gpu/tasks/main.yml
@@ -13,6 +13,7 @@
     - libX11-common
     - libX11-devel
     - libX11
+    - libglvnd-devel
     - xorg-x11-server-common
     - xorg-x11-util-macros
     - xorg-x11-server-utils
@@ -29,6 +30,7 @@
   yum: name="@Development Tools" state=installed
   become: true
   become_user: root
+  ignore_errors: yes
 
 - name: disable nouveau
   template: src=blacklist-nouveau.conf.j2 dest=/etc/modprobe.d/blacklist-nouveau.conf
diff --git a/roles/gpu/templates/xorg.conf.j2 b/roles/gpu/templates/xorg.conf.j2
index 2fc5f043c03e710be306d3b740cb6f5963216860..7f8b0f82076f6cfa81b34c4dd00a0d460cbe81a6 100644
--- a/roles/gpu/templates/xorg.conf.j2
+++ b/roles/gpu/templates/xorg.conf.j2
@@ -66,8 +66,10 @@ Section "Screen"
     Device         "Device{{item.screens.index(screen)}}"
     Monitor        "Monitor{{item.screens.index(screen)}}"
     DefaultDepth    24
-    Option         "UseDisplayDevice" "None"
     Option         "ProbeAllGpus" "false"
+{% if item.boardname == 'GRID K1' %}
+    Option         "UseDisplayDevice" "None"
+{% endif %}
     SubSection     "Display"
         Virtual     1920 1200
         Depth       24
diff --git a/roles/ldapclient/tasks/installOpenLdap.yml b/roles/ldapclient/tasks/installOpenLdap.yml
index b11a480d754812d7c4f7139611502dcb1219fcf4..f0db145ecbf8f310695cdcaebb672d10baacf9d9 100644
--- a/roles/ldapclient/tasks/installOpenLdap.yml
+++ b/roles/ldapclient/tasks/installOpenLdap.yml
@@ -1,14 +1,8 @@
 ---
 - name: "Install open ldap package yum"
-  yum: name={{ item }} state=present
-  with_items:
-    - openldap
-    - openldap-clients
-    - sssd
-    - sssd-common
-    - sssd-client
-    - nss
-    - nss-tools
+  yum: 
+    name: ['openldap', 'openldap-clients', 'sssd', 'sssd-common', 'sssd-client', 'nss', 'nss-tools']
+    state: present
   sudo: true
   when: ansible_os_family == 'RedHat'
 
diff --git a/roles/ldapclient/templates/system-auth.j2 b/roles/ldapclient/templates/system-auth.j2
index 2f9036e24bd3d33b4626af796c1427a168ad3de3..7b5f3e590ee6cf34803ecb5684c446a4d4c8253f 100644
--- a/roles/ldapclient/templates/system-auth.j2
+++ b/roles/ldapclient/templates/system-auth.j2
@@ -1,19 +1,22 @@
 #%PAM-1.0
 # This file is auto-generated.
 # User changes will be destroyed the next time authconfig is run.
+
 auth        required      pam_env.so
+auth        required      pam_faildelay.so delay=2000000
 auth        sufficient    pam_unix.so nullok try_first_pass
 auth        requisite     pam_succeed_if.so uid >= 500 quiet
 auth        sufficient    pam_sss.so use_first_pass
 auth        required      pam_deny.so
 
 account     required      pam_unix.so broken_shadow
+account     sufficient    pam_localuser.so
 account     sufficient    pam_succeed_if.so uid < 500 quiet
 account     [default=bad success=ok user_unknown=ignore] pam_sss.so
 account     required      pam_permit.so
 
-password    requisite     pam_cracklib.so try_first_pass retry=3
-password    sufficient    pam_unix.so md5 shadow nullok try_first_pass use_authtok
+password    requisite     pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
+password    sufficient    pam_unix.so sha512 shadow nullok try_first_pass use_authtok
 password    sufficient    pam_sss.so use_authtok
 password    required      pam_deny.so
 
@@ -21,4 +24,4 @@ session     optional      pam_keyinit.so revoke
 session     required      pam_limits.so
 session     [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
 session     required      pam_unix.so
-session     optional      pam_sss.so
+session     optional      pam_sss.so
\ No newline at end of file
diff --git a/roles/mellanox_drivers/tasks/main.yml b/roles/mellanox_drivers/tasks/main.yml
index 146f1a70433c56ad5806f3cf2c39090064ca6b1b..756c3b1aff462dcc22238fe7d05c6540cf2828cf 100644
--- a/roles/mellanox_drivers/tasks/main.yml
+++ b/roles/mellanox_drivers/tasks/main.yml
@@ -65,7 +65,7 @@
   when: install_now
 
 - name: install drivers
-  shell: ./mlnxofedinstall -q --add-kernel-support --force
+  shell: ./mlnxofedinstall -q --add-kernel-support --force --skip-repo
   args:
     #more changes
     chdir: "/tmp/{{ MELLANOX_DRIVER_SRC }}"
@@ -87,9 +87,9 @@
   register: reload_service
 
 - name: enable roce_mode setting
-  service: name=roce_mode state=started enabled=True
+  service: name=roce_mode state=started enabled=yes
   become: true
-
+  ignore_errors: yes
 #
 # A REBOOT IS NEEDED AFTER SUCCESSFUL INSTALL
 #
diff --git a/roles/mellanox_drivers/vars/mellanoxVars.yml b/roles/mellanox_drivers/vars/mellanoxVars.yml
index 98f1c359baa2b76427a9cb7fead2e64473558cb5..6aa643d58ac88337df835db8fd81dd6e3b84fb17 100644
--- a/roles/mellanox_drivers/vars/mellanoxVars.yml
+++ b/roles/mellanox_drivers/vars/mellanoxVars.yml
@@ -1,7 +1,4 @@
 ---
  #note. do not add '.tgz' to driver src. done in playbook
- #MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-3.1-1.0.3-rhel7.1-x86_64-ext{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
- #MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-3.1-1.0.3-rhel7.2-x86_64-ext{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
- #MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-3.4-1.0.0.0-rhel7.2-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
-# MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.2-1.2.0.0-rhel7.4-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
-MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.4-1.0.0.0-rhel7.4-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
+#MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.4-1.0.0.0-rhel7.4-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
+MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.5-1.0.1.0-rhel7.6-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
diff --git a/roles/nat_server/templates/iptables.j2 b/roles/nat_server/templates/iptables.j2
index 07def7ca120b86a9fc573c9f7ac3bcc249889df8..c311a4463467e00c82cf77dbe0e93263b3fa6e4a 100644
--- a/roles/nat_server/templates/iptables.j2
+++ b/roles/nat_server/templates/iptables.j2
@@ -22,6 +22,7 @@ COMMIT
 :INPUT ACCEPT [0:0]
 :FORWARD ACCEPT [0:0]
 :OUTPUT ACCEPT [0:0]
+-A INPUT -s {{ PRIVATE_NETWORK_CIDR }} -j ACCEPT
 -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
 -A INPUT -p icmp -j ACCEPT
 -A INPUT -i lo -j ACCEPT
diff --git a/roles/nhc/README.md b/roles/nhc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..37b6ea18e0ea78a96142b3efb6c2c2dfaea7d9cd
--- /dev/null
+++ b/roles/nhc/README.md
@@ -0,0 +1,20 @@
+Sets up Node Health Check (NHC) on a compute node.
+See https://github.com/mej/nhc
+
+It needs some variables to be defined before the role is called.
+
+For example:
+nhc_version: 1.4.2
+nhc_dir: /opt/nhc-{{ nhc_version }}
+nhc_config_file: nhc.conf
+nhc_log_level: 0
+nhc_emails: nobody@nowhere.nowhere
+nhc_email_subject: "Node Health Check" 
+
+In addition, the NAT check is only enabled when two variables are defined (see the example below):
+- ADD_NHC_NAT_CHECK must be defined (to any value)
+- NHC_NAT_CHK_EXTERNAL_IP_ADDRESS is the address to be pinged
+
+Example:
+  roles:
+  - { role: nhc, NHC_NAT_CHK_EXTERNAL_IP_ADDRESS: "118.138.246.208", ADD_NHC_NAT_CHECK: 1  }
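+
+Once the role has installed the template, the check is enabled by referencing it from the NHC
+config file (a minimal sketch; the exact match pattern and nhc.conf layout depend on your NHC setup):
+
+  * || check_nat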
diff --git a/roles/nhc/tasks/main.yml b/roles/nhc/tasks/main.yml
index bac75d5034f2dd26aea8f953437f6bee9cd454fe..6b0aeb736af495424674e093b4111882614e45f2 100644
--- a/roles/nhc/tasks/main.yml
+++ b/roles/nhc/tasks/main.yml
@@ -60,3 +60,14 @@
   become: true
   become_user: root
 
+# test that NAT is working by pinging an external address, e.g. a license server or, by default, Google Public DNS
+- set_fact:
+    NHC_NAT_CHK_EXTERNAL_IP_ADDRESS: "8.8.8.8"
+  when: NHC_NAT_CHK_EXTERNAL_IP_ADDRESS is undefined
+
+- name: install nhc NAT check
+  template: dest="{{ nhc_dir }}/etc/nhc/scripts/check_nat.nhc" src=check_nat.nhc.j2
+  become: true
+  become_user: root
+  when: ADD_NHC_NAT_CHECK is defined
+
diff --git a/roles/nhc/templates/check_nat.nhc.j2 b/roles/nhc/templates/check_nat.nhc.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ab56e57e890ac1b64ea994f63cd372925f0e7f53
--- /dev/null
+++ b/roles/nhc/templates/check_nat.nhc.j2
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+#checks that NAT is working by pinging an external address from a compute node
+
+function check_nat() {
+#echo ">>> Checking NAT on Compute Nodes Works >>>>>>>>>>>>>>>>>>>>>>>>>"
+
+
+#test NAT by pinging an external IP ADDRESS
+PING_EXTERNAL="ping -c 2 {{ NHC_NAT_CHK_EXTERNAL_IP_ADDRESS }}"
+ 
+$PING_EXTERNAL
+RESULT=$?
+if [ $RESULT -ne 0 ]; then
+     die 1 "$FUNCNAME ERROR on node: cannot ping external address. Command is '${PING_EXTERNAL}'. Please check that NAT is working and that the route on the node is valid"
+     return 1
+fi
+return 0
+}
+
+
diff --git a/roles/p100_firmware/README.md b/roles/p100_firmware/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bb5b0479846737672576214a275b578870ba50c6
--- /dev/null
+++ b/roles/p100_firmware/README.md
@@ -0,0 +1,5 @@
+Role to upgrade firmware on P100 GPU nodes.
+
+Usage:
+ - { role: p100_firmware, BINARY_NAME: "P100_PCN204260.bin" , tags: [p100]  }
+
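+A minimal playbook sketch (the host group name here is an assumption; use whatever group holds your P100 nodes):
+
+- hosts: 'GpuNodes'
+  gather_facts: True
+  roles:
+  - { role: p100_firmware, BINARY_NAME: "P100_PCN204260.bin", tags: [p100] }
+
+The role stops nvidia-persistenced and collectd, unloads the nvidia kernel modules, downloads the firmware binary from the local repo server and runs it, then restarts the services. A reboot is required afterwards.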
diff --git a/roles/p100_firmware/tasks/main.yml b/roles/p100_firmware/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..44580f195db1bab74e0bf39dabd725c233c5d8c0
--- /dev/null
+++ b/roles/p100_firmware/tasks/main.yml
@@ -0,0 +1,69 @@
+---
+- name: "stop nvidia persistence daemon"
+  service: name=nvidia-persistenced  state=stopped
+  become: true
+  become_user: root
+  ignore_errors: true
+- name: stop collectd
+  service: name=collectd state=stopped
+  become: true
+  become_user: root
+  ignore_errors: true
+- name: stop create-dev-uvm
+  service: name=create-dev-uvm state=stopped
+  become: true
+  become_user: root
+  ignore_errors: true
+- name: remove nvidia_drm from kernel
+  modprobe:
+    name: nvidia_drm
+    state: absent
+  become: true
+  become_user: root
+- name: remove nvidia_modeset from kernel
+  modprobe:
+    name: nvidia_modeset
+    state: absent
+  become: true
+  become_user: root
+- name: remove nvidia from kernel
+  modprobe:
+    name: nvidia
+    state: absent
+  become: true
+  become_user: root
+- name: check for nvidia modules
+  shell: /usr/sbin/lsmod | grep nvidia
+  ignore_errors: true
+#- meta: end_play
+- name: download firmware
+  get_url:
+    url: "http://consistency0/src/{{ BINARY_NAME }}"
+    dest: "/tmp/{{ BINARY_NAME }}"
+    mode: "0755"
+- name: Run the binary command
+  command: "/tmp/{{ BINARY_NAME }}"
+  #command: "ls -l /tmp/{{ BINARY_NAME }}"
+  become: true
+  become_user: root 
+  register: upgrade_out
+- name: stdout of upgrade is
+  debug: var=upgrade_out.stdout
+- name: stderr of upgrade is
+  debug: var=upgrade_out.stderr
+- name: enable persistenced on boot
+  service: name=nvidia-persistenced state=started enabled=yes
+  become: true
+  become_user: root
+- name: start collectd
+  service: name=collectd state=started
+  become: true
+  become_user: root
+  ignore_errors: true
+- name: start create-dev-uvm 
+  service: name=create-dev-uvm state=started
+  become: true
+  become_user: root
+  ignore_errors: true
+- name: DON'T FORGET TO REBOOT
+  debug: msg="And I really mean it."
diff --git a/roles/pam_slurm/templates/sshd.j2 b/roles/pam_slurm/templates/sshd.j2
index a1218458728bb47fea1d4f73194191a516cb6214..fea4fda0e8db16351917037f681ae82a4795d5df 100644
--- a/roles/pam_slurm/templates/sshd.j2
+++ b/roles/pam_slurm/templates/sshd.j2
@@ -6,7 +6,7 @@ auth       include      postlogin
 -auth      optional     pam_reauthorize.so prepare
 account    required     pam_nologin.so
 account    include      password-auth
-account    sufficient   pam_slurm.so
+account    sufficient   pam_slurm_adopt.so
 account    required     pam_access.so
 password   include      password-auth
 # pam_selinux.so close should be the first session rule
diff --git a/roles/rsyslog_client/tasks/main.yml b/roles/rsyslog_client/tasks/main.yml
index 2aec4a9c6688a96994edb563b14c15b017cd599e..9b087381192f7818bd9a61467dea29614dab0ac7 100644
--- a/roles/rsyslog_client/tasks/main.yml
+++ b/roles/rsyslog_client/tasks/main.yml
@@ -22,4 +22,4 @@
   service: name=rsyslog state=restarted
   become: true
   become_user: root
-  when: config_changed | changed
+  when: config_changed is changed
diff --git a/roles/set_semaphore_count/README.md b/roles/set_semaphore_count/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2dc03ea31a574196932a811e5a27dc0509236c81
--- /dev/null
+++ b/roles/set_semaphore_count/README.md
@@ -0,0 +1,12 @@
+Some programs, e.g. GAMESS, need a larger number of system semaphores than provided by default.
+This role creates a config file in /etc/sysctl.d/ that sets the value on startup, so it persists across reboots.
+
+It also runs a shell command so the change takes immediate effect; this is the default value for the role:
+echo 500 256000 64 10240 > /proc/sys/kernel/sem
+
+We use a variable SEM_COUNT so users can override the default setting.
+
+Use
+- { role: set_semaphore_count } # to use the default value hardcoded inside main.yml (as above)
+or
+- { role: set_semaphore_count, SEM_COUNT: "200 252000 24 20240" } # to use some other value (the values here are for illustration only)
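+
+With the default SEM_COUNT, the resulting /etc/sysctl.d/88-setSemaphore.conf looks roughly like this
+(a sketch of what the lineinfile tasks in main.yml produce; a second comment line linking the Red Hat semaphore tuning docs is omitted here):
+
+#set large semaphore count. Needed for Gamess.
+kernel.sem=500 256000 64 10240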
diff --git a/roles/set_semaphore_count/tasks/main.yml b/roles/set_semaphore_count/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..01203f6d9fbbfb0de5d564bb045e10430e9a0a07
--- /dev/null
+++ b/roles/set_semaphore_count/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- name: set the value of the Semaphores
+  set_fact:
+       SEM_COUNT: "500 256000 64 10240"
+  when: SEM_COUNT is not defined
+- name: test value
+  debug: msg="Value of semaphores is {{ SEM_COUNT }} "  #"
+- name: Place comment line in file
+  lineinfile: 
+     path: /etc/sysctl.d/88-setSemaphore.conf
+     line: "#set large semaphore count. Needed for Gamess."
+     state: present
+     create: yes
+     owner: root
+     group: root
+     mode: "u+rwx,o=rx,g=rx"
+  become: true
+  become_user: root
+     
+- name: Place comment line in file
+  lineinfile: 
+     path: /etc/sysctl.d/88-setSemaphore.conf
+     line: "#https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-setting_semaphores-setting_semaphore_parameters "
+     state: present
+  become: true
+  become_user: root
+
+- name: Place kernel.sem setting in file
+  lineinfile: 
+     path: /etc/sysctl.d/88-setSemaphore.conf
+     line: "kernel.sem={{ SEM_COUNT }}"
+     state: present
+     create: yes
+     owner: root
+     group: root
+     mode: "u+rwx,o=rx,g=rx"
+  become: true
+  become_user: root
+
+- name: set semaphore count now 
+  shell: "/usr/bin/echo {{ SEM_COUNT }}   > /proc/sys/kernel/sem"
+  become: true
+  become_user: root
+
diff --git a/roles/slurm-common/defaults/main.yml b/roles/slurm-common/defaults/main.yml
index 362d9cf4ae92e58c49950d6a09dcaaf9df92cba6..283c06273b4c10af672e815f0fe0062f986814fd 100644
--- a/roles/slurm-common/defaults/main.yml
+++ b/roles/slurm-common/defaults/main.yml
@@ -1,9 +1,8 @@
 ---
 slurm_use_vpn: False 
-slurmctlddebug: {level: 9, log: '/var/log/slurm/slurmctld.log'}
-slurmddebug: {level: 9, log: '/var/log/slurm/slurmd.log'}
-slurmschedlog: {level: 9, log: '/var/log/slurm/slurmsched.log'}
-slurmdbdlog: {level: 9, log: '/var/log/slurm/slurmdbd.log'}
+slurmddebug: {level: 5, log: '/var/log/slurm/slurmd.log'}
+slurmctlddebug: {level: 5, log: '/mnt/slurm-logs/slurmctld.log'}
+slurmdbdlog: {level: 5, log: '/mnt/slurm-logs/slurmdbd.log'}
 slurmfairshare: {def: false, val: 10000}
 slurmdatadir: "/var/spool/slurm"
 slurmselecttype: "select/linear"
diff --git a/roles/slurm-common/tasks/createSlurmDirectories.yml b/roles/slurm-common/tasks/createSlurmDirectories.yml
index f4847d42b773d936d994da71401c3e3568dbf4c0..295aeadf3e1655c74e778d0b1263d5bc446757bb 100644
--- a/roles/slurm-common/tasks/createSlurmDirectories.yml
+++ b/roles/slurm-common/tasks/createSlurmDirectories.yml
@@ -1,4 +1,8 @@
 ---
+- name: make sure slurmctld and slurmdb log dir exists
+  file: dest=/mnt/slurm-logs state=directory owner=root group=root mode=755
+  sudo: true
+
 - name: make sure slurm conf dir exists
   file: dest={{ slurm_dir }}/etc state=directory
   sudo: true
diff --git a/roles/slurm-common/tasks/installCgroup.yml b/roles/slurm-common/tasks/installCgroup.yml
index 9b21e1b4d7fba85c0b47e8ce12663faedd8b19f4..c7f4253d3dfcb0540421c27249d7aee0a4920118 100644
--- a/roles/slurm-common/tasks/installCgroup.yml
+++ b/roles/slurm-common/tasks/installCgroup.yml
@@ -2,7 +2,8 @@
   yum: name={{ item }} state=installed
   with_items:
     - libcgroup
-  sudo: true
+  become: True
+  become_method: sudo
   when: ansible_os_family == "RedHat"
 
 - name: apt install cgroup 
@@ -11,14 +12,16 @@
     - cgmanager
     - cgmanager-utils
     - libcgmanager0 
-  sudo: true
   when: ansible_os_family == "Debian"    
-  sudo: true
+  become: True
+  become_method: sudo
 
 - name: config cgroup.conf file
   template: dest={{ slurm_dir }}/etc/cgroup.conf src=cgroup.conf.j2 mode=644
-  sudo: true
+  become: True
+  become_method: sudo
 
 - name: config cgroup_allowed_devices.conf file
   template: dest={{ slurm_dir }}/etc/cgroup_allowed_devices.conf src=cgroup_allowed_devices.conf.j2 mode=644
-  sudo: true
+  become: True
+  become_method: sudo
diff --git a/roles/slurm-common/tasks/installSlurmFromSource.yml b/roles/slurm-common/tasks/installSlurmFromSource.yml
index 167994b2dc000568ee739480d28a07679f86685c..c1c7794f6d26dc78413edf578e0007a83d2ae116 100644
--- a/roles/slurm-common/tasks/installSlurmFromSource.yml
+++ b/roles/slurm-common/tasks/installSlurmFromSource.yml
@@ -8,23 +8,23 @@
   sudo: true 
   when: force_slurm_recompile is defined
 
-
 - name: unarchive slurm
   unarchive:
-  args:
     src: "http://consistency0/src/slurm-{{ slurm_version }}.tar.bz2"
-    copy: no
     dest: /tmp
+    remote_src: yes
     creates: "{{ slurm_dir }}/bin/srun"
 
 - name: stat srun
   stat: path="{{ slurm_dir }}/bin/srun"
   register: stat_srun
 
-
+- name: stat pam_slurm_adopt
+  stat: path="/lib64/security/pam_slurm_adopt.so"
+  register: stat_pam_slurm_adopt
 
 - name: configure slurm
-  command: /tmp/slurm-{{ slurm_version }}/configure --prefix={{ slurm_dir }} --with-munge={{ munge_dir }} --enable-pam 
+  command: /tmp/slurm-{{ slurm_version }}/configure --prefix={{ slurm_dir }} --with-munge={{ munge_dir }} --enable-pam --with-pmix=/usr/local/pmix/latest
   args:
     creates: "{{ slurm_dir }}/bin/srun"
     chdir: /tmp/slurm-{{ slurm_version }}
@@ -45,6 +45,32 @@
     creates: "{{ slurm_dir }}/bin/srun"
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
 
+- name: build pmi
+  command: make
+  args:
+    chdir: /tmp/slurm-{{ slurm_version }}/contribs/pmi
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+
+- name: install pmi
+  shell: make install
+  sudo: true
+  args:
+    chdir: /tmp/slurm-{{ slurm_version }}/contribs/pmi
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+
+- name: build pmi2
+  command: make
+  args:
+    chdir: /tmp/slurm-{{ slurm_version }}/contribs/pmi2
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+
+- name: install pmi2
+  shell: make install
+  sudo: true
+  args:
+    chdir: /tmp/slurm-{{ slurm_version }}/contribs/pmi2
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+
 - name: build pam_slurm
   command: make
   args:
@@ -58,6 +84,28 @@
     chdir: /tmp/slurm-{{ slurm_version }}/contribs/pam
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
 
+- name: build pam_slurm_adopt
+  make:
+    chdir: /tmp/slurm-{{ slurm_version }}/contribs/pam_slurm_adopt
+  when: force_slurm_recompile is defined or not stat_pam_slurm_adopt.stat.exists
+
+- name: install pam_slurm_adopt
+  make:
+    chdir: /tmp/slurm-{{ slurm_version }}/contribs/pam_slurm_adopt
+    target: install
+  when: force_slurm_recompile is defined or not stat_pam_slurm_adopt.stat.exists
+  sudo: true
+
+- name: remove existing slurm-latest link
+  shell: rm -f  /opt/slurm-latest
+  sudo: true
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+
+- name: create slurm-latest symlink
+  shell: ln -s  {{ slurm_dir }}  /opt/slurm-latest
+  sudo: true
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+
 - name: add slurm log rotate config
   template: src=slurmlog.j2 dest=/etc/logrotate.d/slurm mode=644
   sudo: true
diff --git a/roles/slurm-mysql-config/tasks/main.yml b/roles/slurm-mysql-config/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..52f06b184ac0f5487e09b633a97b2db40e712f2a
--- /dev/null
+++ b/roles/slurm-mysql-config/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: "Copy slurm db tuning config"
+  template: src=slurm.cnf.j2 dest=/etc/my.cnf.d/slurm.cnf
+  become: true
+  become_user: root
diff --git a/roles/slurm-mysql-config/templates/slurm.cnf.j2 b/roles/slurm-mysql-config/templates/slurm.cnf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..56c0038550e7389916d14c7b350d1aa6c574dc32
--- /dev/null
+++ b/roles/slurm-mysql-config/templates/slurm.cnf.j2
@@ -0,0 +1,4 @@
+[mysqld]
+innodb_buffer_pool_size=1024M
+innodb_log_file_size=256M
+innodb_lock_wait_timeout=900
diff --git a/roles/slurm-trigger/README.rst b/roles/slurm-trigger/README.rst
index 04f41095ec0ae2413ac9182e93ec2c70279d2f12..61779b1815da7ec036ee5352fe029a5fb3c55fc6 100644
--- a/roles/slurm-trigger/README.rst
+++ b/roles/slurm-trigger/README.rst
@@ -14,7 +14,9 @@ USAGE:
 
 - hosts: 'ManagementNodes'
   roles:
-   - slurm_trigger  
+   - { role: slurm-trigger, slurm_dir: "/opt/slurm-18.08.6", admin_email: "hpc-alerts-warning-l@monash.edu", tags: [slurm, slurm-trigger]   }
+
+ 
 
 
 The role uses several variables that need to be defined:
diff --git a/roles/slurm-trigger/tasks/main.yml b/roles/slurm-trigger/tasks/main.yml
index ceb47c7f8ece4fddc9227ddce0315842478e5989..0e65185c9bd6e7b560db9a434199de6ba992006b 100644
--- a/roles/slurm-trigger/tasks/main.yml
+++ b/roles/slurm-trigger/tasks/main.yml
@@ -15,6 +15,7 @@
   become: true
   become_user: slurm
   run_once: true
+  ignore_errors: true
  
 - name:  template primary_slurmctld_resumed_operation
   template: dest="{{ slurm_dir }}/sbin/primary_slurmctld_resumed_operation.sh" src=primary_slurmctld_resumed_operation.sh.j2 mode="0755"
@@ -31,6 +32,7 @@
   become: true
   become_user: slurm
   run_once: true
+  ignore_errors: true
   
 - name:  template node_down
   template: dest="{{ slurm_dir }}/sbin/node_down.sh" src=node_down.sh.j2 mode="0755"
@@ -48,6 +50,7 @@
   become: true
   become_user: slurm
   run_once: true
+  ignore_errors: true
   
  
  
diff --git a/roles/slurm_config/tasks/main.yml b/roles/slurm_config/tasks/main.yml
index 8a6768ab999e1b30bade948452f4e5f5f2f9b6f0..feec10209a05172fcf9f887384233a553444b5e5 100644
--- a/roles/slurm_config/tasks/main.yml
+++ b/roles/slurm_config/tasks/main.yml
@@ -2,3 +2,7 @@
 - name: install slurm.conf
   copy: src=files/slurm.conf dest={{ slurm_dir }}/etc/slurm.conf
   sudo: true
+
+- name: install job_submit.lua
+  copy: src=files/job_submit.lua dest={{ slurm_dir }}/etc/job_submit.lua
+  sudo: true
diff --git a/roles/slurm_sql_bk/README.md b/roles/slurm_sql_bk/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae4619d0169e4bc5c46457ac1a32cb599ec3df39
--- /dev/null
+++ b/roles/slurm_sql_bk/README.md
@@ -0,0 +1,40 @@
+This role sets up a cron job on one management machine to perform a MySQL dump (via a remote mysqldump against the SQL machine).
+The dump is then gzipped and copied via scp to the destination machine (SQL_BK_DEST_HOST) for storage.
+
+As this script is meant to be portable across clusters, some variables need to be defined when the role is called.
+
+Architecture:
+        As the SQL machine may not be able to access SQL_BK_DEST_HOST, a management machine does the dump, which is then scp'd to SQL_BK_DEST_HOST
+        The MySQL password is stored in mysql.conf in the bin dir (MGMT_BIN_DIR) of the management machine
+        Only one management node should do the dump, for performance reasons
+        A cron job on SQL_BK_DEST_HOST deletes any backups older than 7 days
+
+
+EXAMPLE USAGE:
+
+*ansible-playbook -i static --limit=monarch-mgmt1 slurm_bk.yml*
+
+where slurm_bk.yml contains
+
+~~~~
+- hosts: 'ManagementNodes'
+  gather_facts: True
+  roles:
+  - { role: slurm_sql_bk, create_key: "True", SQL_BK_DEST_HOST: "118.138.234.186", SQL_BK_DEST_DIR: "/mnt/backup/monarch", SQL_IP: "172.16.226.88" , sqlUsername: "slurmdb", sqlPasswd: "{{ slurmdb_passwd }}" }
+~~~~
+
+Meaning:
+* **create_key**: If set to "True", ssh-keygen is run on the local machine and then:
+    * the private key is copied to ~/.ssh on the management node
+    * the public key is inserted into authorized_keys on SQL_BK_DEST_HOST
+* **SQL_BK_DEST_HOST:** IP address of the machine where the MySQL dumps are stored
+* **SQL_BK_DEST_DIR:** Directory on the backup machine for MySQL dumps. Note this is owned by ec2-user and is created manually. It should be cluster-specific, i.e. different for each cluster
+* **SQL_IP:** IP address of the Slurm MySQL machine
+* **sqlUsername, sqlPasswd:** MySQL username/password, same as in slurmdbd.conf
+
+Other VARIABLES:
+
+defined in defaults/main.yml:
+* **MGMT_BIN_DIR:** "/root/bin" # where the backup shell script and mysql.conf live on the management node
+* **SQL_BK_DATA_DIR:** "/tmp" # where the initial dump is written on the management node; the file is deleted after the scp so it should not fill up the disk
+* **SQL_USER:** "ec2-user" # the user account on SQL_BK_DEST_HOST; normally ec2-user but could be something else for security reasons
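+
+For reference, the cron entries that end up in place look roughly like this (a sketch; the management-node entry is created by the Ansible cron module, the remote entry is appended to ec2-user's crontab on SQL_BK_DEST_HOST):
+
+~~~~
+# root crontab on the management node
+55 23 * * * /root/bin/backup_mysql_for_slurm.sh
+
+# ec2-user crontab on SQL_BK_DEST_HOST
+#delete old slurm backups
+00 23 * * * /home/ec2-user/delete_old_mysql_bk_<basename of SQL_BK_DEST_DIR>.sh
+~~~~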
diff --git a/roles/slurm_sql_bk/defaults/main.yml b/roles/slurm_sql_bk/defaults/main.yml
index efd656281b7008f562b3accfcb24d3838bbde76b..bfa1d7f99e09d0c576dbf5a7737ff25c827cfefc 100644
--- a/roles/slurm_sql_bk/defaults/main.yml
+++ b/roles/slurm_sql_bk/defaults/main.yml
@@ -1,6 +1,5 @@
 ---
 # for slurm mysql backup
-SQL_BK_DIR: "/mnt/db_backup"
-SQL_BK_DEST_HOST: "m3-mgmt1"
-SQL_BK_DEST_DIR: "/mnt/home/slurm_db_backup"
-SQL_USER: "slurmsqlbk"
+MGMT_BIN_DIR: "/root/bin"
+SQL_BK_DATA_DIR: "/tmp"
+SQL_USER: "ec2-user"
diff --git a/roles/slurm_sql_bk/tasks/main.yml b/roles/slurm_sql_bk/tasks/main.yml
index 1bfd3276eaa3bf0ec48d495b97ee3b4968c4cd13..70cd526502e0da5b1410b69207e6c9d36381ee6c 100644
--- a/roles/slurm_sql_bk/tasks/main.yml
+++ b/roles/slurm_sql_bk/tasks/main.yml
@@ -1,40 +1,72 @@
 ---
-# this code is for the sql server only
-  - name: template sql backupscript to /etc/cron.daily
-    template: src="backup_mysql_for_slurm.sh.j2" dest="/etc/cron.daily/backup_mysql_for_slurm.sh"  mode="700"
-    sudo: true
-    when: server == 'True'
-  - name: Create directory {{ SQL_BK_DIR }}
-    file: path={{ SQL_BK_DIR }} state=directory
-    sudo: true
-    when: server == 'True'
+  #
+  #first generate ssh keys if the variable "create_key" is defined.
+  #
+  - name: delete any existing local private key
+    local_action: command rm -f  ./slm_db_backup
+    when: create_key is defined and create_key=="True"
+  - name: delete any existing local public keys
+    local_action: command rm -f ./slm_db_backup.pub
+    when: create_key is defined and create_key=="True"
+  - name: generate ssh keys if necessary
+    #this command creates two files: "slm_db_backup" and "slm_db_backup.pub"
+    local_action: command ssh-keygen -t rsa -f slm_db_backup -P ""
+    when: create_key is defined and create_key=="True"
+  - name: copy private key to management node
+    copy:
+      src: "./slm_db_backup"
+      dest: "/root/.ssh"
+      owner: root 
+      group: root
+      mode: '600'
+    become: True
+    become_user: root
+    when: create_key is defined and create_key=="True"
+  - name: copy public key to authorized_keys file of the backup machine
+    local_action: command  ssh-copy-id -i ./slm_db_backup.pub  {{ SQL_BK_DEST_HOST }}
+    when: create_key is defined and create_key=="True"
+  #
+  # now setup cronjob on management node
+  #
+  - name: ensure {{ MGMT_BIN_DIR }} exists
+    file:
+      path: "{{ MGMT_BIN_DIR }}"
+      state: directory
+    become: true
+    become_user: root 
+  - name: "template sql backupscript to  {{ MGMT_BIN_DIR }}"
+    template: src="backup_mysql_for_slurm.sh.j2" dest="{{ MGMT_BIN_DIR }}/backup_mysql_for_slurm.sh"  mode="700"
+    become: true
+    become_user: root 
+  - name: Make a daily crontab entry
+    cron:
+      name: "Backup of MySQL Database for Slurm"
+      job: "{{ MGMT_BIN_DIR }}/backup_mysql_for_slurm.sh"  
+      hour: 23
+      minute: 55
+    become: true
+    become_user: root 
+  - name: Create directory {{ SQL_BK_DATA_DIR }} to store initial mysql dump
+    file: path={{ SQL_BK_DATA_DIR }} state=directory
+    become: true
+    become_user: root 
   - name: template mysql config file to server
-    template: src="mysql.conf.j2" dest="{{ SQL_BK_DIR }}/mysql.conf" mode="600"
-    sudo: true
-    when: server == 'True'
-  - name: copy ssh pub key to .ssh if it does not exist already
-    copy: src="id_rsa.pub" dest="/root/.ssh/id_rsa.pub"
-    sudo: true
-    when: server == 'True'
-  - name: copy ssh private key to .ssh if it does not exist already
-    copy: src="id_rsa" dest="/root/.ssh/id_rsa" mode="600"
-    sudo: true
-    when: server == 'True'
-    
-#this code is for the Destination Node only
-  - name: create dummy user account
-    user: name="{{ SQL_USER }}" comment="Account for scp of slurm sql backups" 
-    sudo: true
-    when: server == 'False'
-  - name: Add MySQL server ssh key to authorised_files on management nodes"
-    authorized_key: user="{{ SQL_USER }}" state=present  key="{{ lookup('file', 'id_rsa.pub') }}" #"
-    sudo: true
-    when: server == 'False'
-  - name: ensure the dest directory exists (for backups to be copied too)
-    file: path={{ SQL_BK_DEST_DIR }} state=directory owner={{ SQL_USER }}
-    sudo: true
-    when: server == 'False'
-  - name: setup cron job to delete old slurm logs
-    template: src="delete_old_mysql_bk.sh.j2" dest="/etc/cron.daily/delete_old_mysql_bk.sh"  mode="700"
-    sudo: true
-    when: server == 'False'
+    template: src="mysql.conf.j2" dest="{{ MGMT_BIN_DIR }}/mysql.conf" mode="600"
+    become: true
+    become_user: root
+  #
+  # template delete file to localhost.then copy to remote host
+  #
+  - name: make a unique name for the delete script
+    set_fact: 
+        unique_name: "delete_old_mysql_bk_{{ SQL_BK_DEST_DIR | basename }}.sh"
+  - name: Unique filename is
+    debug: var=unique_name
+  - name: delete local del file
+    local_action: command rm -f  ./{{ unique_name }}
+  - name: template delete script to local dir
+    local_action: template src=delete_old_mysql_bk.sh.j2 dest=./{{ unique_name }}
+  - name: copy delete script to server ec2-user@{{ SQL_BK_DEST_HOST }}
+    local_action: command scp -i ./slm_db_backup ./{{ unique_name }}  "ec2-user@{{ SQL_BK_DEST_HOST }}:"
+  - name: insert delete cron job entry on remote server
+    local_action: command ssh -i ./slm_db_backup ec2-user@{{ SQL_BK_DEST_HOST }}  "{ crontab -l ; echo '#delete old slurm backups' ; echo '00 23 * * * /home/ec2-user/{{ unique_name }}' ; } | crontab  -  "
diff --git a/roles/slurm_sql_bk/templates/backup_mysql_for_slurm.sh.j2 b/roles/slurm_sql_bk/templates/backup_mysql_for_slurm.sh.j2
index 8b5c9cdcfb3a1629291d5c1fc1c20ed7c502a3a9..55dc58fa0e750ffdffd43b4f6ffdd62c127afc75 100644
--- a/roles/slurm_sql_bk/templates/backup_mysql_for_slurm.sh.j2
+++ b/roles/slurm_sql_bk/templates/backup_mysql_for_slurm.sh.j2
@@ -1,17 +1,17 @@
 #!/bin/sh
 #
 # mysql dump for slurm. 
-# S.Michnowicz
-# 20/Jan/2016
 #
 
 TIME=$(date '+%y-%m-%d')
-DIR={{ SQL_BK_DIR }}
-NAME="$DIR/mysql_dump_20${TIME}.sql"
+BIN_DIR={{ MGMT_BIN_DIR }}
+DATA_DIR={{ SQL_BK_DATA_DIR }}
+NAME="$DATA_DIR/mysql_dump_20${TIME}.sql"
 
-sudo mysqldump  --defaults-file=$DIR/mysql.conf   slurm_acct_db > $NAME
+cd $DATA_DIR
+sudo mysqldump --defaults-file=$BIN_DIR/mysql.conf --host={{ SQL_IP }}   slurm_acct_db > $NAME
 sudo chmod go-r $NAME
 sudo gzip -f $NAME
 #scp file to   dummy user @ Destination Node and Directory
-scp ${NAME}.gz  {{ SQL_USER }}@{{ SQL_BK_DEST_HOST }}:{{ SQL_BK_DEST_DIR }}
+scp -i ~/.ssh/slm_db_backup ${NAME}.gz  {{ SQL_USER }}@{{ SQL_BK_DEST_HOST }}:{{ SQL_BK_DEST_DIR }}
 rm -f  ${NAME}.gz 
diff --git a/roles/slurm_sql_bk/templates/mysql.conf.j2 b/roles/slurm_sql_bk/templates/mysql.conf.j2
index ea91192ca855a9b218f9714654ca91f14e58ab84..3324292642272e1fc446aa5b85416f8264e05282 100644
--- a/roles/slurm_sql_bk/templates/mysql.conf.j2
+++ b/roles/slurm_sql_bk/templates/mysql.conf.j2
@@ -1,3 +1,3 @@
 [client]
-password="{{ sqlrootPasswd }}"
-user=root
+password="{{ sqlPasswd }}"
+user="{{ sqlUsername }}"
diff --git a/roles/slurmdb-config/readme.md b/roles/slurmdb-config/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..ee21d6bcc8a88049435ff9c4c589e27093a7c29c
--- /dev/null
+++ b/roles/slurmdb-config/readme.md
@@ -0,0 +1,12 @@
+If the database is already up and running, simply applying this role will not work.
+To change the InnoDB log file size without data loss, do the following before applying the config (a consolidated shell sketch follows the steps):
+
+1. While mariadb is still running
+   MySQL> SET GLOBAL innodb_fast_shutdown=0;
+2. Stop mariadb
+   systemctl stop mariadb
+3. Run this role to copy the config to /etc/my.cnf.d
+4. Go to /var/lib/mysql
+   mv ib_logfile0 ib_logfile0_orig
+   mv ib_logfile1 ib_logfile1_orig
+5. systemctl start mariadb
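+
+A consolidated shell sketch of the steps above (run as root; the mysql one-liner is equivalent to the MySQL> prompt step, and paths assume a default mariadb install):
+
+   mysql -e "SET GLOBAL innodb_fast_shutdown=0;"
+   systemctl stop mariadb
+   # apply this role to copy the config to /etc/my.cnf.d
+   cd /var/lib/mysql
+   mv ib_logfile0 ib_logfile0_orig
+   mv ib_logfile1 ib_logfile1_orig
+   systemctl start mariadb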
diff --git a/roles/slurmdb-config/tasks/main.yml b/roles/slurmdb-config/tasks/main.yml
index a31f5ad72b0a21cc1ebb67d654eea977205b33b1..3e23046fe6d1011f6bb23b4c937089c8724ec053 100644
--- a/roles/slurmdb-config/tasks/main.yml
+++ b/roles/slurmdb-config/tasks/main.yml
@@ -22,13 +22,23 @@
   sudo: true
 
 - name: install slurmdb.conf
-  copy: src=files/slurmdbd.conf dest={{ slurm_dir }}/etc/slurmdbd.conf
+  copy: 
+    src: files/slurmdbd.conf 
+    dest: "{{ slurm_dir }}/etc/slurmdbd.conf"
+    owner: slurm
+    group: slurm
+    mode: u+rw,g-wx,o-rwx
   sudo: true
   when: slurm_dir is defined
 
 
 - name: install slurmdbd.conf
-  copy: src=slurmdbd.conf dest=/etc/slurm/slurmdbd.conf
+  copy: 
+    src: slurmdbd.conf 
+    dest: /etc/slurm/slurmdbd.conf
+    owner: slurm
+    group: slurm
+    mode: u+rw,g-wx,o-rwx
   sudo: true
   when: slurm_dir is not defined
 
diff --git a/roles/smux/README.md b/roles/smux/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..778fac8128859ff1c35660a6ab624fc7ba1b8781
--- /dev/null
+++ b/roles/smux/README.md
@@ -0,0 +1,4 @@
+A role to setup smux
+
+use
+- { role: smux }
diff --git a/roles/smux/tasks/main.yml b/roles/smux/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..62ccd783ccaa3c4e6079e80a09fe18fb8d5c935f
--- /dev/null
+++ b/roles/smux/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- name: install tmux
+  yum: name=tmux state=present
+  sudo: true
+  when: ansible_os_family == "RedHat"
diff --git a/roles/ssh-nopassword-login/handlers/main.yml b/roles/ssh-nopassword-login/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..df0d3deeca457c10a9805a439cb4a61087cac8d3
--- /dev/null
+++ b/roles/ssh-nopassword-login/handlers/main.yml
@@ -0,0 +1,9 @@
+- name: "restart sshd"
+  service: name=sshd state=restarted
+  sudo: true
+  when: ansible_os_family == "RedHat"
+
+- name: "restart ssh"
+  service: name=ssh state=restarted
+  sudo: true
+  when: ansible_os_family == "Debian"
diff --git a/roles/ssh-nopassword-login/tasks/main.yml b/roles/ssh-nopassword-login/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f8594e1902a904b5be06ab3575c1ae697532b854
--- /dev/null
+++ b/roles/ssh-nopassword-login/tasks/main.yml
@@ -0,0 +1,24 @@
+- name: "Disable Challenge Response"
+  lineinfile:
+  args:
+    dest: /etc/ssh/sshd_config
+    regexp: "ChallengeResponseAuthentication yes"
+    line: "ChallengeResponseAuthentication no" 
+    backrefs: yes
+  sudo: true
+  notify: 
+  - restart sshd
+  - restart ssh
+
+- name: "Disable Password"
+  lineinfile:
+  args:
+    dest: /etc/ssh/sshd_config
+    regexp: "PasswordAuthentication yes"
+    line: "PasswordAuthentication no"
+    backrefs: yes
+  sudo: true
+  notify: 
+  - restart sshd
+  - restart ssh
+
diff --git a/roles/upgrade/tasks/main.yml b/roles/upgrade/tasks/main.yml
index 4f29a0726abe5481518a0cabf31ca7f96c111482..0d0a6041a98f6d641e0e4aecd38479e6063f444e 100644
--- a/roles/upgrade/tasks/main.yml
+++ b/roles/upgrade/tasks/main.yml
@@ -53,6 +53,8 @@
   when: ansible_os_family=="RedHat"
   check_mode: no
   changed_when: False
+  args:  
+    warn: False
 
 - name: get kernel version
   shell: uname -r