diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..4dadd58568931d59eb760aa69aaeeb2ac8992fca
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+*.swp
+*.retry
+*-openrc.sh
+gc_key.pem
+CICD/files/slurm.conf
+CICD/files/slurmdbd.conf
+CICD/files/ssh_known_hosts
+
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..18c7d93c31bcf075157165f0bda32eddfe6074a5
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,220 @@
+variables:
+  GIT_SUBMODULE_STRATEGY: recursive
+  STACKNAME: CICD_reporef$CI_COMMIT_REF_NAME
+  NECTAR_ALLOCATION: HPCCICD
+  ANSIBLE_HOST_KEY_CHECKING: "False"
+
+stages:
+#  - integration_test_downstream # working but unwanted here
+#  - trigger_pipeline_in_B   # working but unwanted here
+  - lint
+  #- delete_stack_manual
+  - extended
+  #- heat_test
+  - heat
+  - ansible_create_cluster_stage
+  - push_button_spawn_cluster
+#  - e2e
+  - tests
+  - clean # manually delete stack
+
+
+  
+#trigger_pipeline_in_B:
+#  stage: integration_test_downstream
+#  tags: 
+#  - ansible
+#  script:
+#  - "curl --request POST --form token=${CI_JOB_TOKEN} --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/1085/trigger/pipeline"  # ID is from pysshauthz
+
+# heat_test:
+#   stage: heat_test
+#   allow_failure: false
+#   tags:
+#   - heat
+#   before_script:
+#     - echo "$GC_KEY" > gc_key.pem
+#     - chmod 400 gc_key.pem
+#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - export HEAT_TEST_STACKNAME=_TESTING_HEAT
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
+#     - sleep 60
+#   script:
+#     - echo "heat_test stage"
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#   after_script:
+#     - sleep 20 # artifically wait a bit to make sure it is really dead
+#   when: manual
+
+yamllint:
+  stage: lint
+  allow_failure: true
+  tags: 
+  - yamllint
+  script:
+    - echo "stage yamllint"
+    - cd CICD
+    # - ansible-lint -c .yamllintconf.yaml -x ANSIBLE0002 master_playbook.yml
+    - yamllint -c ./.yamllintheat.yaml ./heat
+
+# delete_stack_manual:
+#   stage: delete_stack_manual
+#   tags:
+#   - heat
+#   before_script:
+#     - echo "$GC_KEY" > gc_key.pem
+#     - chmod 400 gc_key.pem
+#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+#   script:
+#     - echo "heat stage"
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+#   when: manual
+
+ansiblelint:
+  allow_failure: true
+  stage: lint
+  tags: 
+  - ansiblelint
+  script:
+    - echo "stage ansiblelint"
+    - cd CICD
+    - python3 ansiblelint/run_lint.py --targets master_playbook.yml
+    
+
+build_cluster_cicd:
+  stage: heat
+  allow_failure: false
+  tags:
+  - heat
+  before_script:
+    - echo "$GC_KEY" > gc_key.pem
+    - chmod 400 gc_key.pem
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - echo "heat stage"
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - openstack stack list
+    - bash -x ./CICD/heat/heatcicdwrapper.sh create_or_update $STACKNAME
+  after_script:
+    - sleep 20 # artificially wait a bit to give the nodes time to boot
+#  only:
+#    changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
+#    - "heat/*HOT*.yaml"
+#    - schedules
+#    - ./.gitlab-ci.yml
+
+ansible_create_cluster_stage:
+  stage: ansible_create_cluster_stage
+  tags: 
+  - ansible
+  before_script:
+    - echo "$GC_KEY" > gc_key.pem
+    - chmod 400 gc_key.pem
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - echo "ansible_create_cluster_stage"
+    - bash -x ./CICD/ansible_create_cluster_script.sh
+  #after_script:
+    #- rm ./files/inventory.$STACKNAME
+  #only:
+  #  changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
+  #  - "master_playbook.yml"
+  #  - "vars/*.{yml,yaml}"
+  #  - schedules
+  #  - CICD/.gitlab-ci.yml
+
+tests:
+  stage: tests
+  tags:
+  - ansible
+  before_script:
+    - echo "$GC_KEY" > gc_key.pem
+    - chmod 400 gc_key.pem
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - echo "tests stage"
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - openstack stack list
+    - cd CICD
+    - python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
+    - grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME   #fail if inventory file is empty
+    - ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
+    - ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
+    
+    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sinfo" ManagementNodes
+    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "squeue" ManagementNodes
+    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
+    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
+    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
+    
+    - bash -e ./tests/run_tests.sh all "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh ComputeNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh LoginNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    
+    # licensing https://gitlab.erc.monash.edu.au/hpc-team/license_server/tree/master/roles/avizo_license_monitor
+
+manual_cluster_spawn:
+  stage: push_button_spawn_cluster
+  tags:
+  - heat
+  - ansible
+  before_script:
+    - echo "press button spawn cluster."
+    - echo "for this to work you have to provide a variable called manual stackname"
+    - echo I still need to handle os password
+    - echo $MANUAL_STACKNAME
+    - echo "$GC_KEY" > gc_key.pem
+    - chmod 400 gc_key.pem
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
+    - openstack stack list
+    - export STACKNAME=$MANUAL_STACKNAME
+    - sleep 25
+    - bash -x CICD/ansible_create_cluster_script.sh
+  when: manual 
+  only:
+    refs:
+      - "cicd"
+
+extended:
+  stage: extended
+  tags:
+  - heat
+  - ansible
+  before_script:
+    - echo "cleanup stack"
+    - sleep 30
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+  only:
+    variables:
+      - $EXTENDED != null
+  
+clean:
+  stage: clean
+  tags:
+  - heat
+  before_script:
+    - echo "cleanup stack"
+    - sleep 30
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+  #when: manual
\ No newline at end of file
diff --git a/CICD/.yamllintconf.yaml b/CICD/.yamllintconf.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6fd2e00450f3f9520af16e9397ca7719e2e9682
--- /dev/null
+++ b/CICD/.yamllintconf.yaml
@@ -0,0 +1,30 @@
+# copied from luhan
+extends: default
+rules:
+  braces:
+    level: warning
+    max-spaces-inside: 1
+  brackets:
+    level: warning
+    max-spaces-inside: 1
+  colons:
+    level: warning
+  commas:
+    level: warning
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  empty-lines:
+    level: warning
+  hyphens:
+    level: warning
+  indentation:
+    level: warning
+    indent-sequences: consistent
+  line-length:
+    level: warning
+    allow-non-breakable-inline-mappings: true
+  truthy: disable
+  trailing-spaces:
+    level: warning
+
diff --git a/CICD/.yamllintheat.yaml b/CICD/.yamllintheat.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d23091c16fdf89facc43781618d43255b5d14d31
--- /dev/null
+++ b/CICD/.yamllintheat.yaml
@@ -0,0 +1,10 @@
+---
+extends: default
+rules:
+  braces: {min-spaces-inside: 0, max-spaces-inside: 1}
+  brackets: {min-spaces-inside: 0, max-spaces-inside: 1}
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  indentation: disable
+  line-length: disable
\ No newline at end of file
diff --git a/CICD/ansible_create_cluster_script.sh b/CICD/ansible_create_cluster_script.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b062d4f21e71371698683164a1c81a9ef40a39b2
--- /dev/null
+++ b/CICD/ansible_create_cluster_script.sh
@@ -0,0 +1,21 @@
+#!/bin/bash 
+set -e
+export ANSIBLE_HOST_KEY_CHECKING=False
+
+source ./$NECTAR_ALLOCATION-openrc.sh
+openstack stack list
+
+cd CICD
+
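+# Build a static Ansible inventory from the Heat stack and check that every node is reachable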
+python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
+grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME   #fail if inventory file is empty
+ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
+ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
+
+#cd roles 
+    #- "egrep -lRZ 'sudo: true' . | xargs -0 -l sed -i -e 's/sudo: true/become: true/g' "
+#cd ..
+ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem master_playbook.yml
+sleep 15
+echo "ugly workaround: restart slurmdbd once more after the playbook run"
+ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -b -a "systemctl restart slurmdbd" ManagementNodes
\ No newline at end of file
diff --git a/CICD/ansiblelint/.gitignore b/CICD/ansiblelint/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..34d524d13cda88e46754f4ad563d95a135d94748
--- /dev/null
+++ b/CICD/ansiblelint/.gitignore
@@ -0,0 +1 @@
+logdir/*
diff --git a/CICD/ansiblelint/run_lint.py b/CICD/ansiblelint/run_lint.py
new file mode 100644
index 0000000000000000000000000000000000000000..cea3e7d6ae52e212c3ee0b151e58b0cdf361af1f
--- /dev/null
+++ b/CICD/ansiblelint/run_lint.py
@@ -0,0 +1,72 @@
+import yaml
+from argparse import ArgumentParser
+import subprocess
+from pathlib import Path
+import re
+import sys
+import os
+from collections import defaultdict
+def parse_argument():
+    parser = ArgumentParser("ansible lint runner with customized spec")
+    parser.add_argument('--targets', type=str, nargs='*',
+                        help="path to roles or playbook targets")
+    parser.add_argument('--logdir', type=Path, default=Path( __file__ + '/../logdir').resolve(), nargs='?', help='log directory default to ./ansiblelint/logdir')
+
+    args = parser.parse_args()
+    args.logdir.mkdir(exist_ok=True)
+    return args
+def parse_rule_output(line):
+    # (filepath, line, rule, severity, rule_desc)
+    expression = r'(.*\.yml):([0-9]+): \[(.*)\] \[(.*)\] (.*$)'
+    matched = re.match(expression, line)
+    # print(line)
+    matched_groups = matched.groups()
+    return matched_groups
+
+def group_by(output, idx):
+    res = defaultdict(list)
+    for i in output:
+        # print(i)
+        res[i[idx]].append(i)
+    return res
+cmd_template = "ansible-lint --parseable-severity --nocolor "
+outputs = defaultdict()
+def main():
+    exit_code = 0
+    args = parse_argument()
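+    # clear logs from any previous run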
+    for item in args.logdir.iterdir():
+        item.unlink()
+    cmd = cmd_template
+    if args.targets is not None:
+        cmd += ' ' + ' '.join(args.targets)
+    else:
+        rolenames = [str(i.resolve())
+                     for i in Path(__file__ + '/../../plays/roles').resolve().iterdir() if i.is_dir()]
+        cmd += ' ' + ' '.join(rolenames)
+        # print(cmd)
+    logfile = args.logdir.joinpath('logfile')
+    cmd += ' 2>&1 | tee {}'.format(str(logfile.resolve()))
+    # print(cmd)
+    output = subprocess.check_output(cmd, shell=True)
+    print(output.decode())
+    output = output.decode().splitlines()
+    # print(output)
+    output = [parse_rule_output(line) for line in output]
+
+    # group by severity
+    output = group_by(output, 3)
+    # print(output.keys())
+    # print(output.keys())
+    for k,v in output.items():
+        # print(k, v)
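+        # only VERY_HIGH severity findings fail the job; lower severities are logged only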
+        if (k=='VERY_HIGH') and len(v) != 0:
+            exit_code = 1
+        current_log = args.logdir.joinpath(k).resolve()
+
+        with current_log.open(mode='w') as f:
+            f.writelines(['filepath\tline\trule\tseverity\trule description\n'])
+            f.writelines(['\t'.join(list(i)) + '\n' for i in v])
+    sys.exit(exit_code)
+    # return
+if __name__ == "__main__":
+    main()
diff --git a/CICD/ansiblelint/spec.yml b/CICD/ansiblelint/spec.yml
new file mode 100644
index 0000000000000000000000000000000000000000..53ee4a04a8c583cef6d601cd268d0399bbf6cdba
--- /dev/null
+++ b/CICD/ansiblelint/spec.yml
@@ -0,0 +1,37 @@
+---
+# https://docs.ansible.com/ansible-lint/rules/default_rules.html
+error:
+  - 101
+  - 102
+  - 103
+  - 104
+  - 202
+  - 304
+  - 306
+  - 401
+  - 402
+  - 403
+  - 404
+  - 501
+  - 502
+  - 701
+
+warning:
+  - 105
+  - 201
+  - 203
+  - 204
+  - 205
+  - 206
+  - 301
+  - 302
+  - 303
+  - 305
+  - 503
+  - 504
+  - 601
+  - 602
+  - 702
+  - 703
+  - 704
+
diff --git a/CICD/files/.gitignore b/CICD/files/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..37e22cdfa443a09339be8c5dc62c492e2914cce0
--- /dev/null
+++ b/CICD/files/.gitignore
@@ -0,0 +1,4 @@
+ssh_known_hosts
+*.conf
+etcHosts
+inventory.*
diff --git a/CICD/files/etcExports b/CICD/files/etcExports
new file mode 100644
index 0000000000000000000000000000000000000000..0867fd1b7bb1aff1d6be948f7c4fd40ee07f199a
--- /dev/null
+++ b/CICD/files/etcExports
@@ -0,0 +1,4 @@
+/nfsvol/home *(fsid=1,rw,no_root_squash)
+/slurmstate *(fsid=2,rw,no_root_squash)
+/nfsvol/projects *(fsid=4,rw,no_root_squash)
+/nfsvol/scratch *(fsid=5,rw,no_root_squash)
diff --git a/CICD/heat/gc_HOT.yaml b/CICD/heat/gc_HOT.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..da11e6a1bc80ed80ba841064234b5a630fe71b47
--- /dev/null
+++ b/CICD/heat/gc_HOT.yaml
@@ -0,0 +1,269 @@
+---
+heat_template_version: 2013-05-23
+description: "A simple template to boot a cluster of desktops (LoginNode, ManagementNodes and Desktop Nodes)"
+# avz parameters disabled. they are working but I want just more options than monash-02. I would like to have a parameter that says "I don't care"
+
+parameters:
+  ubuntu_1804_image_id:
+    type: string
+    label: Image ID
+    description: Ubuntu Image
+    default: 99d9449a-084f-4901-8bd8-c04aebd589ca
+  centos_7_image_id:
+    type: string
+    label: Image ID
+    description: Centos Image
+    default: c47c3acb-9657-4243-9e14-e6c676157e3b #with NetworkManager
+  ssh_key:
+    type: string
+    default: gc_key
+  avz:
+    type: string
+    default: monash-02
+  project_name:
+    type: string
+  NetID:
+    type: string
+    default: Classic Provider
+  Flavour:
+    type: string
+    default: t3.xsmall
+
+
+resources:
+
+  SlurmSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatslurmsecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 12000,
+               port_range_max: 12999},
+              { protocol: tcp,
+               port_range_min: 6817,
+               port_range_max: 6819},
+              { protocol: tcp,
+               port_range_min: 1019,
+               port_range_max: 1019}]
+  NFSSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatnfssecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 2049,
+               port_range_max: 2049},
+              { protocol: tcp,
+               port_range_min: 111,
+               port_range_max: 111},
+              { protocol: udp,
+               port_range_min: 2049,
+               port_range_max: 2049},
+              { protocol: udp,
+               port_range_min: 111,
+               port_range_max: 111}]
+  MySQLSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatmysqlsecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 3306,
+               port_range_max: 3306} ]
+  SSHMonashSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "SSHMonashSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 118.138.240.0/21
+     } ]
+#  SSHInternalSecGroup:
+#   type: "OS::Neutron::SecurityGroup"
+#   properties:
+#     name: "SSHInternalSecGroup"
+#     rules: [ { protocol: tcp,
+#               port_range_min: 22,
+#               port_range_max: 22,
+#               direction: ingress} ]
+               #remote_ip_prefix: { get_param: REMOTE_IP }, direction: ingress
+  webaccess:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "webaccess"
+     rules: [ { protocol: tcp,
+               port_range_min: 80,
+               port_range_max: 80},
+              { protocol: tcp,
+               port_range_min: 443,
+               port_range_max: 443} ]
+
+  SQLNode0:
+   type: "OS::Nova::Server"
+   properties:
+    name:
+     list_join: [ '-', [ { get_param: "OS::stack_name" }, 'sql0' ]]
+    availability_zone: { get_param: avz }
+    flavor: m3.small
+    image: { get_param: centos_7_image_id }
+    key_name: { get_param: ssh_key }
+    security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: MySQLSecGroup }, { get_resource: NFSSecGroup } ]
+    metadata:
+     ansible_host_groups: [ SQLNodes, NFSNodes ]
+     ansible_ssh_user: ec2-user
+     project_name: { get_param: project_name }
+    networks:
+      - network: { get_param: NetID }
+
+  NFSVolume:
+   type: OS::Cinder::Volume
+   properties:
+    availability_zone: { get_param: avz }
+    size: 1
+    name: nfsvol
+  NFSVolumeAttachment:
+   type: "OS::Cinder::VolumeAttachment"
+   properties:
+    volume_id: { get_resource: NFSVolume }
+    instance_uuid: { get_resource: SQLNode0 }
+
+  SLURMSTATEVolume:
+   type: OS::Cinder::Volume
+   properties:
+    availability_zone: { get_param: avz }
+    size: 1
+    name: slurmstate
+  SLURMSTATEVolumeAttachment:
+   type: "OS::Cinder::VolumeAttachment"
+   properties:
+    volume_id: { get_resource: SLURMSTATEVolume }
+    instance_uuid: { get_resource: SQLNode0 }
+
+  DBVolume:
+   type: OS::Cinder::Volume
+   properties:
+    availability_zone: { get_param: avz }
+    size: 10
+    name: dbvol
+  DBVolumeAttachment:
+   type: "OS::Cinder::VolumeAttachment"
+   properties:
+    volume_id: { get_resource: DBVolume }
+    instance_uuid: { get_resource: SQLNode0 }
+
+  MgmtNodes:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 2
+    resource_def:
+      type: My::Server::MgmtNode
+      properties:
+        #avz: { get_param: avz }
+        image: { get_param: centos_7_image_id }
+        ansible_ssh_user: ec2-user
+        mynodename:
+         list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmt%index%' ]]
+        ssh_key: { get_param: ssh_key }
+        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
+        project_name: { get_param: project_name }
+
+  LoginNodes:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 1
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: centos_7_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'login%index%' ]]
+      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      metadata:
+       ansible_host_groups: [ LoginNodes ]
+       ansible_ssh_user: ec2-user
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
+  DesktopNodes:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 0
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: centos_7_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopc%index%' ]]
+      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      metadata:
+       ansible_host_groups: [ DesktopNodes, VisNodes, ComputeNodes ]
+       ansible_ssh_user: ec2-user
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
+  ComputeNodes:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 1
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: centos_7_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec%index%' ]]
+      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      metadata:
+       ansible_host_groups: [ ComputeNodes ]
+       ansible_ssh_user: ec2-user
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
+  UbuntuDesktopNodes:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 0
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: ubuntu_1804_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopu%index%' ]]
+      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      metadata:
+       ansible_host_groups: [ DesktopNodes ]
+       ansible_ssh_user: ubuntu
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
+#  PySSHauthz:
+#   type: "OS::Nova::Server"
+#   properties:
+#    name:
+#     list_join: [ '-', [ { get_param: "OS::stack_name" }, 'pysshautz' ]]
+#    availability_zone: { get_param: avz }
+#    flavor: t3.xsmall
+#    image: { get_param: ubuntu_1804_image_id }
+#    key_name: { get_param: ssh_key }
+#    security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: webaccess } ]
+#    metadata:
+#     ansible_host_groups: [ PySSHauthz ]
+#     ansible_ssh_user: ubuntu
+#     project_name: { get_param: project_name }
+#    networks:
+#      - network: { get_param: NetID }
diff --git a/CICD/heat/heatcicdwrapper.sh b/CICD/heat/heatcicdwrapper.sh
new file mode 100644
index 0000000000000000000000000000000000000000..abbd2ee6e7734b6126cc217bad45f858f5ee1958
--- /dev/null
+++ b/CICD/heat/heatcicdwrapper.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
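+# Thin wrapper around 'openstack stack' used by the CI jobs to create, update, show or delete the test stack.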
+
+# This script does not check available resources on Nectar!
+
+
+function usage {
+    echo $"Usage: $0 {create|update|show|create_or_update,delete_if_exists} STACKNAME"
+    exit 1
+}
+
+if [ "$#" -ne 2 ]; then
+    echo "Illegal number of parameters expecting 2"
+    usage
+fi
+
+STACKNAME=$2
+
+
+if [[ "$STACKNAME" == "CICD"* ]]; then
+  echo "CICD found in stackname. doing nothing"
+else
+  STACKNAME="CICD"$STACKNAME
+fi
+
+
+
+echo "[heatcicdwrapper] Prefixing Stackname with CICD. This is a safety feature because this script can also delete stacks" $STACKNAME
+
+function check_stack_exists {
+    if openstack stack list | grep -w $STACKNAME;
+        then 
+            echo "stack found";
+        else 
+            echo "stack not found";
+            return 1
+    fi
+}
+
+
+function func_delete_if_exists {
+  if ! check_stack_exists
+  then
+    exit 0
+  fi
+  openstack stack delete -y --wait $STACKNAME
+  ret=$?
+  if [ $ret -ne "0" ]
+  then
+    sleep 15
+    openstack stack delete -y --wait $STACKNAME
+    ret=$?
+  fi
+  exit $ret
+}
+
+function create_stack {
+    
+ if check_stack_exists
+ then
+    echo "I will NOT create existing stack maybe use update"
+    exit -44
+ fi
+ openstack stack create --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
+ createreturn=$?
+ if [ $createreturn -ne "0" ]
+ then
+    echo "stack creation failed; trying to delete it"
+    openstack stack delete -y --wait $STACKNAME
+    exit -47
+ fi
+ exit $createreturn
+}
+
+
+case "$1" in
+        create)
+            create_stack
+            ;;
+         
+        update)
+            if ! check_stack_exists
+               then
+                   echo "I cannot update a stack which does not exist"
+                   exit -45
+            fi
+            openstack stack update --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
+            ret=$?
+            exit $ret
+            ;;
+        create_or_update)
+            if  check_stack_exists
+               then
+               openstack stack update --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
+               ret=$?
+               exit $ret
+            fi
+            create_stack
+                  
+            ;;
+        delete_if_exists)
+            func_delete_if_exists
+
+            ;;
+         
+
+        show)
+            check_stack_exists
+            echo $?
+            OUTPUT=$(openstack stack show $STACKNAME| grep -w stack_status)
+            echo $OUTPUT
+            ;;
+         
+        *)
+            usage
+ 
+esac
diff --git a/CICD/heat/mgmtnode_HOT.yaml b/CICD/heat/mgmtnode_HOT.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d44cf911c1dcc0909330d6f9455500fc28b6ec88
--- /dev/null
+++ b/CICD/heat/mgmtnode_HOT.yaml
@@ -0,0 +1,38 @@
+heat_template_version: 2013-05-23
+parameters:
+  mynodename:
+    type: string
+  ssh_key:
+    type: string
+  image:
+    type: string
+  #avz:
+  #  type: string
+  project_name:
+    type: string
+  ansible_ssh_user:
+    type: string
+  security_groups:
+    type: json
+  NetID:
+    type: string
+    #default: 915a3d96-693d-4c9d-a2ef-04996ab085d3
+    default: Classic Provider
+
+resources:
+
+  instance:
+    type: OS::Nova::Server
+    properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: image }
+      key_name: { get_param: ssh_key }
+      security_groups: { get_param: security_groups }
+      name: { get_param: mynodename }
+      metadata:
+       ansible_host_groups: [ ManagementNodes ]
+       ansible_ssh_user: { get_param: ansible_ssh_user }
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
diff --git a/CICD/heat/resource_registry.yaml b/CICD/heat/resource_registry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0638b887c8c09d5d6a98f51a34d3b4eeb6e9aafb
--- /dev/null
+++ b/CICD/heat/resource_registry.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+  My::Server::MgmtNode: mgmtnode_HOT.yaml
diff --git a/CICD/master_playbook.yml b/CICD/master_playbook.yml
new file mode 100644
index 0000000000000000000000000000000000000000..04dc7e747b8894c60a049697e93c8ac89a2b8dc1
--- /dev/null
+++ b/CICD/master_playbook.yml
@@ -0,0 +1,7 @@
+---
+- import_playbook: plays/make_files.yml
+- import_playbook: plays/allnodes.yml
+- import_playbook: plays/init_slurmconf.yml # this requires management nodes
+- import_playbook: plays/nfssqlnodes.yml
+- import_playbook: plays/mgmtnodes.yml
+- import_playbook: plays/computenodes.yml
diff --git a/CICD/plays/allnodes.yml b/CICD/plays/allnodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..406bdb797108aa4d19a505742cb4df02f5df6909
--- /dev/null
+++ b/CICD/plays/allnodes.yml
@@ -0,0 +1,48 @@
+- hosts: 'all'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  tasks:
+  - { name: set use shared state, set_fact: usesharedstatedir=False }
+  - { name: set hostgroup, set_fact: hostgroup='ComputeNodes' }
+  tags: [ always ]
+
+- hosts: 'all'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  roles:
+#  - { role: disable_selinux, tags: [ disableselinux ] }
+  - { role: etcHosts, tags: [ networking ] }
+  - { role: config_repos, tags: [ repos ] }
+  - { role: upgrade }
+  - { role: set_password }
+
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,ManagementNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  roles:
+  - { role: disable_selinux, tags: [ disableselinux ] }
+  #- { role: ldapclient, tags: [ authentication ] }
+  - { role: ssh-password-login, tags: [ authentication ] }
+  - { role: enable_sudo_group, tags: [ authentication, sudo ] }
+  - { role: move_homedir }
+  - { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
+  - { role: SSHKnownHosts, tags: [ known_hosts ] }
+  - { role: jasons_ssh_ca, tags: [ ssh_ca ] }
diff --git a/CICD/plays/computenodes.yml b/CICD/plays/computenodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a43a5a927506f800d0f8d5a1eb3da208d404f4b5
--- /dev/null
+++ b/CICD/plays/computenodes.yml
@@ -0,0 +1,64 @@
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  tasks:
+  - { name: set use shared state, set_fact: usesharedstatedir=False }
+  tags: [ always ]
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  roles:
+  - { role: move_homedir, tags: [ authentication, filesystems ] }
+  - { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
+  - { role: slurm-common, tags: [ slurm, slurm-common ] }
+  - { role: lmod, tags: [ other ] }
+  - { role: enable_modules, default_modules: "lmod", tags: [ other ] }
+  - { role: postfix, tags: [ mail, other ] }
+
+- hosts: 'VisNodes'
+  vars_files:
+  - vars/passwords.yml
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml
+  - vars/slurm.yml
+  - vars/vars.yml
+  roles:
+  - { role: gpu, tags: [ gpu ] }
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
+  vars_files:
+  - vars/passwords.yml
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml
+  - vars/slurm.yml
+  - vars/vars.yml
+  roles:
+  - { role: slurm_config, tags: [slurm, slurm_config] }
+
+- hosts: 'DesktopNodes,ComputeNodes'
+  vars_files:
+  - vars/passwords.yml
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml
+  - vars/slurm.yml
+  - vars/vars.yml
+  strategy: free
+  roles:
+  - { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
+  #- { role: mate-de-install, tags: [ mate-de-install ] }   # TODO this crashes for everything except cmca
\ No newline at end of file
diff --git a/CICD/plays/files b/CICD/plays/files
new file mode 120000
index 0000000000000000000000000000000000000000..feb122881ce2321d72ad6b867bd2a3d01eadaac3
--- /dev/null
+++ b/CICD/plays/files
@@ -0,0 +1 @@
+../files
\ No newline at end of file
diff --git a/CICD/plays/init_slurmconf.yml b/CICD/plays/init_slurmconf.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30667ac53b5b6c387af0bdacb609f09cc8bfa5c3
--- /dev/null
+++ b/CICD/plays/init_slurmconf.yml
@@ -0,0 +1,15 @@
+---
+- hosts: 'all'
+  tasks:
+  - include_vars: vars/passwords.yml 
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml 
+  - include_vars: vars/slurm.yml 
+  - include_vars: vars/vars.yml 
+- hosts: 'all'
+  tasks:
+  - { name: setup, setup: }
+- hosts: 'ManagementNodes'
+  roles:
+  - { role: calculateSlurmConf }
diff --git a/CICD/plays/make_files.yml b/CICD/plays/make_files.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b05925ce73f9be136bb46128961990b938c07910
--- /dev/null
+++ b/CICD/plays/make_files.yml
@@ -0,0 +1,22 @@
+---
+# just calculates an etc hosts
+- hosts: 'all'
+  tasks:
+  - include_vars: vars/passwords.yml
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml
+  - include_vars: vars/slurm.yml
+  - include_vars: vars/vars.yml
+- hosts: 'all'
+  tasks:
+  - { name: setup, setup: }
+- hosts: 'ManagementNodes'
+  roles:
+  - { role: calculateEtcHosts }
+  
+#- hosts: 'NFSNodes'
+#  roles:
+#  - { role: calculateExports }
+
+
diff --git a/CICD/plays/mgmtnodes.yml b/CICD/plays/mgmtnodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c890a5456b5306f1478070e3f329fc57adc51340
--- /dev/null
+++ b/CICD/plays/mgmtnodes.yml
@@ -0,0 +1,43 @@
+# Basic stuff to make the nodes functional
+# i.e. upgrade operating systems, etc
+#
+
+- hosts: 'ManagementNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  tasks:
+      #  - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
+  - { name: set use shared state, set_fact: usesharedstatedir=True }
+  tags: [ always ]
+
+- hosts: 'ManagementNodes'
+  strategy: free
+  gather_facts: False
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  roles:
+#  - { role: ldapclient, tags: [ authentication ] }
+#  - { role: ssh-password-login }
+#  - { role: enable_sudo_group }
+#  - { role: make_filesystems, volumes: "{{ glustervolumes }}" }
+#  - { role: gluster_server, volname: "gv", brickmnt: '/gbrick', gluster_servers: "{{ groups['ManagementNodes'] }}", replicas: 2, tags: [ gluster_server ]  }
+#  - { role: gluster_volcreate, volname: "gv", gluster_servers: "{{ groups['ManagementNodes'] }}", brickmnt: '/gbrick', replicas: 2 }
+#  - { role: gluster_client, volname: "gv", gluster_servers: ['mgmt0','mgmt1','sql0'], volmnt: '/glusterVolume' }
+  - { role: nfs-client, nfsMounts: "{{ mgmtNfsMounts }}", tags: [ nfs ] }
+  - { role: slurmdb-config, tags: [ slurm, slurmdb-config ] }
+  - { role: slurm-common, tags: [ slurm, slurm-common ]  }
+  - { role: slurm_config, tags: [ slurm, slurm-config ] }
+  - { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ]  }
+#  - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ]  }
+#  - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
+
diff --git a/CICD/plays/nfssqlnodes.yml b/CICD/plays/nfssqlnodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..24a7338397f32ac7e0ca448935f394c77b112d86
--- /dev/null
+++ b/CICD/plays/nfssqlnodes.yml
@@ -0,0 +1,83 @@
+# Plays to initialize the NFS and SQL nodes
+# 
+#
+
+- hosts: 'all'
+  tasks:
+  - { name: setup, setup: }
+  tags: [ always ]
+  
+#we need this here to gather facts and fill required variables.
+- hosts: 'ManagementNodes'
+  gather_facts: True
+  tasks:
+  - include_vars: vars/passwords.yml 
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml 
+  - include_vars: vars/slurm.yml 
+  - include_vars: vars/vars.yml 
+  - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
+  - { name: set use shared state, set_fact: usesharedstatedir=True }
+  tags: [ always ]
+  
+- hosts: 'SQLNodes,NFSNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  pre_tasks:
+  - { name: set hostgroup, set_fact: hostgroup='SQLNodes', tags: [ always ] }
+  - { name: set use shared state, set_fact: usesharedstatedir=True, tags: [ always ] }
+
+- hosts: 'SQLNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  gather_facts: True
+  roles:
+  - { role: upgrade, tags: [ upgrade ] }
+  - { role: make_filesystems, volumes: "{{ dbvolumes }}" }
+  - { role: mysql, mysql_type: mysql_server,  mysql_root_password: "{{ sqlrootPasswd }}", mysql_user_name: slurmdb, mysql_user_db_name: slurm_acct_db, mysql_user_hosts_group: "{{ groups['ManagementNodes'] }}", mysql_user_password: "{{ slurmdb_passwd }}", tags: [ database ] }
+  - { role: slurm-mysql-config, tags: [database,slurmdb] }
+  tags: [ sql ]
+ 
+- hosts: 'NFSNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  gather_facts: False
+  roles:
+  - { role: make_filesystems, volumes: "{{ nfsvolumes }}" }
+  tasks:
+  - { name: make homedir, file: { path: /nfsvol/home, state: directory }, become: true, become_user: root }
+  - { name: make projects, file: { path: /nfsvol/projects, state: directory }, become: true, become_user: root }
+  - { name: make scratch, file: { path: /nfsvol/scratch, state: directory }, become: true, become_user: root }
+  tags: [ nfs ]
+
+- hosts: 'NFSNodes'
+  strategy: free
+  gather_facts: False
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  roles:
+  - { role: nfs-server }
+  tags: [ nfs,nfs-server ]
diff --git a/CICD/plays/roles b/CICD/plays/roles
new file mode 120000
index 0000000000000000000000000000000000000000..b741aa3dbce62c5259099ec357a14dfd1ac7e2ff
--- /dev/null
+++ b/CICD/plays/roles
@@ -0,0 +1 @@
+../../roles
\ No newline at end of file
diff --git a/CICD/plays/vars b/CICD/plays/vars
new file mode 120000
index 0000000000000000000000000000000000000000..e8d9a6429b3aaab679b98557469104f0f7cc952b
--- /dev/null
+++ b/CICD/plays/vars
@@ -0,0 +1 @@
+../vars
\ No newline at end of file
diff --git a/CICD/tests/LoginNodes/run_slurm_testsuite.inactive b/CICD/tests/LoginNodes/run_slurm_testsuite.inactive
new file mode 100755
index 0000000000000000000000000000000000000000..c5d2f24f1f3cf99c3f7481f3bc467907444425d6
--- /dev/null
+++ b/CICD/tests/LoginNodes/run_slurm_testsuite.inactive
@@ -0,0 +1,29 @@
+#!/bin/bash
+OUTPUT_LOG=$(realpath ${1-slurmtest.out})
+if ! type "scontrol" > /dev/null; then
+	echo "cannot find slurm"
+	exit 1
+fi
+SLURM_DIR=${2-$(dirname $(dirname $(which scontrol)))}
+#SLURM_DIR=$slurm_dir
+
+#if [[ -d $2 ]];then
+#    SLURM_SRC_DIR=$2    
+#else
+#    SLURM_SRC_DIR=./slurm_src
+#    git clone https://github.com/SchedMD/slurm.git $SLURM_SRC_DIR
+#    cd $SLURM_SRC_DIR && ./configure
+#fi
+#cd $SLURM_SRC_DIR/testsuite/expect
+#echo -en "set slurm_dir=$SLURM_DIR\nset max_job_delay 300\n" > globals.local
+#make
+#echo "log is written to $OUTPUT_LOG"
+#echo "slurm dir is defined as $SLURM_DIR"
+./regression > /dev/null 2> $OUTPUT_LOG
+failures="$(sed -n 's/Failures:   \(.*\)/\1/p' $OUTPUT_LOG)"
+if (( $failures > 0 ));then
+	echo "$failures failures found, refer to $OUTPUT_LOG for log"
+	exit 1
+fi
+exit 0
+
diff --git a/CICD/tests/ManagementNodes/check.yml b/CICD/tests/ManagementNodes/check.yml
new file mode 100644
index 0000000000000000000000000000000000000000..95e06a0a034c32c5e8ae30c2a58c40e10a738afc
--- /dev/null
+++ b/CICD/tests/ManagementNodes/check.yml
@@ -0,0 +1,8 @@
+---
+- hosts: ManagementNodes
+  gather_facts: false
+  tasks:
+  - name: have ssh running
+    service:
+      name: sshd
+      state: started
\ No newline at end of file
diff --git a/CICD/tests/Readme.md b/CICD/tests/Readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..feca06268d107c2aeae9a6a8f61a2ed59e8648bc
--- /dev/null
+++ b/CICD/tests/Readme.md
@@ -0,0 +1,7 @@
+This folder contains the tests that are run automatically by the CICD pipeline.
+
+All files ending in .sh are executed by a shell.
+All files ending in .yml are executed by ansible-playbook.
+./tmp can be used as a temporary folder and is cleaned after execution.
+
+Tests are prefixed with 0-9 to give the execution a rough priority order.
\ No newline at end of file
diff --git a/CICD/tests/all/0_EXAMPLE_FALSE.sh b/CICD/tests/all/0_EXAMPLE_FALSE.sh
new file mode 100755
index 0000000000000000000000000000000000000000..10c48607688d030fbbf054b1046e18d431b869c3
--- /dev/null
+++ b/CICD/tests/all/0_EXAMPLE_FALSE.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+/bin/false
+
+status=$?
+[ $status -eq 1 ] 
\ No newline at end of file
diff --git a/CICD/tests/all/0_EXAMPLE_TRUE.sh b/CICD/tests/all/0_EXAMPLE_TRUE.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3634c7aa3076c2e8cd2159aca337adb35f1f31cf
--- /dev/null
+++ b/CICD/tests/all/0_EXAMPLE_TRUE.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+/bin/true
+
+status=$?
+[ $status -eq 0 ] 
diff --git a/CICD/tests/run_tests.sh b/CICD/tests/run_tests.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d063e98d1d7e4617882bb14a5e1c51d9e8cda381
--- /dev/null
+++ b/CICD/tests/run_tests.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+function usage {
+    echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql}" INVENTORY_FILE KEY
+    exit 1
+}
+
+function run_them () 
+{
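+    # run every *.sh test in ./tests/$1/ against the host group $1 via the ansible script module,
+    # and every *.yml test via ansible-playbook ($2 = inventory file, $3 = ssh key)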
+    #limit='--limit '"$1"
+    #if [ "$1" = "all" ]
+    #then
+    #  limit="all"
+    #fi
+    for filename in ./tests/$1/*.sh; do   # this is not sorted yet
+        [ -e "$filename" ] || continue
+        #/bin/bash -x $filename # local execution. nice for dev
+        ansible -i $2 --key-file $3 -m script -a "$filename" $1
+    done
+    for filename in ./tests/$1/*.yml; do   # this is not sorted yet
+        [ -e "$filename" ] || continue
+        ansible-playbook -i $2 --key-file $3 $filename # I am assuming the playbook cares about visibility here. might have to change later
+    done
+}
+
+# Check that $1 is one of the listed host groups (see usage); anything else prints usage and exits.
+case "$1" in
+        all)
+        ;;
+        ComputeNodes)
+        ;;
+        ManagementNodes)
+        ;;
+        NFSNodes)
+        ;;
+        SQLNodes)
+        ;;
+        LoginNodes)
+        ;;
+        *)
+        usage
+esac
+
+run_them $1 $2 $3
\ No newline at end of file
diff --git a/CICD/vars/filesystems.yml b/CICD/vars/filesystems.yml
new file mode 100644
index 0000000000000000000000000000000000000000..62d917425c4565d5653797e41947f98b2987375f
--- /dev/null
+++ b/CICD/vars/filesystems.yml
@@ -0,0 +1,21 @@
+---
+computeNfsMounts:
+  - { name: '/home', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/home", 'opts': 'defaults,nofail', 'fstype':'nfs4' }
+  - { name: '/usr/local', ipv4: "118.138.235.37", src: "/usr_local", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+  - { name: '/projects', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/projects", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+  - { name: '/scratch', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/scratch", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+mgmtNfsMounts:
+  - { name: '/mnt/home', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/home", 'opts': 'defaults,nofail', 'fstype':'nfs4' }
+  - { name: '/slurmstate', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/slurmstate", 'opts': 'defaults,nofail', 'fstype':'nfs4' }
+dbvolumes:
+  - { fstype: 'ext4', name: 'dbvol', mntpt: '/dbvol', linkto: '/var/lib/mysql' }
+nfsvolumes:
+  - { fstype: 'ext4', name: 'nfsvol', mntpt: '/nfsvol' }
+  - { fstype: 'ext4', name: 'slurmstate', mntpt: '/slurmstate' }
+exportList:
+  - { name: '/home', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/home", 'opts': 'defaults,nofail', 'fstype':'nfs4' }
+  - { name: '/usr/local', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/usr_local_centos7", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+  - { name: '/projects', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/projects", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+  - { name: '/scratch', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/nfsvol/scratch", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+  - { name: '/slurmstate', ipv4: "{{ groups['NFSNodes'][0] }}", src: "/slurmstate", 'opts': 'defaults,rw,nofail', 'fstype':'nfs4' }
+
diff --git a/CICD/vars/ldapConfig.yml b/CICD/vars/ldapConfig.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3ccb6e0fafe68e7b3b601c7ed1efb537eda33c74
--- /dev/null
+++ b/CICD/vars/ldapConfig.yml
@@ -0,0 +1,50 @@
+---
+ldapServerHostIpLine: "118.138.241.196 hpcldap0.erc.monash.edu.au"
+ldapCaCertContents: |
+  -----BEGIN CERTIFICATE-----
+  MIIGODCCBCCgAwIBAgIJAJPlOnRdsYibMA0GCSqGSIb3DQEBCwUAMIGoMQswCQYD
+  VQQGEwJBVTERMA8GA1UECAwIVmljdG9yaWExEDAOBgNVBAcMB0NsYXl0b24xIDAe
+  BgNVBAoMF01vbmFzaCBlUmVzZWFyY2ggQ2VudGVyMREwDwYDVQQLDAhIUEMgVGVh
+  bTEeMBwGA1UEAwwVTWVSQyBIUEMgVGVhbSBSb290IENBMR8wHQYJKoZIhvcNAQkB
+  FhBoZWxwQG1hc3NpdmUub3JnMB4XDTE1MDgxOTAyNDczOFoXDTM1MDgxNDAyNDcz
+  OFowgagxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0b3JpYTEQMA4GA1UEBwwH
+  Q2xheXRvbjEgMB4GA1UECgwXTW9uYXNoIGVSZXNlYXJjaCBDZW50ZXIxETAPBgNV
+  BAsMCEhQQyBUZWFtMR4wHAYDVQQDDBVNZVJDIEhQQyBUZWFtIFJvb3QgQ0ExHzAd
+  BgkqhkiG9w0BCQEWEGhlbHBAbWFzc2l2ZS5vcmcwggIiMA0GCSqGSIb3DQEBAQUA
+  A4ICDwAwggIKAoICAQDJxc194E9MGucoutUvmVvT04D6M3S7LlySwQ5XJd4ec22z
+  csmpoEep+IPVjChVKTN0mRYagAlh5UZ6VYtNA29Lkd4GC5Q2IAlrR9+pgXupuD5v
+  Qv1pFGEuWEPp5PHn4053gYtdVQ0pZQ7ytkVqSW5TJPNcR9AwHpW7JuQkU1jRGCO0
+  t8dthC1msT62UnfjXStznjATm+M253y5PF4IquGb1K6ArR79Os2Ds78NeLyZ24vC
+  ik2AA6QpzkOZOLzRZLyWn4Gdz/jyblZP/A/zjM83symIdn3dv0wC8A3hZsHP771X
+  tw2f6uyiXPftiJt0YuPQdw9kdbDda0Dp7UwiTdaUdzBsQYUGuCQhw3T3NurPZu83
+  K4ftVnIez9VO+5buJQxX0dc0/w0fwIZVtMesdMt+08x6Cf9nVmDrheArTKYWOq0r
+  5eNntg16JAVBixRMwiV+KL4VP/pSKXQK2a9WptzEjVHLSsN0oMAoHkBVz47fSIdD
+  O79jYak+yvPORMkqd0iwMnt0F+wg9JrMVhhCmU5vdqgwQy60LCHn23IX7x821YTt
+  inQM43FsvRCAwWabWinn1prPHLpzaeMgE0wSVBtd4CvPqQ0fW5HJjdOjzyKRim8d
+  1jN+1opa7CbcM2byfUU0yd1wU4jp5DSeZokV8ECr43pUymcc2dJwmTNApcg92wID
+  AQABo2MwYTAdBgNVHQ4EFgQUJ4sfHiRrNF3i/yAaV+OnIvfOAwgwHwYDVR0jBBgw
+  FoAUJ4sfHiRrNF3i/yAaV+OnIvfOAwgwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B
+  Af8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAF/gyOaUKw0AUxfoWfC4+hsD/QFg
+  h+GvOTrT+xA5Z8qpaPJDJijVQ8zAVFRyUsZZ9ZXe+QkIqP1WXnX0ROeDJ3LRnaoO
+  Vq/jy1OratWDsoNCvhjY5ZY2eZh2CXQVj40BD6iZJpfgNayDsId7wUKTraBaZ+k4
+  NXu65f6objeIx8febnazV7s9C0Ola2fpdv7/JmiiizFMn62codyztA6J9+HXirc5
+  Pq+RKVqPvBEWRi2LKAsbOubFklXTwe8cTwmMFUT2BPp6gpwIXtaSOpBQX/Ynthp5
+  LRGU/koLZSKAeYIoUPH4pJHe89fpgtOuKBjRlOFdnUjJ90xIh2dyZm3G4JyINwKF
+  HrdGsu+RunUtE1AfT5S21ilcSjqLvQUfciWEyRcnmAyi/9o7upJlQCNGcPy3l5kJ
+  VdpRBtmVK08k1S9HtvQvqY82fDEnbxzFOla2uPDQ3sE1LodvY4KUZrA9ML3EUyeG
+  F5mvvhUOSMkmB8VouE2gt0g4rFXtHL6nHQ7rr1Ha/xcm/dVQY4e4Z43OYEflRkNV
+  R6VdSNWq3Voh4ASrLfuv4/5Mbt5BnLKvzvnZVeNmJIh2Rc/eYfao1K7K6siAUhP2
+  ONklIbbx/WISO5Vchcw65DclkEBZos2KqRoMb/Rxn5sFIvRWgrXvzw39o8agWO0K
+  9jGyW0SYdK9x4Qxn
+  -----END CERTIFICATE-----
+ldapCaCertFile: /etc/ssl/certs/cacert.crt
+ldapDomain: "erc.monash.edu.au"
+ldapURI: "ldaps://hpcldap0.erc.monash.edu.au:636"
+ldapROURI: "ldaps://hpcldap1.erc.monash.edu.au:636"
+ldapBindDN: "cn=ldapuser,ou=People,dc=erc,dc=monash,dc=edu,dc=au"
+ldapBindDNPassword: "thisisafakepassword"
+ldapManagerDN: "cn=Manager,dc=erc,dc=monash,dc=edu,dc=au"
+ldapBase: "dc=erc,dc=monash,dc=edu,dc=au"
+ldapGroupBase: "ou=Groups,dc=erc,dc=monash,dc=edu,dc=au"
+ldapRfc2307Pam: ""
+ldap_access_filter: "(&(objectClass=posixAccount)(memberOf=cn=m3,ou=aclgroups,dc=erc,dc=monash,dc=edu,dc=au))"
diff --git a/CICD/vars/names.yml b/CICD/vars/names.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fa7063762a3477f082cd454fce0101dbcb8a0bbc
--- /dev/null
+++ b/CICD/vars/names.yml
@@ -0,0 +1,3 @@
+---
+domain: massive.org.au
+smtp_smarthost: smtp.monash.edu.au
diff --git a/CICD/vars/passwords.yml b/CICD/vars/passwords.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3d9b84303496ba0dc41b2869783b3183310ba271
--- /dev/null
+++ b/CICD/vars/passwords.yml
@@ -0,0 +1,7 @@
+---
+mungekey: ySdSOpFMyLihx4tQlR0znm07UlvALxB1
+slurmdb_passwd: ySdSOpFMyLihx4tQlR0znm07UlvALxB2
+sqlrootPasswd: ySdSOpFMyLihx4tQlR0znm07UlvALxB3
+sudo_group: systems
+default_user_password_clear: ySdSOpFMyLihx4tQlR0znm07UlvALxBL
+default_user_password: ySdSOpFMyLihx4tQlR0znm07UlvALxBL
diff --git a/CICD/vars/slurm.yml b/CICD/vars/slurm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..65def4d949685d32b7f6b705a6390c9a6dfdab2a
--- /dev/null
+++ b/CICD/vars/slurm.yml
@@ -0,0 +1,45 @@
+---
+desktopNodeList:
+  - { name : 'DesktopNodes', interface : 'eth0' }
+clustername: "m3"
+projectname: "m3"
+slurm_version: 19.05.3-2
+munge_version: 0.5.11
+nhc_version: 1.4.2
+munge_dir: /opt/munge-{{ munge_version }}
+slurm_dir: /opt/slurm-{{ slurm_version }}
+nhc_dir: /opt/nhc-{{ nhc_version }}
+nhc_config_file: nhc.conf
+nhc_log_level: 0
+nhc_emails: nobody@nowhere.nowhere
+nhc_email_subject: "Node Health Check"
+openmpi_version: 1.8.3
+mysql_host: "{{ groups['SQLNodes'][0] }}"
+slurmctrl: "{{ groups['ManagementNodes'][0] }}"
+slurmctrlbackup: "{{ groups['ManagementNodes'][1] }}"
+slurmdbd: "{{ groups['ManagementNodes'][0] }}"
+slurmdbdbackup: "{{ groups['ManagementNodes'][1] }}"
+slurm_use_vpn: false
+slurm_lua: true
+slurmqueues:
+  - {name: batch, group: ComputeNodes, default: yes}
+#  - {name: vis, group: DesktopNodes, default: no}
+slurmlogin: "{{ groups['LoginNodes'][0] }}"
+slurmlogdir: "/var/log"
+slurmctlddebug: {level: 5, log: '/mnt/slurm-logs/slurmctld.log'}
+slurmddebug: {level: 5, log: '/var/log/slurmd.log'}
+slurmschedlog: {level: 5, log: '/mnt/slurm-logs/slurmsched.log'}
+slurmdbdlog: {level: 5, log: '/mnt/slurm-logs/slurmdbd.log'}
+slurmfairshare: {def: false, val: 10000}
+slurmdatadir: "/opt/slurm/var/spool"
+slurmstatedir: "/opt/slurm/var/state"
+slurmsharedstatedir: "/slurmstate"
+slurmpiddir: "/opt/slurm-latest/var/run"
+slurmdbdpiddir: "/opt/slurm/var/run"
+slurmaccount_create_user: "/usr/local/sbin/slurmuseraccount.sh"
+slurm_provision: "/cinderVolume/local/sbin/slurm_provision.sh"
+slurmselecttype: "select/linear"
+slurmfastschedule: "1"
+slurmschedulertype: "sched/backfill"
+restartServerList:
+  - slurm
diff --git a/CICD/vars/vars.yml b/CICD/vars/vars.yml
new file mode 100644
index 0000000000000000000000000000000000000000..83485426b7e370a91d2fd15a5083156c483a1f4e
--- /dev/null
+++ b/CICD/vars/vars.yml
@@ -0,0 +1,21 @@
+---
+sudo_group: systems
+nagios_home: "/var/lib/nagios"
+nvidia_version: "390.46"
+
+yumdisablerepo: 
+ - 'base'
+ - 'extras'
+ - 'updates'
+yumenablerepo: 
+ - 'monashhpc_base'
+ - 'monashhpc_updates'
+ - 'monashhpc_extras'
+ - 'monashhpc_centosplus'
+ - 'monashhpc_otherstuff'
+
+gpumap:
+ 'K1': 'K1'
+ 'K80': 'K80'
+ 'P100-PCIE-16GB': 'P100'
+ 'V100-PCIE-16GB': 'V100'
diff --git a/README.md b/README.md
index df8c0a84b279f979830ad1230ffff2060cd239a6..f06edfcda4693187e6cff8e9daaa00f40a8ba763 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,8 @@ We are working from
 https://docs.google.com/a/monash.edu/spreadsheets/d/1IZNE7vMid_SHYxImGVtQcNUiUIrs_Nu1xqolyblr0AE/edit#gid=0
 as our architecture document.
 
+[![pipeline status](https://gitlab.erc.monash.edu.au/hpc-team/ansible_cluster_in_a_box/badges/cicd/pipeline.svg)](https://gitlab.erc.monash.edu.au/hpc-team/ansible_cluster_in_a_box/commits/cicd)
+
 We aim to make these roles as generic as possible. You should be able to start from an inventory file, an ssh key and a git clone of this and end up with a working cluster. In the longer term we might branch to include utilities to make an inventory file using NeCTAR credentials.
 
 If you need a password use get_or_make_password.py (delegated to the passwword server/localhost) to generate a random one that can be shared between nodes
diff --git a/buildCert.yml b/buildCert.yml
index 6ac5a3df76833d1f00febd5e100e30ff5a75360c..eb6a72f3154d9632effe3b54a6c008ecf1b836c1 100644
--- a/buildCert.yml
+++ b/buildCert.yml
@@ -14,7 +14,7 @@
 - name: "Check key"
   register: key
   stat: "path={{ x509_key_file }}"
-  sudo: true
+  become: true
 
 - name: "Default: we don't need a new certificate"
   set_fact: needcert=False
@@ -39,50 +39,50 @@
 - name: "Creating CSR"
   shell: " cd /etc/easy-rsa/2.0; source ./vars; export EASY_RSA=\"${EASY_RSA:-.}\"; \"$EASY_RSA\"/pkitool --csr {{ x509_csr_args }} {{ common_name }}"
   when: needcert
-  sudo: true
+  become: true
 
 - name: "Copy CSR to ansible host"
   fetch: "src=/etc/easy-rsa/2.0/keys/{{ common_name }}.csr dest=/tmp/{{ common_name }}/ fail_on_missing=yes validate_md5=yes flat=yes"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy CSR to CA"
   delegate_to: "{{ x509_ca_server }}"
   copy: "src=/tmp/{{ ansible_fqdn }}/{{ common_name }}.csr dest=/etc/easy-rsa/2.0/keys/{{ common_name }}.csr force=yes"
   when: needcert
-  sudo: true
+  become: true
 
 - name: "Sign Certificate"
   delegate_to: "{{ x509_ca_server }}"
   shell:    "source ./vars; export EASY_RSA=\"${EASY_RSA:-.}\" ;\"$EASY_RSA\"/pkitool --sign {{ common_name }}"
   args:
     chdir: "/etc/easy-rsa/2.0"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy the Certificate to ansible host"
   delegate_to: "{{ x509_ca_server }}"
   fetch: "src=/etc/easy-rsa/2.0/keys/{{ common_name }}.crt dest=/tmp/{{ common_name }}/ fail_on_missing=yes validate_md5=yes flat=yes"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy the CA Certificate to the ansible host"
   delegate_to: "{{ x509_ca_server }}"
   fetch: "src=/etc/easy-rsa/2.0/keys/ca.crt dest=/tmp/ca.crt fail_on_missing=yes validate_md5=yes flat=yes"
-  sudo: true
+  become: true
   when: "ca_cert.stat.exists == false"
 
 - name: "Copy the certificate to the node"
   copy: "src=/tmp/{{ common_name }}/{{ common_name }}.crt dest={{ x509_cert_file }} force=yes"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy the CA certificate to the node"
   copy: "src=/tmp/ca.crt dest={{ x509_cacert_file }}"
-  sudo: true
+  become: true
   when: "ca_cert.stat.exists == false"
 
 - name: "Copy the key to the correct location"
   shell: "mkdir -p `dirname {{ x509_key_file }}` ; chmod 700 `dirname {{ x509_key_file }}` ; cp /etc/easy-rsa/2.0/keys/{{ common_name }}.key {{ x509_key_file }}"
-  sudo: true
+  become: true
   when: needcert
diff --git a/buildKaraage3.x.yml b/buildKaraage3.x.yml
index 16c15168613e7e18925483d26ce2097064d05c28..fcd336022770c1aace87d490ab52404741fb7bdd 100644
--- a/buildKaraage3.x.yml
+++ b/buildKaraage3.x.yml
@@ -11,7 +11,7 @@
     - easy-rsa-CA
     - easy-rsa-certificate
     - ldapserver
-  sudo: true
+  become: true
   vars:
    - x509_ca_server: "{% for host in groups['ldap-server'] %}{{ hostvars[host]['ansible_fqdn'] }}{% endfor %}"
    - countryName: "AU"
@@ -51,7 +51,7 @@
     - easy-rsa-certificate
     - karaage3.1.17
     - shibboleth-sp
-  sudo: true
+  become: true
   vars:
    - x509_ca_server: "{% for host in groups['ldap-server'] %}{{ hostvars[host]['ansible_fqdn'] }}{% endfor %}"
    - countryName: "AU"
diff --git a/extra_packages/tasks/main.yml b/extra_packages/tasks/main.yml
index dde65974accbf019afbac0d1655e129ce9e84277..5a8c87642139c65c2e5f8ae7aad81ec894964306 100644
--- a/extra_packages/tasks/main.yml
+++ b/extra_packages/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: "Install extra packages"
   yum: "name={{ item }} state=present"
   with_items: "{{ pkgs }}"
-  sudo: true
+  become: true
   ignore_errors: true
   when: ansible_os_family == 'RedHat'
 
@@ -14,7 +14,7 @@
 
 - name: "Fix fusermount user access permission"
   file: path=/bin/fusermount mode="o=rx"
-  sudo: true
-  when: ansible_os_family == 'RedHat' and fusermount_user_access_error | failed
+  become: true
+  when: ansible_os_family == 'RedHat' and fusermount_user_access_error.failed
 
 
diff --git a/installNFS.yml b/installNFS.yml
index 01aff573bfbb9bbfbbd632941c8fe2c9f064764e..6568c45077cdba9a1f26dae797dc20cb059632eb 100644
--- a/installNFS.yml
+++ b/installNFS.yml
@@ -5,7 +5,7 @@
   roles:
     #- OpenVPN-Server 
     - nfs-server
-  sudo: true
+  become: true
   vars: 
     x509_ca_server: vm-118-138-240-224.erc.monash.edu.au
 - 
@@ -17,7 +17,7 @@
     #- OpenVPN-Client
     - syncExports
     - nfs-client
-  sudo: true
+  become: true
   vars: 
     x509_ca_server: vm-118-138-240-224.erc.monash.edu.au
     openvpn_servers: ['vm-118-138-240-224.erc.monash.edu.au']
diff --git a/plays/allnodes.yml b/plays/allnodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ac098f4dd1496fd05c7ce869b09ab54144cd307b
--- /dev/null
+++ b/plays/allnodes.yml
@@ -0,0 +1,47 @@
+- hosts: 'all'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  tasks:
+  - { name: set use shared state, set_fact: usesharedstatedir=False }
+  - { name: set hostgroup, set_fact: hostgroup='ComputeNodes' }
+  tags: [ always ]
+
+- hosts: 'all'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  roles:
+#  - { role: disable_selinux, tags: [ disableselinux ] }
+  - { role: upgrade }
+  - { role: set_password }
+  - { role: etcHosts, tags: [ networking ] }
+#  - { role: config_repos, tags: [ repos ] }
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,ManagementNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  roles:
+  - { role: disable_selinux, tags: [ disableselinux ] }
+  - { role: ldapclient, tags: [ authentication ] }
+  - { role: ssh-password-login, tags: [ authentication ] }
+  - { role: enable_sudo_group, tags: [ authentication, sudo ] }
+  - { role: move_homedir }
+  - { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
+  - { role: SSHKnownHosts, tags: [ known_hosts ] }
+  - { role: jasons_ssh_ca, tags: [ ssh_ca ] }
diff --git a/plays/computenodes.yml b/plays/computenodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..208ad954f57c479461c4270b69abefe20384c468
--- /dev/null
+++ b/plays/computenodes.yml
@@ -0,0 +1,64 @@
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  tasks:
+  - { name: set use shared state, set_fact: usesharedstatedir=False }
+  tags: [ always ]
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  roles:
+  - { role: move_homedir, tags: [ authentication, filesystems ] }
+  - { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
+  - { role: slurm-common, tags: [ slurm, slurm-common ] }
+  - { role: lmod, tags: [ other ] }
+  - { role: enable_modules, default_modules: "lmod", tags: [ other ] }
+  - { role: postfix, tags: [ mail, other ] }
+
+- hosts: 'VisNodes'
+  vars_files:
+  - vars/passwords.yml
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml
+  - vars/slurm.yml
+  - vars/vars.yml
+  roles:
+  - { role: gpu, tags: [ gpu ] }
+
+- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
+  vars_files:
+  - vars/passwords.yml
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml
+  - vars/slurm.yml
+  - vars/vars.yml
+  roles:
+  - { role: slurm_config, tags: [slurm, slurm_config] }
+
+- hosts: 'DesktopNodes,ComputeNodes'
+  vars_files:
+  - vars/passwords.yml
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml
+  - vars/slurm.yml
+  - vars/vars.yml
+  strategy: free
+  roles:
+  - { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
+  - { role: mate-de-install, tags: [ mate-de-install ] }   # TODO this crashes for everything except cmca
diff --git a/plays/files b/plays/files
new file mode 120000
index 0000000000000000000000000000000000000000..feb122881ce2321d72ad6b867bd2a3d01eadaac3
--- /dev/null
+++ b/plays/files
@@ -0,0 +1 @@
+../files
\ No newline at end of file
diff --git a/plays/init_slurmconf.yml b/plays/init_slurmconf.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30667ac53b5b6c387af0bdacb609f09cc8bfa5c3
--- /dev/null
+++ b/plays/init_slurmconf.yml
@@ -0,0 +1,15 @@
+---
+- hosts: 'all'
+  tasks:
+  - include_vars: vars/passwords.yml 
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml 
+  - include_vars: vars/slurm.yml 
+  - include_vars: vars/vars.yml 
+- hosts: 'all'
+  tasks:
+  - { name: setup, setup: }
+- hosts: 'ManagementNodes'
+  roles:
+  - { role: calculateSlurmConf }
diff --git a/plays/make_files.yml b/plays/make_files.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b05925ce73f9be136bb46128961990b938c07910
--- /dev/null
+++ b/plays/make_files.yml
@@ -0,0 +1,22 @@
+---
+# just calculates an etc hosts
+- hosts: 'all'
+  tasks:
+  - include_vars: vars/passwords.yml
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml
+  - include_vars: vars/slurm.yml
+  - include_vars: vars/vars.yml
+- hosts: 'all'
+  tasks:
+  - { name: setup, setup: }
+- hosts: 'ManagementNodes'
+  roles:
+  - { role: calculateEtcHosts }
+  
+#- hosts: 'NFSNodes'
+#  roles:
+#  - { role: calculateExports }
+
+
diff --git a/plays/mgmtnodes.yml b/plays/mgmtnodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5d4241194324fe13739e074b4ee749c969935dfb
--- /dev/null
+++ b/plays/mgmtnodes.yml
@@ -0,0 +1,44 @@
+# Basic stuff to make the nodes functional
+# i.e. upgrade operating systems, etc
+#
+
+- hosts: 'ManagementNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  tasks:
+      #  - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
+  - { name: set use shared state, set_fact: usesharedstatedir=True }
+  tags: [ always ]
+
+- hosts: 'ManagementNodes'
+  strategy: free
+  gather_facts: False
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  roles:
+#  - { role: ldapclient, tags: [ authentication ] }
+#  - { role: ssh-password-login }
+#  - { role: enable_sudo_group }
+#  - { role: make_filesystems, volumes: "{{ glustervolumes }}" }
+#  - { role: gluster_server, volname: "gv", brickmnt: '/gbrick', gluster_servers: "{{ groups['ManagementNodes'] }}", replicas: 2, tags: [ gluster_server ]  }
+#  - { role: gluster_volcreate, volname: "gv", gluster_servers: "{{ groups['ManagementNodes'] }}", brickmnt: '/gbrick', replicas: 2 }
+#  - { role: gluster_client, volname: "gv", gluster_servers: ['mgmt0','mgmt1','sql0'], volmnt: '/glusterVolume' }
+  - { role: nfs-client, nfsMounts: "{{ mgmtNfsMounts }}", tags: [ nfs ] }
+  - { role: slurmdb-config, tags: [ slurm, slurmdb-config ] }
+  - { role: slurm-common, tags: [ slurm, slurm-common ]  }
+  - { role: slurm_config, tags: [ slurm, slurm-config ] }
+  - { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ]  }
+  - { role: telegraf, tags: [ monitoring ] }
+#  - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ]  }
+#  - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
+
diff --git a/plays/nfssqlnodes.yml b/plays/nfssqlnodes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30b3b1ed1d6ddab06d6b538757ef636538338082
--- /dev/null
+++ b/plays/nfssqlnodes.yml
@@ -0,0 +1,84 @@
+# Role to initialize nfs and SQL Nodes
+# 
+#
+
+- hosts: 'all'
+  tasks:
+  - { name: setup, setup: }
+  tags: [ always ]
+  
+#we need this here to gather facts and fill required variables.
+- hosts: 'ManagementNodes'
+  gather_facts: True
+  tasks:
+  - include_vars: vars/passwords.yml 
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml 
+  - include_vars: vars/slurm.yml 
+  - include_vars: vars/vars.yml 
+  - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
+  - { name: set use shared state, set_fact: usesharedstatedir=True }
+  tags: [ always ]
+  
+- hosts: 'SQLNodes,NFSNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  pre_tasks:
+  - { name: set hostgroup, set_fact: hostgroup='SQLNodes', tags: [ always ] }
+  - { name: set use shared state, set_fact: usesharedstatedir=True, tags: [ always ] }
+
+- hosts: 'SQLNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  gather_facts: True
+  roles:
+  - { role: upgrade, tags: [ upgrade ] }
+  - { role: make_filesystems, volumes: "{{ dbvolumes }}" }
+  - { role: mysql, mysql_type: mysql_server,  mysql_root_password: "{{ sqlrootPasswd }}", mysql_user_name: slurmdb, mysql_user_db_name: slurm_acct_db, mysql_user_hosts_group: "{{ groups['ManagementNodes'] }}", mysql_user_password: "{{ slurmdb_passwd }}", tags: [ database ] }
+  - { role: slurm-mysql-config, tags: [database,slurmdb] }
+  tags: [ sql ]
+ 
+- hosts: 'NFSNodes'
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  strategy: free
+  gather_facts: False
+  roles:
+  - { role: make_filesystems, volumes: "{{ nfsvolumes }}" }
+  tasks:
+  - { name: make homedir, file: { path: /nfsvol/home, state: directory }, become: true, become_user: root }
+  - { name: make usr_local, file: { path: /nfsvol/usr_local_centos7, state: directory }, become: true, become_user: root }
+  - { name: make projects, file: { path: /nfsvol/projects, state: directory }, become: true, become_user: root }
+  - { name: make scratch, file: { path: /nfsvol/scratch, state: directory }, become: true, become_user: root }
+  tags: [ nfs ]
+
+- hosts: 'NFSNodes'
+  strategy: free
+  gather_facts: False
+  vars_files: 
+  - vars/passwords.yml 
+  - vars/names.yml
+  - vars/ldapConfig.yml
+  - vars/filesystems.yml 
+  - vars/slurm.yml 
+  - vars/vars.yml 
+  roles:
+  - { role: nfs-server }
+  tags: [ nfs ]
diff --git a/plays/roles b/plays/roles
new file mode 120000
index 0000000000000000000000000000000000000000..d8c4472ca1b65cea039252e137ff3b4ab5d3a555
--- /dev/null
+++ b/plays/roles
@@ -0,0 +1 @@
+../roles
\ No newline at end of file
diff --git a/plays/vars b/plays/vars
new file mode 120000
index 0000000000000000000000000000000000000000..e8d9a6429b3aaab679b98557469104f0f7cc952b
--- /dev/null
+++ b/plays/vars
@@ -0,0 +1 @@
+../vars
\ No newline at end of file
diff --git a/roles/MonashBioinformaticsPlatform_node_allocation/tasks/main.yml b/roles/MonashBioinformaticsPlatform_node_allocation/tasks/main.yml
index 468368d2d8ec076db68aabd76385696f57075939..49bf8f2803a42de7624514bdd62669f085649310 100644
--- a/roles/MonashBioinformaticsPlatform_node_allocation/tasks/main.yml
+++ b/roles/MonashBioinformaticsPlatform_node_allocation/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: make sure /usr/local/bin exists
   file: path=/usr/local/bin state=directory mode=755 owner=root
-  sudo: true
+  become: true
 
 - name: install get_node.py
   copy: src=get_node.py dest=/usr/local/bin/get_node.py mode=755 owner=root
-  sudo: true
+  become: true
 
 - name: install mbp_node
   copy: src=mbp_node dest=/usr/local/bin/mbp_node mode=755 owner=root
-  sudo: true
+  become: true
diff --git a/roles/OpenVPN-Client/handlers/main.yml b/roles/OpenVPN-Client/handlers/main.yml
index 576203a36d646abaeed00f1688f1758cb750eb3b..55c0c9f1a57a76774a3b0769a140cafb1dd6a4e1 100644
--- a/roles/OpenVPN-Client/handlers/main.yml
+++ b/roles/OpenVPN-Client/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
   - name: restart openvpn
     service: name=openvpn state=restarted
-    sudo: true
+    become: true
diff --git a/roles/OpenVPN-Client/tasks/installOpenVPN.yml b/roles/OpenVPN-Client/tasks/installOpenVPN.yml
index 7c1777813feab0e88526addd358c7c5c29d4b9d2..12041d4028f8b273cbb8de09df81a38dd0a83ef9 100644
--- a/roles/OpenVPN-Client/tasks/installOpenVPN.yml
+++ b/roles/OpenVPN-Client/tasks/installOpenVPN.yml
@@ -1,11 +1,11 @@
 --- 
 - name: "Install OpenVPN"
   yum: "name=openvpn state=present"
-  sudo: true
+  become: true
   notify: restart openvpn
 
 - name: "Copying client.conf to the OpenVPN client"
   template: "src=client.conf.j2 dest=/etc/openvpn/client.conf"
-  sudo: true
+  become: true
   notify: restart openvpn
 
diff --git a/roles/OpenVPN-Client/tasks/main.yml b/roles/OpenVPN-Client/tasks/main.yml
index 44aab742b4a4ea1f6a4cdf736ae7c367897c197a..1f5c624fb44c54e725445be045f4552ee592fd6e 100644
--- a/roles/OpenVPN-Client/tasks/main.yml
+++ b/roles/OpenVPN-Client/tasks/main.yml
@@ -4,5 +4,5 @@
 
 - name: "Start OpenVPN"
   service: name=openvpn state=started enabled=yes
-  sudo: true
+  become: true
 
diff --git a/roles/OpenVPN-Server/handlers/main.yml b/roles/OpenVPN-Server/handlers/main.yml
index 576203a36d646abaeed00f1688f1758cb750eb3b..55c0c9f1a57a76774a3b0769a140cafb1dd6a4e1 100644
--- a/roles/OpenVPN-Server/handlers/main.yml
+++ b/roles/OpenVPN-Server/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
   - name: restart openvpn
     service: name=openvpn state=restarted
-    sudo: true
+    become: true
diff --git a/roles/OpenVPN-Server/tasks/installOpenVPN.yml b/roles/OpenVPN-Server/tasks/installOpenVPN.yml
index 05c43b6017eb9de06d88391412ebb22d87de4f7c..58b5dbc0c707cdc246d40ec06f3ec58e6a34780e 100644
--- a/roles/OpenVPN-Server/tasks/installOpenVPN.yml
+++ b/roles/OpenVPN-Server/tasks/installOpenVPN.yml
@@ -2,21 +2,21 @@
 - name: "Install OpenVPN"
   yum: "name=openvpn state=present"
   notify: "restart openvpn"
-  sudo: true
+  become: true
 
 - name: Create path
   shell: mkdir -p {{ dhparms_file | dirname }}
   args:
     creates: "{{ dhparms_file | dirname }}"
-  sudo: true
+  become: true
 
 - name: "Generate DH parameters"
   shell: openssl dhparam -out {{ dhparms_file }} 512
   args:
     creates: "{{ dhparms_file }}"
-  sudo: true
+  become: true
 
 - name: "Configure OpenVPN Server"
   template: "src=server.conf.j2 dest=/etc/openvpn/server.conf"
   notify: "restart openvpn"
-  sudo: true
+  become: true
diff --git a/roles/OpenVPN-Server/tasks/main.yml b/roles/OpenVPN-Server/tasks/main.yml
index b69a74c0cc65f0ad56cf046337355662ea7b3b03..d479ba8b411aac5b76e3f8791e343ee25de5e496 100644
--- a/roles/OpenVPN-Server/tasks/main.yml
+++ b/roles/OpenVPN-Server/tasks/main.yml
@@ -4,4 +4,4 @@
 
 - name: "Start OpenVPN"
   service: name=openvpn state=started enabled=yes
-  sudo: true
+  become: true
diff --git a/roles/apache2/tasks/apacheDebian.yml b/roles/apache2/tasks/apacheDebian.yml
index b62913c6dcb53f519733a3053071387723485197..9b9b751e944f3a4f59281e5eb37f19962891d77d 100644
--- a/roles/apache2/tasks/apacheDebian.yml
+++ b/roles/apache2/tasks/apacheDebian.yml
@@ -5,26 +5,26 @@
  with_items:
   - apache2
   - apache2-dev
- sudo: true
+ become: true
 
 -
  name: "Templating default-ssl site"
  template: src=default-ssl.j2 dest=/etc/apache2/sites-available/default-ssl.conf owner=www-data group=www-data
- sudo: true
+ become: true
 -
  name: "Templating default site"
  template: src=default.j2 dest=/etc/apache2/sites-available/000-default.conf owner=www-data group=www-data
- sudo: true
+ become: true
 
 -
  name: "Enable ssl module"
  apache2_module: state=present name=ssl
- sudo: true
+ become: true
 
 -
  name: "Enable default-ssl site"
  shell: a2ensite default-ssl
- sudo: true
+ become: true
  notify: restart apache2
 
 
diff --git a/roles/apache2/tasks/apacheRedHat.yml b/roles/apache2/tasks/apacheRedHat.yml
index 71e520a982b4be7552aaeea70c55bb83b68c9b4c..2a5a83dacc74919106efa8a5ad817ec808143cfe 100644
--- a/roles/apache2/tasks/apacheRedHat.yml
+++ b/roles/apache2/tasks/apacheRedHat.yml
@@ -2,7 +2,7 @@
 
 -
  name: "Installing Apache"
- sudo: true
+ become: true
  yum: name={{ item }} state=present
  with_items:
   - mod_ssl
@@ -12,19 +12,19 @@
   - httpd-devel
 -
  name: Setting httpd.conf
- sudo: true
+ become: true
  replace: dest=/etc/httpd/conf/httpd.conf regexp="^#ServerName www.example.com:80" replace="ServerName {{ ansible_fqdn }}"
 
 -
  name: "Templating default-ssl site"
  template: src=default-ssl.j2 dest=/etc/httpd/conf.d/ssl.conf owner=apache group=apache
- sudo: true
+ become: true
 
 -
  name: Templating wsgi.conf
- sudo: true
+ become: true
  template: src=wsgi.conf.j2 dest=/etc/httpd/conf.d/wsgi.conf owner=root group=root
 -
  name: Restarting Apache
- sudo: true
+ become: true
  service: name=httpd state=restarted
diff --git a/roles/apache2/tasks/main.yml b/roles/apache2/tasks/main.yml
index f09076e625b90acca9565e84ea215b4cef808d5d..bd4c18322727fd84b8ae48804c2be525a69f897d 100644
--- a/roles/apache2/tasks/main.yml
+++ b/roles/apache2/tasks/main.yml
@@ -6,35 +6,35 @@
 
 - name: "Create apache key directory"
   file: path={{ x509_key | dirname }} state=directory owner={{ apache_user }} mode=700
-  sudo: true
+  become: true
 
 - name: "Create apache cert directory"
   file: path={{ x509_cert | dirname }} state=directory owner={{ apache_user }} mode=755
-  sudo: true
+  become: true
 
 - name: "Copying the apache key file"
   template: src="files/{{ apache_key_file }}" dest="{{ x509_key }}" mode=0600 owner={{ apache_user }} group={{ apache_group }}
-  sudo: true
+  become: true
   when: apache_key_file is defined
 
 - name: "Copying the apache cert file"
   template: src="files/{{ apache_cert_file }}" dest="{{ x509_cert }}" mode=0644 owner={{ apache_user }} group={{ apache_group }}
-  sudo: true
+  become: true
   when: apache_cert_file is defined
 
 - name: "Create log directory, start aoacge will have errors without it"
   file: dest=/etc/apache2/logs state=directory
-  sudo: true
+  become: true
 
 -
  name: "Change permissions for /var/www"
  file: path=/var/www state=directory owner=root group={{ apache_user }} mode=0775
- sudo: true
+ become: true
 
 -
  name: "Starting Apache2"
  service: name=apache2 state=started enabled=yes
- sudo: true
+ become: true
  when: ansible_os_family=="Debian"
 
 
diff --git a/roles/apt-get-update/tasks/main.yml b/roles/apt-get-update/tasks/main.yml
index 460364c28b119fa664cfa571f761dadb001891ae..5a72fc8099073df702c7e6bb60b90b47b8e8c82f 100644
--- a/roles/apt-get-update/tasks/main.yml
+++ b/roles/apt-get-update/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
 - name: apt-get update
   apt: update_cache=True
-  sudo: true
+  become: true
   when: ansible_os_family=="Debian"
diff --git a/roles/calculateEtcHosts/tasks/main.yml b/roles/calculateEtcHosts/tasks/main.yml
index ff71a956bb33175ab3ebe2227ed0b13a55165746..1d1c617050cd33d1fbf3ffe17b3b5696ebc2c254 100644
--- a/roles/calculateEtcHosts/tasks/main.yml
+++ b/roles/calculateEtcHosts/tasks/main.yml
@@ -8,7 +8,7 @@
 - name: make hosts data
   command: /tmp/makehosts.py /tmp/groups {{ domain }}
   register: hosts_data
-               
+
 - name: write hosts file
   template: dest=/tmp/etcHosts src=etcHosts.j2
 
diff --git a/roles/calculateEtcHosts/templates/etcHosts.j2 b/roles/calculateEtcHosts/templates/etcHosts.j2
index 590826feef324307abcff9aa6f8fcbaccfee30e0..88e5044c73072be51b55b3a34b7d565d4c11a313 100644
--- a/roles/calculateEtcHosts/templates/etcHosts.j2
+++ b/roles/calculateEtcHosts/templates/etcHosts.j2
@@ -4,8 +4,6 @@ fe00::0 ip6-localnet
 ff00::0 ip6-mcastprefix
 ff02::1 ip6-allnodes
 ff02::2 ip6-allrouters
-118.138.241.196 hpcldap0.erc.monash.edu.au
-118.138.244.7 consistency0
 
 {% for item in hosts_data.stdout_lines %}
 {{ item }}
diff --git a/roles/calculateExports/tasks/main.yml b/roles/calculateExports/tasks/main.yml
index ce3046bce137ec9701c0fbd435c894cbc1819314..5340665abacacc4c97d3028f5f3812838c71caf5 100644
--- a/roles/calculateExports/tasks/main.yml
+++ b/roles/calculateExports/tasks/main.yml
@@ -1,6 +1,6 @@
 - name: "Templating /etc/exports"
   template: src=exports.j2 dest=/tmp/exports owner=root group=root mode=644
-  sudo: true
+  become: true
 
 - name: "Fetch etcExports"
   fetch: src=/tmp/exports dest=files/etcExports flat=yes
diff --git a/roles/calculateKnownHosts/tasks/main.yml b/roles/calculateKnownHosts/tasks/main.yml
index 5714eb665161bde40336e00b0cf9360ba4721a15..73cc26492306be3afc3139ab0c140d43672926c0 100644
--- a/roles/calculateKnownHosts/tasks/main.yml
+++ b/roles/calculateKnownHosts/tasks/main.yml
@@ -1,6 +1,6 @@
 - name: "Templating /etc/ssh/known_hosts"
   template: src=known_hosts.j2 dest=/tmp/ssh_known_hosts owner=root group=root mode=644
-  sudo: true
+  become: true
   register: sshknownhost 
 
 - name: fetch known_hosts file
@@ -8,6 +8,6 @@
 
 - name: delete ssh_known_hosts
   file: path=/tmp/ssh_known_hosts  state=absent
-  sudo: true
+  become: true
 
 
diff --git a/roles/calculateNhcConfig/tasks/main.yml b/roles/calculateNhcConfig/tasks/main.yml
index b6f9f9f7e6c0ce8de690265995faeab47328879e..47e11eb9aee223222baf5752b79527bfd3e821de 100644
--- a/roles/calculateNhcConfig/tasks/main.yml
+++ b/roles/calculateNhcConfig/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: "Templating nhc.conf"
   template: src=nhc.conf.j2 dest=/tmp/nhc.conf owner=root group=root mode=644
-  sudo: true
+  become: true
 
 - name: fetch nhc.conf
   fetch: src=/tmp/nhc.conf dest=files/nhc.conf flat=yes
diff --git a/roles/calculateSlurmConf/tasks/main.yml b/roles/calculateSlurmConf/tasks/main.yml
index 800ad4a5db8148fe6ff6a02d2906285ba157deed..2c9e3352ab2680e691150b64791a94464802f85e 100644
--- a/roles/calculateSlurmConf/tasks/main.yml
+++ b/roles/calculateSlurmConf/tasks/main.yml
@@ -1,13 +1,13 @@
 - name: "Templating slurm.conf"
   template: src=slurm.conf.j2 dest=/tmp/slurm.conf owner=root group=root mode=644
-  sudo: true
+  become: true
 
 - name: fetch slurm.conf
   fetch: src=/tmp/slurm.conf dest=files/slurm.conf flat=yes
 
 - name: "Templating slurmdbd.conf"
   template: src=slurmdbd.conf.j2 dest=/tmp/slurmdbd.conf owner=root group=root mode=644
-  sudo: true
+  become: true
 
 - name: fetch slurm.conf
   fetch: src=/tmp/slurmdbd.conf dest=files/slurmdbd.conf flat=yes
diff --git a/roles/calculateSlurmConf/templates/slurmdbd.conf.j2 b/roles/calculateSlurmConf/templates/slurmdbd.conf.j2
index dc471330d5cdf3368efac17a85ba168e1ed4eab2..0fa258b1212b8eb266b30a4fafebd401e231a992 100644
--- a/roles/calculateSlurmConf/templates/slurmdbd.conf.j2
+++ b/roles/calculateSlurmConf/templates/slurmdbd.conf.j2
@@ -32,7 +32,7 @@ LogFile={{ slurmdbdlog.log }}
 #DebugLevel=
 #LogFile=
 {% endif %}
-PidFile=/var/run/slurmdbd.pid
+PidFile=/opt/slurm/var/run/slurmdbd.pid
 #PluginDir=/usr/lib/slurm
 #PrivateData=accounts,users,usage,jobs
 #TrackWCKey=yes
diff --git a/roles/centos7Base/tasks/installBasePackages.yml b/roles/centos7Base/tasks/installBasePackages.yml
index 74f25096720e42f66a08b597b87bb3c00ef076d7..be70570c7041a91a3adc3623602ec507165e73da 100644
--- a/roles/centos7Base/tasks/installBasePackages.yml
+++ b/roles/centos7Base/tasks/installBasePackages.yml
@@ -2,15 +2,15 @@
 -
  name: Removing the RDO repository
  file: path=/etc/yum.repos.d/rdo-release.repo state=absent
- sudo: true
+ become: true
 -
  name: Install epel-release
  yum: name=epel-release-7-5.noarch state=present
- sudo: true
+ become: true
 -
  name: Enable epel
  command: yum-config-manager --enable epel
- sudo: true
+ become: true
 -
  name: Installing Base Packages
  yum: name={{ item }} state=present
@@ -18,14 +18,14 @@
   - yum-utils
   - deltarpm-3.6-3.el7.x86_64
   - yum-plugin-versionlock
- sudo: true
+ become: true
 -
  name: Installing Core packages
  yum: name="{{ item.software }}-{{ item.version }}.{{ item.arch }}" state=present
  with_items: package_list
- sudo: true
+ become: true
 -
  name: Performing version lock on the packages
  shell: yum versionlock \*
- sudo: true
+ become: true
 
diff --git a/roles/collectd/tasks/main.yml b/roles/collectd/tasks/main.yml
index 082ea96fca59f1760ea50b2bd780414fa66d78e4..fccbe6af077098d463721d352ab9d20e878f3079 100644
--- a/roles/collectd/tasks/main.yml
+++ b/roles/collectd/tasks/main.yml
@@ -50,5 +50,5 @@
   service: name=collectd state=restarted enabled=true
   become: true
   become_user: root
-  when: configchange | changed
+  when: configchange.changed
 
diff --git a/roles/config_repos/defaults/main.yml b/roles/config_repos/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a5ac4ab98ab70895b62ab4c1140122007b29df96
--- /dev/null
+++ b/roles/config_repos/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# default variables for  config_repos
+
+reposervername: consistency0
+reposerverip: 118.138.244.7
diff --git a/roles/config_repos/tasks/main.yml b/roles/config_repos/tasks/main.yml
index f397640fc77541b7daf56e35a499bef69e561ba8..684c327aa9be6fb9c90f588bf44857f94794bfe7 100644
--- a/roles/config_repos/tasks/main.yml
+++ b/roles/config_repos/tasks/main.yml
@@ -1,8 +1,12 @@
 ---
 
 - name: make sure out repo server is resolvable
-  lineinfile: dest=/etc/hosts line="118.138.244.7 consistency0"
-  sudo: true
+  lineinfile:
+    dest: /etc/hosts
+    line: "{{ reposerverip }} {{ reposervername }}"  #this is duplicated in the role calculateEtcHosts
+    owner: root
+    group: root
+  become: true
 
 
 #- name: remove default repos
@@ -29,8 +33,8 @@
 #  when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == "7"
 
 - name: add our repos
-  copy: src={{ item }} dest=/etc/yum.repos.d/{{ item }}
-  sudo: true
+  template: src={{ item }}.j2 dest=/etc/yum.repos.d/{{ item }}
+  become: true
   when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == "7"
   with_items:
   - monashhpc_base.repo
@@ -42,40 +46,41 @@
   shell: yum repolist all | grep enabled | cut -f 1 -d '/' | sed -s 's/\!//'
   register: repolist
   check_mode: no
+  changed_when: False
   args:
     warn: False
 
 - name: disable unwanted repos
   shell: yum-config-manager --disable "{{ item }}"
-  with_items: "{{repolist.stdout_lines|difference(yumenablerepo)}}"
+  with_items: "{{ repolist.stdout_lines|difference(yumenablerepo) }}"
   become: true
   become_user: root
-  ignore_errors: true
+  ignore_errors: false
 
 
 #- name: Enable epel
 #  command: yum-config-manager --enable epel
-#  sudo: true
+#  become: true
 #  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
 
 # Use mate DE on systems that have moved to gnome3, since there is no gpu acceleration by default on NeCTAR openstack
 # Trusty (Ubuntu 14.04 LTS) needs repos added. Wheezy (Debian Stable) gets mate from backports, Utopic (Ubuntu 14.10) Jessie (Debian testing) and Sid (Debian unstable) get it by default
 - name: add repos apt
   shell: "add-apt-repository -y ppa:ubuntu-mate-dev/ppa"
-  sudo: true
+  become: true
   when: ansible_distribution_release == 'trusty'
 
 - name: add repos apt
   shell: "add-apt-repository -y ppa:ubuntu-mate-dev/trusty-mate"
-  sudo: true
+  become: true
   when: ansible_distribution_release == 'trusty'
 
 - name: add repos apt
   shell: "add-apt-repository -y ppa:gluster/glusterfs-3.7"
-  sudo: true
+  become: true
   when: ansible_distribution == 'Ubuntu'
 
 - name: apt-get update
   apt: update_cache=True
-  sudo: true
+  become: true
   when: ansible_os_family=="Debian"
diff --git a/roles/config_repos/files/epel.repo b/roles/config_repos/templates/epel.repo.j2
similarity index 70%
rename from roles/config_repos/files/epel.repo
rename to roles/config_repos/templates/epel.repo.j2
index 053ed43c62542e860cdb16660bdab1918b90fd7d..4b1017bf11cd106d2f54fb47bc7accb2ab19b28b 100644
--- a/roles/config_repos/files/epel.repo
+++ b/roles/config_repos/templates/epel.repo.j2
@@ -2,7 +2,7 @@
 
 [epel]
 name=Extra Packages for Enterprise Linux 7 - $basearch
-baseurl=https://consistency0/epel/$releasever/$basearch/
+baseurl=https://{{ reposervername }}/epel/$releasever/$basearch/
 enabled=0
 gpgcheck=0
 sslverify=false
diff --git a/roles/config_repos/files/glusterfs-epel.repo b/roles/config_repos/templates/glusterfs-epel.repo.j2
similarity index 100%
rename from roles/config_repos/files/glusterfs-epel.repo
rename to roles/config_repos/templates/glusterfs-epel.repo.j2
diff --git a/roles/config_repos/files/monashhpc_base.repo b/roles/config_repos/templates/monashhpc_base.repo.j2
similarity index 64%
rename from roles/config_repos/files/monashhpc_base.repo
rename to roles/config_repos/templates/monashhpc_base.repo.j2
index dfbb0a77a5110c72150ecc7e0481368487e2abc4..0f0cd119928b6e6803e491a495ff625b9fc54cf9 100644
--- a/roles/config_repos/files/monashhpc_base.repo
+++ b/roles/config_repos/templates/monashhpc_base.repo.j2
@@ -2,24 +2,24 @@
 
 [monashhpc_base]
 name=MonashHPC base repository mirrored to control the update process
-baseurl=https://consistency0/centos/$releasever/os/$basearch/
+baseurl=https://{{ reposervername }}/centos/$releasever/os/$basearch/
 enabled=1
 sslverify=false
 
 [monashhpc_updates]
 name=MonashHPC base repository mirrored to control the update process
-baseurl=https://consistency0/centos/$releasever/updates/$basearch/
+baseurl=https://{{ reposervername }}/centos/$releasever/updates/$basearch/
 enabled=1
 sslverify=false
 
 [monashhpc_extras]
 name=MonashHPC base repository mirrored to control the update process
-baseurl=https://consistency0/centos/$releasever/extras/$basearch/
+baseurl=https://{{ reposervername }}/centos/$releasever/extras/$basearch/
 enabled=1
 sslverify=false
 
 [monashhpc_centosplus]
 name=MonashHPC base repository mirrored to control the update process
-baseurl=https://consistency0/centos/$releasever/centosplus/$basearch/
+baseurl=https://{{ reposervername }}/centos/$releasever/centosplus/$basearch/
 enabled=1
 sslverify=false
diff --git a/roles/config_repos/files/monashhpc_others.repo b/roles/config_repos/templates/monashhpc_others.repo.j2
similarity index 70%
rename from roles/config_repos/files/monashhpc_others.repo
rename to roles/config_repos/templates/monashhpc_others.repo.j2
index e78702bf53f5fe0a1284c0474aac75bba615aabd..bd0b06270231095c8fa52cdffdb65f6b1f1202a7 100644
--- a/roles/config_repos/files/monashhpc_others.repo
+++ b/roles/config_repos/templates/monashhpc_others.repo.j2
@@ -2,7 +2,7 @@
 
 [monashhpc_otherstuff]
 name=MonashHPC base repository mirrored to control the update process
-baseurl=https://consistency0/centos/hpcsystems/$releasever/$basearch/
+baseurl=https://{{ reposervername }}/centos/hpcsystems/$releasever/$basearch/
 enabled=1
 sslverify=false
 gpgcheck=0
diff --git a/roles/config_repos_upstream/tasks/main.yml b/roles/config_repos_upstream/tasks/main.yml
index 2cd2254249275933b4416bde711758fc76e8611e..b14ba33d308a9740f9fed30df65502724f287670 100644
--- a/roles/config_repos_upstream/tasks/main.yml
+++ b/roles/config_repos_upstream/tasks/main.yml
@@ -1,38 +1,38 @@
 ---
 - name: add gluster repo
   copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
-  sudo: true
+  become: true
   when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
 
 - name: enable epel
   yum: name=epel-release state='latest'
-  sudo: true
+  become: true
   when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
 
 
 - name: Enable epel
   command: yum-config-manager --enable epel
-  sudo: true
+  become: true
   when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
 
 # Use mate DE on systems that have moved to gnome3, since there is no gpu acceleration by default on NeCTAR openstack
 # Trusty (Ubuntu 14.04 LTS) needs repos added. Wheezy (Debian Stable) gets mate from backports, Utopic (Ubuntu 14.10) Jessie (Debian testing) and Sid (Debian unstable) get it by default
 - name: add repos apt
   shell: "add-apt-repository -y ppa:ubuntu-mate-dev/ppa"
-  sudo: true
+  become: true
   when: ansible_distribution_release == 'trusty'
 
 - name: add repos apt
   shell: "add-apt-repository -y ppa:ubuntu-mate-dev/trusty-mate"
-  sudo: true
+  become: true
   when: ansible_distribution_release == 'trusty'
 
 - name: add repos apt
   shell: "add-apt-repository -y ppa:gluster/glusterfs-3.7"
-  sudo: true
+  become: true
   when: ansible_distribution == 'Ubuntu'
 
 - name: apt-get update
   apt: update_cache=True
-  sudo: true
+  become: true
   when: ansible_os_family=="Debian"
diff --git a/roles/cron-access/tasks/main.yml b/roles/cron-access/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..09e97aea34682a7e66782f3f2e98389ffa3f892a
--- /dev/null
+++ b/roles/cron-access/tasks/main.yml
@@ -0,0 +1,8 @@
+- name: Add a pam_access rule for users who can run cron jobs
+  lineinfile:
+    path: /etc/security/access.conf
+    state: present
+    insertbefore: '^-:ALL EXCEPT root systems ec2-user debian ubuntu admin :ALL'
+    line: '+: cron-users : cron crond :0'
+  become: true
+  become_user: root
diff --git a/roles/cvl-menus/tasks/main.yml b/roles/cvl-menus/tasks/main.yml
index b103cde02503c65a51d0f8f7a7614596ccd91882..dc1a1775cf3056b65969813774b2bc50c4b2f539 100644
--- a/roles/cvl-menus/tasks/main.yml
+++ b/roles/cvl-menus/tasks/main.yml
@@ -2,17 +2,17 @@
   shell: mkdir -p /etc/xdg/menus/applications-merged 
   args:
     creates: "/etc/xdg/menus/applications-merged"
-  sudo: true
+  become: true
   
 - name: configure the cvl menus
   copy: src=cvl.menu dest=/etc/xdg/menus/applications-merged/cvl.menu mode=644 owner=root group=root
-  sudo: true
+  become: true
 
 - name: configure the cvl background image
   copy: src={{ item }} dest=/usr/share/backgrounds/{{ item }} mode=644 owner=root group=root
   with_items:
     - cvl_desktop.svg
     - default.xml
-  sudo: true
+  become: true
 
 
diff --git a/roles/cvlExtraFiles/tasks/main.yml b/roles/cvlExtraFiles/tasks/main.yml
index 6ecb7068275297b5f09ec3157814095677793a15..d8a6ed4d32c88bdc5cb3530e5e4d7beea648e461 100644
--- a/roles/cvlExtraFiles/tasks/main.yml
+++ b/roles/cvlExtraFiles/tasks/main.yml
@@ -2,5 +2,5 @@
 - name: extra file symbolic links 
   file: src={{ item.src }} path={{ item.dest }} state={{ item.type }} force=yes
   with_items: "{{ extraFiles }}"
-  sudo: true
+  become: true
   when: extraFiles is defined
diff --git a/roles/cvlFijiCronJob/tasks/main.yml b/roles/cvlFijiCronJob/tasks/main.yml
index f3f586d0dd7ee60957f4c292e9b0b6d86a7a94b7..01fc36fddb13e59f6909de639bd3dd0b69d00292 100644
--- a/roles/cvlFijiCronJob/tasks/main.yml
+++ b/roles/cvlFijiCronJob/tasks/main.yml
@@ -6,5 +6,5 @@
 
 - name: CVL fiji cron job 
   cron: name=fijiupdate job=/cvl/local/bin/fiji_daily_update.sh user=root hour=5 state=present
-  sudo: true
+  become: true
   when: fijiupdatescript | success
diff --git a/roles/deploy-xorg/tasks/main.yml b/roles/deploy-xorg/tasks/main.yml
index c1e2d8af8b3c3ff82b848ec5a1b182251788208c..fb5ec1bb27a58127edb7f8c204cce430f16a7b27 100644
--- a/roles/deploy-xorg/tasks/main.yml
+++ b/roles/deploy-xorg/tasks/main.yml
@@ -3,6 +3,7 @@
   script: scripts/nvidia-xconf-gen.py
   register: nvidiacards
   check_mode: no
+  changed_when: False
 
 - name: set env for nvidia_card_lists
   set_fact: 
diff --git a/roles/disable_selinux/tasks/main.yml b/roles/disable_selinux/tasks/main.yml
index 98feca3ad6d7f1a72b4e5c4ad91df5b8782d3f75..4816690bc2a507ede98fd39c9343ef2d7eca91ea 100644
--- a/roles/disable_selinux/tasks/main.yml
+++ b/roles/disable_selinux/tasks/main.yml
@@ -3,3 +3,4 @@
   selinux: state=disabled
   become: True
   become_user: root
+  when: ansible_os_family=="RedHat"
diff --git a/roles/easy-rsa-certificate/tasks/buildCert.yml b/roles/easy-rsa-certificate/tasks/buildCert.yml
index df849b969aee561f41ddb16a4481e1ace89b890c..0850d47a23b2c78698c5876473c7be343d74e239 100644
--- a/roles/easy-rsa-certificate/tasks/buildCert.yml
+++ b/roles/easy-rsa-certificate/tasks/buildCert.yml
@@ -2,22 +2,22 @@
 - name: "Check client ca certificate"
   register: ca_cert
   stat: "path={{ x509_cacert_file }}"
-  sudo: true
+  become: true
 
 - name: "Check certificate and key"
   shell: (openssl x509 -noout -modulus -in {{ x509_cert_file }}  | openssl md5 ; openssl rsa -noout -modulus -in {{ x509_key_file }} | openssl md5) | uniq | wc -l
   register: certcheck
-  sudo: true
+  become: true
 
 - name: "Check certificate"
   register: cert
   stat: "path={{ x509_cert_file }}"
-  sudo: true
+  become: true
 
 - name: "Check key"
   register: key
   stat: "path={{ x509_key_file }}"
-  sudo: true
+  become: true
 
 - name: "Default: we don't need a new certificate"
   set_fact: needcert=False
@@ -35,7 +35,7 @@
   delegate_to: "{{ x509_ca_server }}"
   shell: rm -rf /etc/easy-rsa/2.0/keys/{{ x509_common_name }}.*
   when: cert is defined and cert.stat.size == 0
-  sudo: true
+  become: true
 
 - name: "set needcert if cert doesn't match key"
   set_fact: needcert=True
@@ -49,7 +49,7 @@
 - name: "Creating CSR"
   shell: "cd /etc/easy-rsa/2.0; . ./vars; export EASY_RSA=\"${EASY_RSA:-.}\"; \"$EASY_RSA\"/pkitool --csr {{ x509_csr_args }} {{ x509_common_name }}"
   when: needcert
-  sudo: true
+  become: true
 
 - name: "Create node tmp directory"
   delegate_to: 127.0.0.1
@@ -57,7 +57,7 @@
 
 - name: "Copy CSR to ansible host"
   fetch: "src=/etc/easy-rsa/2.0/keys/{{ x509_common_name }}.csr dest=/tmp/{{ inventory_hostname }}/{{ inventory_hostname }}.csr fail_on_missing=yes validate_md5=yes flat=yes"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy CSR to CA"
@@ -65,49 +65,49 @@
   delegate_to: "{{ x509_ca_server }}"
   copy: "src=/tmp/{{ inventory_hostname }}/{{ inventory_hostname }}.csr dest=/etc/easy-rsa/2.0/keys/{{ x509_common_name }}.csr force=yes"
   when: needcert
-  sudo: true
+  become: true
 
 - name: "Sign Certificate"
   remote_user: "{{ hostvars[x509_ca_server]['ansible_user'] }}"
   delegate_to: "{{ x509_ca_server }}"
   shell:    "cd /etc/easy-rsa/2.0; . ./vars; export EASY_RSA=\"${EASY_RSA:-.}\" ;\"$EASY_RSA\"/pkitool --sign {{ x509_sign_args }} {{ x509_common_name }}"
   when: needcert
-  sudo: true
+  become: true
 
 - name: "Copy the Certificate to ansible host"
   remote_user: "{{ hostvars[x509_ca_server]['ansible_user'] }}"
   delegate_to: "{{ x509_ca_server }}"
   fetch: "src=/etc/easy-rsa/2.0/keys/{{ x509_common_name }}.crt dest=/tmp/{{ inventory_hostname }}/{{ x509_common_name }}.crt fail_on_missing=yes validate_md5=yes flat=yes"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy the CA Certificate to the ansible host"
   remote_user: "{{ hostvars[x509_ca_server]['ansible_user'] }}"
   delegate_to: "{{ x509_ca_server }}"
   fetch: "src=/etc/easy-rsa/2.0/keys/ca.crt dest=/tmp/{{ inventory_hostname }}/ca.crt fail_on_missing=yes validate_md5=yes flat=yes"
-  sudo: true
+  become: true
   when: "ca_cert.stat.exists == false"
 
 - name: "Make sure the path to the certificate exists"
   shell: "mkdir -p `dirname {{ x509_cert_file }}` ; chmod 755  `dirname {{ x509_cert_file }}`"
-  sudo: true
+  become: true
 
 - name: "Copy the certificate to the node"
   copy: "src=/tmp/{{ inventory_hostname }}/{{ x509_common_name }}.crt dest=/tmp/{{ x509_common_name }}.crt force=yes"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy the certificate to the right location"
   shell: "cp -f /tmp/{{ x509_common_name }}.crt {{ x509_cert_file }}"
-  sudo: true
+  become: true
   when: needcert
 
 - name: "Copy the CA certificate to the node"
   copy: "src=/tmp/{{ inventory_hostname }}/ca.crt dest={{ x509_cacert_file }}"
-  sudo: true
+  become: true
   when: "ca_cert.stat.exists == false"
 
 - name: "Copy the key to the correct location"
   shell: "mkdir -p `dirname {{ x509_key_file }}` ; chmod 700 `dirname {{ x509_key_file }}` ; cp /etc/easy-rsa/2.0/keys/{{ x509_common_name }}.key {{ x509_key_file }}"
-  sudo: true
+  become: true
   when: needcert
diff --git a/roles/easy-rsa-common/tasks/copyConfigurationFile.yml b/roles/easy-rsa-common/tasks/copyConfigurationFile.yml
index c7e3635b36f2e4e862fe47b6e70285f26bf26dd5..766ad10375ded9e6b9b023629430ae95709e0be7 100644
--- a/roles/easy-rsa-common/tasks/copyConfigurationFile.yml
+++ b/roles/easy-rsa-common/tasks/copyConfigurationFile.yml
@@ -17,4 +17,4 @@
   args:
     chdir: "/etc/easy-rsa/2.0"
     creates: "/etc/easy-rsa/2.0/keys"
-  sudo: true
+  become: true
diff --git a/roles/easy-rsa-common/tasks/installEasyRsaSource.yml b/roles/easy-rsa-common/tasks/installEasyRsaSource.yml
index 922cdf2dfc5964fd8ea1ae7c2d3c9fef7742e6f0..3c1bd4d2e4a25aef218cc9628e5dfb39b4d70d5d 100644
--- a/roles/easy-rsa-common/tasks/installEasyRsaSource.yml
+++ b/roles/easy-rsa-common/tasks/installEasyRsaSource.yml
@@ -17,5 +17,5 @@
   args:
     creates: "/etc/easy-rsa/2.0/keys/"
   when: installed|changed
-  sudo: true
+  become: true
  
diff --git a/roles/easy-rsa-common/tasks/yumList.yml b/roles/easy-rsa-common/tasks/yumList.yml
index 485a0f28a0769e8afe09f20c3910b3fd2ffb75c1..317196bacbcf78f8c041c4da4dbb3932c005861a 100644
--- a/roles/easy-rsa-common/tasks/yumList.yml
+++ b/roles/easy-rsa-common/tasks/yumList.yml
@@ -7,7 +7,7 @@
     - tcsh
     - bind-utils
   yum: "name={{ item }} state=present"
-  sudo: true
+  become: true
 -
   name: "Setting hostname"
   shell: sysctl kernel.hostname={{ inventory_hostname }} 
diff --git a/roles/enable_modules/tasks/main.yml b/roles/enable_modules/tasks/main.yml
index 3ae936b3b0b2aac7da49cb17a98db177259584fd..77e5c1fc305dffe2dc191f8100da6186be22777a 100644
--- a/roles/enable_modules/tasks/main.yml
+++ b/roles/enable_modules/tasks/main.yml
@@ -30,6 +30,12 @@
   become_user: root
   when: default_modules == "modulecmd"
 
+- name: template patchmodulecmd bash
+  template: src=patchmodulecmd.sh.j2 dest=/etc/profile.d/patchmodulecmd.sh
+  become: true
+  become_user: root
+  when: default_modules == "modulecmd"
+
 - name: template modulecmd csh
   template: src=modulecmd.csh.j2 dest=/etc/profile.d/modulecmd.csh
   become: true
diff --git a/roles/enable_modules/templates/patchmodulecmd.sh.j2 b/roles/enable_modules/templates/patchmodulecmd.sh.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c467fa83290b2cb8f7d2edd4904c9732af7d9ac8
--- /dev/null
+++ b/roles/enable_modules/templates/patchmodulecmd.sh.j2
@@ -0,0 +1,4 @@
+
+module() { eval `/usr/bin/modulecmd bash $*`;  /usr/local/bin/modulelog $*;}
+export -f module
+
diff --git a/roles/enable_root/tasks/main.yml b/roles/enable_root/tasks/main.yml
index d7c65d2cd51bbe4d60bc0add181f778c552be49f..660c74f29556f6253a425d6fcb2822ddf9ae520b 100644
--- a/roles/enable_root/tasks/main.yml
+++ b/roles/enable_root/tasks/main.yml
@@ -1,3 +1,3 @@
 - name: add key to root ssh
   template: dest=/root/.ssh/authorized_keys mode=600 owner=root group=root src=authorized_keys.j2
-  sudo: true
+  become: true
diff --git a/roles/etcHosts/tasks/main.yml b/roles/etcHosts/tasks/main.yml
index dadad0ef4e7688fa691fd87696879f84ca6f5d39..677f6e571e728ac91fe951158295d2d71cfcf2a7 100644
--- a/roles/etcHosts/tasks/main.yml
+++ b/roles/etcHosts/tasks/main.yml
@@ -1,39 +1,41 @@
 - name: install hosts file
   copy: src=files/etcHosts dest=/etc/hosts owner=root mode=644
-  sudo: true
+  become: true
 
 - name: get hostname by sysctl
   shell: sysctl kernel.hostname | cut -f 3 -d " "
   register: sysctl_hostname
   check_mode: no
+  changed_when: False
   become: true
   become_user: root
 
 - name: set hostname by sysctl
   shell: sysctl kernel.hostname="{{ inventory_hostname }}"
-  sudo: true
-  when: not "{{ sysctl_hostname.stdout }}" == "{{ inventory_hostname }}"
+  become: true
+  when: not sysctl_hostname.stdout == inventory_hostname
 
 - name: get domainname by sysctl
   shell: sysctl kernel.domainname | cut -f 3 -d " "
   register: sysctl_domainname
   check_mode: no
+  changed_when: False
   become: true
   become_user: root
 
 - name: set domainname by sysctl
   shell: sysctl kernel.domainname="{{ domain }}"
-  sudo: true
-  when: not "{{ sysctl_domainname.stdout }}" == "{{ domain }}"
+  become: true
+  when: not sysctl_domainname.stdout == domain
 
 - name: set /etc/sysconfig/network on CentOS 6
   lineinfile: dest=/etc/sysconfig/network line='HOSTNAME={{ inventory_hostname }}' regexp='^HOSTNAME'
-  sudo: true
+  become: true
   when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "6"
 
 - name: set /etc/sysctl.conf on Debian 8
   lineinfile: dest=/etc/sysctl.conf line='kernel.domainname = {{ domain }}' regexp='^#kernel.domainname'
-  sudo: true
+  become: true
   when: ansible_distribution == "Debian" and ansible_distribution_major_version == "8"
 
 - name: set preserve hostname on CentOS
@@ -41,7 +43,7 @@
   args:
     dest: /etc/cloud/cloud.cfg 
     line: "preserve_hostname: True"
-  sudo: true
+  become: true
   when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
 
 - name: set /etc/hostname
diff --git a/roles/extra_packages/README.md b/roles/extra_packages/README.md
index ffa7d0b01fbadf231490c94caecde085275ef2fc..29b7f54a025d5b3dd94084e616eb5b6e05b6e4b9 100644
--- a/roles/extra_packages/README.md
+++ b/roles/extra_packages/README.md
@@ -1 +1,11 @@
 This role adds all the packages we think are useful but aren't clearly a dependency of some function.
+
+Before calling, you may want to define the following lists (see the sketch below):
+
+extra_packages  # list of yum packages to install; if it is not defined, see the task code for the alternative file the role loads
+excludes        # list of packages to exclude from the yum transaction
+
+
+Usage:
+  roles:
+  - { role: extra_packages, tags: [ other, extra_packages ] }
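For example, a caller could define the two lists and then include the role; the package names here are placeholders, and excludes matches the variable the task file joins into yum's exclude option:

  vars:
    extra_packages:
      - htop        # placeholder package names
      - strace
    excludes:
      - kernel*     # packages to keep out of the yum transaction

  roles:
    - { role: extra_packages, tags: [ other, extra_packages ] }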
diff --git a/roles/extra_packages/tasks/main.yml b/roles/extra_packages/tasks/main.yml
index 68e5226db7662f6be3b37ecafc4fa6c7764f08b2..46ce40a581ae7baf34981217f2313fb5d506841a 100644
--- a/roles/extra_packages/tasks/main.yml
+++ b/roles/extra_packages/tasks/main.yml
@@ -33,6 +33,14 @@
    - '"DGX" in ansible_product_name'
    - '"RedHat" in ansible_distribution'
   register: result
+ 
+- name: "Install extra packages with the epel repo enabled"
+  yum: "name={{ item }} exclude={{ excludes|join(',') }} update_cache=yes state=present enablerepo=epel"
+  with_items: "{{ extra_packages }}"
+  become: true
+  become_user: root
+  when: ansible_os_family == 'RedHat' 
+  register: result
 
 - name: "Show yum install output"
   debug: 
diff --git a/roles/fail2ban/handlers/main.yml b/roles/fail2ban/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5be6582979900ad06d458d2d544cefd5146f02b2
--- /dev/null
+++ b/roles/fail2ban/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+- name: restart fail2ban
+  systemd:
+    name: fail2ban
+    enabled: yes
+    state: restarted
+  become: true
+  become_user: root
diff --git a/roles/fail2ban/tasks/main.yml b/roles/fail2ban/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f5f9c59b9abfa6ffd01abcc27d2b1b0f6a1a85e4
--- /dev/null
+++ b/roles/fail2ban/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+- name: Install fail2ban on Red Hat system
+  yum:
+    name: 
+    - fail2ban-server
+    - fail2ban-sendmail
+    state: present
+  become: true
+  become_user: root
+  when: ansible_os_family == "RedHat"
+
+- name: Copy jail.conf.j2 to /etc/fail2ban/jail.conf
+  template:
+    src: jail.conf.j2
+    dest: /etc/fail2ban/jail.conf
+    backup: yes
+    mode: 0644
+    owner: root
+    group: root
+  become: true
+  become_user: root
+  notify:
+    - restart fail2ban
+
+- name: Enable fail2ban service
+  systemd:
+    name: fail2ban
+    enabled: yes
+    state: started
+  become: true
+  become_user: root
diff --git a/roles/fail2ban/templates/jail.conf.j2 b/roles/fail2ban/templates/jail.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..b0cf21862f7da89210563ebc952a9a9ccb603771
--- /dev/null
+++ b/roles/fail2ban/templates/jail.conf.j2
@@ -0,0 +1,863 @@
+#
+# WARNING: heavily refactored in 0.9.0 release.  Please review and
+#          customize settings for your setup.
+#
+# Changes:  in most of the cases you should not modify this
+#           file, but provide customizations in jail.local file,
+#           or separate .conf files under jail.d/ directory, e.g.:
+#
+# HOW TO ACTIVATE JAILS:
+#
+# YOU SHOULD NOT MODIFY THIS FILE.
+#
+# It will probably be overwritten or improved in a distribution update.
+#
+# Provide customizations in a jail.local file or a jail.d/customisation.local.
+# For example to change the default bantime for all jails and to enable the
+# ssh-iptables jail the following (uncommented) would appear in the .local file.
+# See man 5 jail.conf for details.
+#
+# [DEFAULT]
+# bantime = 3600
+#
+# [sshd]
+# enabled = true
+#
+# See jail.conf(5) man page for more information
+
+
+
+# Comments: use '#' for comment lines and ';' (following a space) for inline comments
+
+
+[INCLUDES]
+
+#before = paths-distro.conf
+before = paths-fedora.conf
+
+# The DEFAULT allows a global definition of the options. They can be overridden
+# in each jail afterwards.
+
+[DEFAULT]
+
+#
+# MISCELLANEOUS OPTIONS
+#
+
+# "ignoreip" can be an IP address, a CIDR mask or a DNS host. Fail2ban will not
+# ban a host which matches an address in this list. Several addresses can be
+# defined using space (and/or comma) separator.
+#ignoreip = 127.0.0.1/8
+ignoreip = {{ fail2ban_whitelist_all }}
+
+# External command that will take an tagged arguments to ignore, e.g. <ip>,
+# and return true if the IP is to be ignored. False otherwise.
+#
+# ignorecommand = /path/to/command <ip>
+ignorecommand =
+
+# "bantime" is the number of seconds that a host is banned.
+bantime  = 600
+
+# A host is banned if it has generated "maxretry" during the last "findtime"
+# seconds.
+findtime  = 600
+
+# "maxretry" is the number of failures before a host get banned.
+maxretry = 5
+
+# "backend" specifies the backend used to get files modification.
+# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
+# This option can be overridden in each jail as well.
+#
+# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
+#              If pyinotify is not installed, Fail2ban will use auto.
+# gamin:     requires Gamin (a file alteration monitor) to be installed.
+#              If Gamin is not installed, Fail2ban will use auto.
+# polling:   uses a polling algorithm which does not require external libraries.
+# systemd:   uses systemd python library to access the systemd journal.
+#              Specifying "logpath" is not valid for this backend.
+#              See "journalmatch" in the jails associated filter config
+# auto:      will try to use the following backends, in order:
+#              pyinotify, gamin, polling.
+#
+# Note: if systemd backend is chosen as the default but you enable a jail
+#       for which logs are present only in its own log files, specify some other
+#       backend for that jail (e.g. polling) and provide empty value for
+#       journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200
+backend = auto
+
+# "usedns" specifies if jails should trust hostnames in logs,
+#   warn when DNS lookups are performed, or ignore all hostnames in logs
+#
+# yes:   if a hostname is encountered, a DNS lookup will be performed.
+# warn:  if a hostname is encountered, a DNS lookup will be performed,
+#        but it will be logged as a warning.
+# no:    if a hostname is encountered, will not be used for banning,
+#        but it will be logged as info.
+# raw:   use raw value (no hostname), allow use it for no-host filters/actions (example user)
+usedns = warn
+
+# "logencoding" specifies the encoding of the log files handled by the jail
+#   This is used to decode the lines from the log file.
+#   Typical examples:  "ascii", "utf-8"
+#
+#   auto:   will use the system locale setting
+logencoding = auto
+
+# "enabled" enables the jails.
+#  By default all jails are disabled, and it should stay this way.
+#  Enable only relevant to your setup jails in your .local or jail.d/*.conf
+#
+# true:  jail will be enabled and log files will get monitored for changes
+# false: jail is not enabled
+enabled = false
+
+
+# "filter" defines the filter to use by the jail.
+#  By default jails have names matching their filter name
+#
+filter = %(__name__)s
+
+
+#
+# ACTIONS
+#
+
+# Some options used for actions
+
+# Destination email address used solely for the interpolations in
+# jail.{conf,local,d/*} configuration files.
+destemail = root@localhost
+
+# Sender email address used solely for some actions
+sender = root@localhost
+
+# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the
+# mailing. Change mta configuration parameter to mail if you want to
+# revert to conventional 'mail'.
+mta = sendmail
+
+# Default protocol
+protocol = tcp
+
+# Specify chain where jumps would need to be added in iptables-* actions
+chain = INPUT
+
+# Ports to be banned
+# Usually should be overridden in a particular jail
+port = 0:65535
+
+# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3
+fail2ban_agent = Fail2Ban/%(fail2ban_version)s
+
+#
+# Action shortcuts. To be used to define action parameter
+
+# Default banning action (e.g. iptables, iptables-new,
+# iptables-multiport, shorewall, etc) It is used to define
+# action_* variables. Can be overridden globally or per
+# section within jail.local file
+banaction = iptables-multiport
+banaction_allports = iptables-allports
+
+# The simplest action to take: ban only
+action_ = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
+
+# ban & send an e-mail with whois report to the destemail.
+action_mw = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
+            %(mta)s-whois[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"]
+
+# ban & send an e-mail with whois report and relevant log lines
+# to the destemail.
+action_mwl = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
+             %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"]
+
+# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action
+#
+# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines
+# to the destemail.
+action_xarf = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
+             xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath=%(logpath)s, port="%(port)s"]
+
+# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines
+# to the destemail.
+action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"]
+                %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"]
+
+# Report block via blocklist.de fail2ban reporting service API
+# 
+# See the IMPORTANT note in action.d/blocklist_de.conf for when to
+# use this action. Create a file jail.d/blocklist_de.local containing
+# [Init]
+# blocklist_de_apikey = {api key from registration]
+#
+action_blocklist_de  = blocklist_de[email="%(sender)s", service=%(filter)s, apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"]
+
+# Report ban via badips.com, and use as blacklist
+#
+# See BadIPsAction docstring in config/action.d/badips.py for
+# documentation for this action.
+#
+# NOTE: This action relies on banaction being present on start and therefore
+# should be last action defined for a jail.
+#
+action_badips = badips.py[category="%(__name__)s", banaction="%(banaction)s", agent="%(fail2ban_agent)s"]
+#
+# Report ban via badips.com (uses action.d/badips.conf for reporting only)
+#
+action_badips_report = badips[category="%(__name__)s", agent="%(fail2ban_agent)s"]
+
+# Choose default action.  To change, just override value of 'action' with the
+# interpolation to the chosen action shortcut (e.g.  action_mw, action_mwl, etc) in jail.local
+# globally (section [DEFAULT]) or per specific section
+action = %(action_)s
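+#
+# For example, a jail.local could switch the default to the mail+whois+log
+# variant globally (a sketch only; point destemail and mta at your own site
+# before relying on it):
+#
+# [DEFAULT]
+# action = %(action_mwl)s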
+
+
+#
+# JAILS
+#
+
+#
+# SSH servers
+#
+
+[sshd]
+
+# To use the more aggressive sshd filter (which includes the sshd-ddos failregex):
+#filter = sshd-aggressive
+port    = ssh
+logpath = %(sshd_log)s
+backend = %(sshd_backend)s
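+
+# Jails in this file are typically left disabled; a jail.local sketch that
+# enables this one with the aggressive filter could look like:
+# [sshd]
+# enabled = true
+# filter  = sshd-aggressive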
+
+
+[sshd-ddos]
+# This jail corresponds to the standard configuration in Fail2ban.
+# The mail-whois action sends a notification e-mail with a whois request
+# in the body.
+port    = ssh
+logpath = %(sshd_log)s
+backend = %(sshd_backend)s
+
+
+[dropbear]
+
+port     = ssh
+logpath  = %(dropbear_log)s
+backend  = %(dropbear_backend)s
+
+
+[selinux-ssh]
+
+port     = ssh
+logpath  = %(auditd_log)s
+
+
+#
+# HTTP servers
+#
+
+[apache-auth]
+
+port     = http,https
+logpath  = %(apache_error_log)s
+
+
+[apache-badbots]
+# Ban hosts whose user agent identifies them as spam robots crawling the
+# web for email addresses. The mail outputs are buffered.
+port     = http,https
+logpath  = %(apache_access_log)s
+bantime  = 172800
+maxretry = 1
+
+
+[apache-noscript]
+
+port     = http,https
+logpath  = %(apache_error_log)s
+
+
+[apache-overflows]
+
+port     = http,https
+logpath  = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-nohome]
+
+port     = http,https
+logpath  = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-botsearch]
+
+port     = http,https
+logpath  = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-fakegooglebot]
+
+port     = http,https
+logpath  = %(apache_access_log)s
+maxretry = 1
+ignorecommand = %(ignorecommands_dir)s/apache-fakegooglebot <ip>
+
+
+[apache-modsecurity]
+
+port     = http,https
+logpath  = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-shellshock]
+
+port    = http,https
+logpath = %(apache_error_log)s
+maxretry = 1
+
+
+[openhab-auth]
+
+filter = openhab
+action = iptables-allports[name=NoAuthFailures]
+logpath = /opt/openhab/logs/request.log
+
+
+[nginx-http-auth]
+
+port    = http,https
+logpath = %(nginx_error_log)s
+
+# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module` 
+# and define `limit_req` and `limit_req_zone` as described in nginx documentation
+# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html
+# or for example see in 'config/filter.d/nginx-limit-req.conf'
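+#
+# A minimal nginx sketch of those directives (zone name and rate below are
+# examples only, not recommendations):
+#   limit_req_zone $binary_remote_addr zone=lr_one:10m rate=1r/s;   # in the http {} context
+#   limit_req zone=lr_one burst=5 nodelay;                          # in the protected location {}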
+[nginx-limit-req]
+port    = http,https
+logpath = %(nginx_error_log)s
+
+[nginx-botsearch]
+
+port     = http,https
+logpath  = %(nginx_error_log)s
+maxretry = 2
+
+
+# Ban attackers that try to use PHP's URL-fopen() functionality
+# through GET/POST variables. - Experimental, with more than a year
+# of usage in production environments.
+
+[php-url-fopen]
+
+port    = http,https
+logpath = %(nginx_access_log)s
+          %(apache_access_log)s
+
+
+[suhosin]
+
+port    = http,https
+logpath = %(suhosin_log)s
+
+
+[lighttpd-auth]
+# Same as above for Apache's mod_auth
+# It catches failed authentications
+port    = http,https
+logpath = %(lighttpd_error_log)s
+
+
+#
+# Webmail and groupware servers
+#
+
+[roundcube-auth]
+
+port     = http,https
+logpath  = %(roundcube_errors_log)s
+
+
+[openwebmail]
+
+port     = http,https
+logpath  = /var/log/openwebmail.log
+
+
+[horde]
+
+port     = http,https
+logpath  = /var/log/horde/horde.log
+
+
+[groupoffice]
+
+port     = http,https
+logpath  = /home/groupoffice/log/info.log
+
+
+[sogo-auth]
+# Monitor SOGo groupware server
+# without proxy this would be:
+# port    = 20000
+port     = http,https
+logpath  = /var/log/sogo/sogo.log
+
+
+[tine20]
+
+logpath  = /var/log/tine20/tine20.log
+port     = http,https
+
+
+#
+# Web Applications
+#
+#
+
+[drupal-auth]
+
+port     = http,https
+logpath  = %(syslog_daemon)s
+backend  = %(syslog_backend)s
+
+[guacamole]
+
+port     = http,https
+logpath  = /var/log/tomcat*/catalina.out
+
+[monit]
+# Ban clients brute-forcing the monit GUI login
+port = 2812
+logpath  = /var/log/monit
+
+
+[webmin-auth]
+
+port    = 10000
+logpath = %(syslog_authpriv)s
+backend = %(syslog_backend)s
+
+
+[froxlor-auth]
+
+port    = http,https
+logpath  = %(syslog_authpriv)s
+backend  = %(syslog_backend)s
+
+
+#
+# HTTP Proxy servers
+#
+#
+
+[squid]
+
+port     =  80,443,3128,8080
+logpath = /var/log/squid/access.log
+
+
+[3proxy]
+
+port    = 3128
+logpath = /var/log/3proxy.log
+
+
+#
+# FTP servers
+#
+
+
+[proftpd]
+
+port     = ftp,ftp-data,ftps,ftps-data
+logpath  = %(proftpd_log)s
+backend  = %(proftpd_backend)s
+
+
+[pure-ftpd]
+
+port     = ftp,ftp-data,ftps,ftps-data
+logpath  = %(pureftpd_log)s
+backend  = %(pureftpd_backend)s
+
+
+[gssftpd]
+
+port     = ftp,ftp-data,ftps,ftps-data
+logpath  = %(syslog_daemon)s
+backend  = %(syslog_backend)s
+
+
+[wuftpd]
+
+port     = ftp,ftp-data,ftps,ftps-data
+logpath  = %(wuftpd_log)s
+backend  = %(wuftpd_backend)s
+
+
+[vsftpd]
+# or overwrite it in jail.local to be
+# logpath = %(syslog_authpriv)s
+# if you want to rely on PAM failed login attempts
+# vsftpd's failregex should match both of those formats
+port     = ftp,ftp-data,ftps,ftps-data
+logpath  = %(vsftpd_log)s
+
+
+#
+# Mail servers
+#
+
+# ASSP SMTP Proxy Jail
+[assp]
+
+port     = smtp,465,submission
+logpath  = /root/path/to/assp/logs/maillog.txt
+
+
+[courier-smtp]
+
+port     = smtp,465,submission
+logpath  = %(syslog_mail)s
+backend  = %(syslog_backend)s
+
+
+[postfix]
+
+port     = smtp,465,submission
+logpath  = %(postfix_log)s
+backend  = %(postfix_backend)s
+
+
+[postfix-rbl]
+
+port     = smtp,465,submission
+logpath  = %(postfix_log)s
+backend  = %(postfix_backend)s
+maxretry = 1
+
+
+[sendmail-auth]
+
+port    = submission,465,smtp
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[sendmail-reject]
+
+port     = smtp,465,submission
+logpath  = %(syslog_mail)s
+backend  = %(syslog_backend)s
+
+
+[qmail-rbl]
+
+filter  = qmail
+port    = smtp,465,submission
+logpath = /service/qmail/log/main/current
+
+
+# dovecot defaults to logging to the mail syslog facility
+# but this can be changed with syslog_facility in the dovecot configuration.
+[dovecot]
+
+port    = pop3,pop3s,imap,imaps,submission,465,sieve
+logpath = %(dovecot_log)s
+backend = %(dovecot_backend)s
+
+
+[sieve]
+
+port   = smtp,465,submission
+logpath = %(dovecot_log)s
+backend = %(dovecot_backend)s
+
+
+[solid-pop3d]
+
+port    = pop3,pop3s
+logpath = %(solidpop3d_log)s
+
+
+[exim]
+
+port   = smtp,465,submission
+logpath = %(exim_main_log)s
+
+
+[exim-spam]
+
+port   = smtp,465,submission
+logpath = %(exim_main_log)s
+
+
+[kerio]
+
+port    = imap,smtp,imaps,465
+logpath = /opt/kerio/mailserver/store/logs/security.log
+
+
+#
+# Mail servers authenticators: might be used for smtp,ftp,imap servers, so
+# all relevant ports get banned
+#
+
+[courier-auth]
+
+port     = smtp,465,submission,imap3,imaps,pop3,pop3s
+logpath  = %(syslog_mail)s
+backend  = %(syslog_backend)s
+
+
+[postfix-sasl]
+
+port     = smtp,465,submission,imap3,imaps,pop3,pop3s
+# You might consider monitoring /var/log/mail.warn instead if you are
+# running postfix since it would provide the same log lines at the
+# "warn" level but overall at the smaller filesize.
+logpath  = %(postfix_log)s
+backend  = %(postfix_backend)s
+
+
+[perdition]
+
+port   = imap3,imaps,pop3,pop3s
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[squirrelmail]
+
+port = smtp,465,submission,imap2,imap3,imaps,pop3,pop3s,http,https,socks
+logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log
+
+
+[cyrus-imap]
+
+port   = imap3,imaps
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[uwimap-auth]
+
+port   = imap3,imaps
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+#
+#
+# DNS servers
+#
+
+
+# !!! WARNING !!!
+#   Since UDP is a connection-less protocol, spoofing the source IP and
+#   imitating illegal actions is far too simple.  Enabling this filter
+#   could therefore provide an easy way to implement a DoS against a chosen
+#   victim. See
+#    http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html
+#   Please DO NOT USE this jail unless you know what you are doing.
+#
+# IMPORTANT: see filter.d/named-refused for instructions to enable logging
+# This jail blocks UDP traffic for DNS requests.
+# [named-refused-udp]
+#
+# filter   = named-refused
+# port     = domain,953
+# protocol = udp
+# logpath  = /var/log/named/security.log
+
+# IMPORTANT: see filter.d/named-refused for instructions to enable logging
+# This jail blocks TCP traffic for DNS requests.
+
+[named-refused]
+
+port     = domain,953
+logpath  = /var/log/named/security.log
+
+
+[nsd]
+
+port     = 53
+action   = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
+           %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
+logpath = /var/log/nsd.log
+
+
+#
+# Miscellaneous
+#
+
+[asterisk]
+
+port     = 5060,5061
+action   = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
+           %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
+           %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"]
+logpath  = /var/log/asterisk/messages
+maxretry = 10
+
+
+[freeswitch]
+
+port     = 5060,5061
+action   = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
+           %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
+           %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"]
+logpath  = /var/log/freeswitch.log
+maxretry = 10
+
+
+# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld] or
+# equivalent section:
+# log-warning = 2
+#
+# for syslog (daemon facility)
+# [mysqld_safe]
+# syslog
+#
+# for own logfile
+# [mysqld]
+# log-error=/var/log/mysqld.log
+[mysqld-auth]
+
+port     = 3306
+logpath  = %(mysql_log)s
+backend  = %(mysql_backend)s
+
+
+# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf')
+[mongodb-auth]
+# change the port when running with the "--shardsvr" or "--configsvr" runtime options
+port     = 27017
+logpath  = /var/log/mongodb/mongodb.log
+
+
+# Jail for more extended banning of persistent abusers
+# !!! WARNINGS !!!
+# 1. Make sure that your loglevel specified in fail2ban.conf/.local
+#    is not at DEBUG level -- which might then cause fail2ban to fall into
+#    an infinite loop constantly feeding itself with non-informative lines
+# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 648000 (7.5 days)
+#    to maintain entries for failed logins for a sufficient amount of time
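+#
+# A fail2ban.local sketch for point 2 (the value is just the example from
+# the warning above, not a recommendation):
+# [Definition]
+# dbpurgeage = 648000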
+[recidive]
+
+logpath  = /var/log/fail2ban.log
+banaction = %(banaction_allports)s
+bantime  = 604800  ; 1 week
+findtime = 86400   ; 1 day
+
+
+# Generic filter for PAM. Has to be used with action which bans all
+# ports such as iptables-allports, shorewall
+
+[pam-generic]
+# pam-generic filter can be customized to monitor specific subset of 'tty's
+banaction = %(banaction_allports)s
+logpath  = %(syslog_authpriv)s
+backend  = %(syslog_backend)s
+
+
+[xinetd-fail]
+
+banaction = iptables-multiport-log
+logpath   = %(syslog_daemon)s
+backend   = %(syslog_backend)s
+maxretry  = 2
+
+
+# stunnel - need to set port for this
+[stunnel]
+
+logpath = /var/log/stunnel4/stunnel.log
+
+
+[ejabberd-auth]
+
+port    = 5222
+logpath = /var/log/ejabberd/ejabberd.log
+
+
+[counter-strike]
+
+logpath = /opt/cstrike/logs/L[0-9]*.log
+# Firewall: http://www.cstrike-planet.com/faq/6
+tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039
+udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015
+action  = %(banaction)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
+           %(banaction)s[name=%(__name__)s-udp, port="%(udpport)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
+
+# consider low maxretry and a long bantime
+# nobody except your own Nagios server should ever probe nrpe
+[nagios]
+
+logpath  = %(syslog_daemon)s     ; nrpe.cfg may define a different log_facility
+backend  = %(syslog_backend)s
+maxretry = 1
+
+
+[oracleims]
+# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above
+logpath = /opt/sun/comms/messaging64/log/mail.log_current
+banaction = %(banaction_allports)s
+
+[directadmin]
+logpath = /var/log/directadmin/login.log
+port = 2222
+
+[portsentry]
+logpath  = /var/lib/portsentry/portsentry.history
+maxretry = 1
+
+[pass2allow-ftp]
+# this pass2allow example allows FTP traffic after successful HTTP authentication
+port         = ftp,ftp-data,ftps,ftps-data
+# knocking_url variable must be overridden to some secret value in jail.local
+knocking_url = /knocking/
+filter       = apache-pass[knocking_url="%(knocking_url)s"]
+# access log of the website with HTTP auth
+logpath      = %(apache_access_log)s
+blocktype    = RETURN
+returntype   = DROP
+bantime      = 3600
+maxretry     = 1
+findtime     = 1
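+
+# A jail.local override sketch (the knocking_url value below is only a
+# placeholder; choose your own secret path):
+# [pass2allow-ftp]
+# enabled      = true
+# knocking_url = /my-secret-path/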
+
+
+[murmur]
+# AKA mumble-server
+port     = 64738
+action   = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol=tcp, chain="%(chain)s", actname=%(banaction)s-tcp]
+           %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol=udp, chain="%(chain)s", actname=%(banaction)s-udp]
+logpath  = /var/log/mumble-server/mumble-server.log
+
+
+[screensharingd]
+# For Mac OS Screen Sharing Service (VNC)
+logpath  = /var/log/system.log
+logencoding = utf-8
+
+[haproxy-http-auth]
+# HAProxy does not log to a file by default; you'll need to set it up to
+# forward logs to a syslog server, which then writes them to disk.
+# See "haproxy-http-auth" filter for a brief cautionary note when setting
+# maxretry and findtime.
+logpath  = /var/log/haproxy.log
+
+[slapd]
+port    = ldap,ldaps
+filter  = slapd
+logpath = /var/log/slapd.log
+
+[domino-smtp]
+port    = smtp,ssmtp
+filter  = domino-smtp
+logpath = /home/domino01/data/IBM_TECHNICAL_SUPPORT/console.log
diff --git a/roles/fastest_mirror/tasks/main.yml b/roles/fastest_mirror/tasks/main.yml
index 7b18f9451a9a422eb2f25fbef9faa60cb608960a..a0ebb5deb84d882c3281137f432f57742f520503 100644
--- a/roles/fastest_mirror/tasks/main.yml
+++ b/roles/fastest_mirror/tasks/main.yml
@@ -1,20 +1,20 @@
 - name: "Clear yum cache"
   command: yum clean all
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name: remove the bad repo on centos7 images
   file: path=/etc/yum.repos.d/rdo-release.repo state=absent
-  sudo: true
+  become: true
 
 - name: "Make yum cache"
   command: yum makecache
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 # For some reason ed went missing from the NeCTAR official CentOS 7 image
 # This meant that fail2ban could ban you, but could never unban you
 - name: "make sure ed is installed"
   yum: name=ed state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
diff --git a/roles/gluster_client/tasks/main.yml b/roles/gluster_client/tasks/main.yml
index c27e4a7ad1f2ee1280c52942498d22dd9f8cc489..f7eb6a1855fc3a93be5d8a8b3e06daa724482f4a 100644
--- a/roles/gluster_client/tasks/main.yml
+++ b/roles/gluster_client/tasks/main.yml
@@ -5,14 +5,14 @@
   when: ansible_os_family == 'RedHat'
   with_items:
   - glusterfs-client
-  sudo: true
+  become: true
 
 - name: install gluster
   apt: name=glusterfs-client state='latest'
   when: ansible_os_family == 'Debian'
-  sudo: true
+  become: true
 
 - name: mount volume
   mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,_netdev,backupvolfile-server={{ gluster_servers[1] }}"
 #  mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,_netdev,acl,backupvolfile-server={{ gluster_servers[1] }},comment=systemd.automount"
-  sudo: true
+  become: true
diff --git a/roles/gluster_volcreate/tasks/main.yml b/roles/gluster_volcreate/tasks/main.yml
index b1af0ed86d7367e1f90b51de8adc13537bb18d84..f2bbdb5e2a2f3b3f60138f74c4fb8318dd1def0f 100644
--- a/roles/gluster_volcreate/tasks/main.yml
+++ b/roles/gluster_volcreate/tasks/main.yml
@@ -33,7 +33,7 @@
     replicas: "{{ replicas }}"
     host: "{{ inventory_hostname }}"
     state: present
-  sudo: true
+  become: true
   ignore_errors: true
 #  run_once: true
   delegate_to: "{{ gluster_servers[0] }}"
diff --git a/roles/gpu/tasks/main.yml b/roles/gpu/tasks/main.yml
index bbaf99596489f7e1b62634f99a41c4386957e79e..f76796f1881d3a0efb9b3eb3974e261e2b9dab58 100644
--- a/roles/gpu/tasks/main.yml
+++ b/roles/gpu/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: install deps 
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - gcc
     - perl
@@ -57,6 +57,7 @@
   shell: uname -r
   register: kernel_version
   check_mode: no
+  changed_when: False
 
 
 - name: check nvidia driver
@@ -73,6 +74,7 @@
   register: installed_driver_version
   when: nvidia_driver.stat.exists
   check_mode: no
+  changed_when: False
 
 - name: set install default
   set_fact: 
@@ -94,7 +96,7 @@
 
 - name: Unload nvidia driver
   shell: rmmod nvidia_uvm nvidia_drm nvidia_modeset nvidia || true
-  sudo: true
+  become: true
   when: install_driver
 
 
@@ -118,19 +120,26 @@
 
 #- name: Copy boot file
 #  template: src=grub.conf.j2 dest=/boot/grub/grub.conf 
-#  sudo: true
+#  become: true
 #
 #- name: Copy X config file
 #  template: src=xorg.conf.j2 dest=/etc/X11/xorg.conf 
-#  sudo: true
+#  become: true
 
 - name: Copy xserver file
   template: src=xserver.j2 dest=/etc/pam.d/xserver
-  sudo: true
+  become: true
 
-- name: build nvidia driver 
-  shell: chmod 755 /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run; /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run --silent
-  sudo: true
+- name: chmod nvidia driver builder
+  file:
+    path: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
+    mode: 0755
+  become: true
+  when: install_driver
+
+- name: build nvidia driver
+  shell: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
+  become: true
   when: install_driver
 
 - name: set the GOM
@@ -160,13 +169,14 @@
   script: scripts/nvidia-xconf-gen.py
   register: nvidiacards
   check_mode: no
+  changed_when: False
 
 - name: set env for nvidia_card_lists
   set_fact: 
     nvidiacardslist: "{{ nvidiacards.stdout | from_json }}"
 
 - name: generate nvidia-xorg-conf
-  sudo: true
+  become: true
   template:
     src: xorg.conf.j2
     dest: "{{ item['filename'] }}"
diff --git a/roles/gpu_update/tasks/main.yml b/roles/gpu_update/tasks/main.yml
index 72635811db1b655a6bfedb90227fbbe1961baf23..59f95b0d2b9cc153b4f51c7c203555e1dd14a716 100644
--- a/roles/gpu_update/tasks/main.yml
+++ b/roles/gpu_update/tasks/main.yml
@@ -41,12 +41,12 @@
 
 - name: Set cuda init script
   template: dest=/etc/init.d/cuda src=cuda mode="u=rwx,g=rx,o=rx"
-  sudo: true
+  become: true
 
 ## Install packages
 - name: install deps 
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - gcc
     - perl
@@ -78,12 +78,12 @@
     dest: /etc/modprobe.d/blacklist.conf 
     line: "blacklist nouveau"  
     state: present
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int == 6
 # M3 But this is the preferred method (which is what the installer does
 - name: Template nvidia-installer-disable-nouveau.conf 
   template: dest=/etc/modprobe.d/nvidia-installer-disable-nouveau.conf src=nvidia-installer-disable-nouveau.conf.j2
-  sudo: true
+  become: true
   when: ansible_os_family != "RedHat" and ansible_lsb.major_release|int != 6
 
 - name: Check if nouveau module is loaded
@@ -95,7 +95,7 @@
   shell: "sleep 2 && shutdown -r now &"
   async: 1
   poll: 1
-  sudo: true
+  become: true
   ignore_errors: true
   when: modules_result.stdout.find('nouveau') != -1
 
@@ -106,7 +106,7 @@
 # Removed as this is related to old ways of controlling access to driver files
 # - name: Template nvidia.conf 
 #  template: dest=/etc/modprobe.d/nvidia.conf src=nvidia.conf.j2
-#  sudo: true
+#  become: true
 
 ## Install NVIDIA driver
 - name: Check nvidia driver version 
@@ -140,7 +140,7 @@
   shell: "sleep 2 && shutdown -r now &"
   async: 1
   poll: 1
-  sudo: true
+  become: true
   ignore_errors: true
   when: upgrading_driver and (nvidia_modules_result.stdout.find('nvidia') != -1)
 
@@ -154,19 +154,19 @@
      dest=/tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
      mode=755
   # shell: cp -f /usr/local/src/CUDA/driver/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
-  # sudo: true 
+  # become: true 
   when: upgrading_driver
 
 - name: Install nvidia driver 
   shell: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run --silent 
-  sudo: true
+  become: true
   when: upgrading_driver
   # when: nvidia_result.stdout.find("{{ nvidia_version }}") != -1
 
 ## Configure stuff for using the GPU
 - name: Configure xorg.conf with nvidia-xconfig so xorg.conf matches gpu number
   shell: /usr/bin/nvidia-xconfig -a --use-display-device=none
-  sudo: true
+  become: true
 
 - name: Comment out auth required so xserver can start from slurm job 
   lineinfile: dest=/etc/pam.d/xserver
@@ -174,7 +174,7 @@
     line='#auth       required    pam_console.so'
     backrefs=yes
     # state=present
-  sudo: true
+  become: true
 
 - name: set persistence mode 
   lineinfile: 
@@ -182,13 +182,13 @@
     dest: /etc/rc.d/rc.local
     line: "nvidia-smi --persistence-mode=1"  
     state: present
-  sudo: true
+  become: true
 
 - name: Restart host to enable new driver 
   shell: "sleep 2 && shutdown -r now &"
   async: 1
   poll: 1
-  sudo: true
+  become: true
   ignore_errors: true
   when: upgrading_driver
 
diff --git a/roles/installPackage/tasks/main.yml b/roles/installPackage/tasks/main.yml
index a42426cb3e5fe964c18da7a8870c1729446ce213..56379ea06801ecf34682834bdc51a42e514385b6 100644
--- a/roles/installPackage/tasks/main.yml
+++ b/roles/installPackage/tasks/main.yml
@@ -1,13 +1,13 @@
 ---
 - name: Pre installation
   shell: "{{ preInstallation }}" 
-  sudo: true
+  become: true
   ignore_errors: true
   when: ansible_distribution == 'CentOS' and preInstallation is defined
   
 - name: Add new repo file 
   shell: "{{ importRepo.command }} {{ importRepo.destination }}"
-  sudo: true
+  become: true
   run_once: true
   args:
     creates: "{{ importRepo.destination }}"
@@ -16,23 +16,23 @@
 - name: Install yum packages 
   yum: name={{ item }} state=present
   with_items: yumPackageList
-  sudo: true
+  become: true
   when: ansible_distribution == 'CentOS' and yumPackageList is defined
 
 - name: Install yum group packages 
   shell: yum --setopt=protected_multilib=false -y groupinstall "{{ item }}"
   with_items: yumGroupPackageList
-  sudo: true
+  become: true
   when: ansible_distribution == 'CentOS' and yumGroupPackageList is defined
 
 - name: Post installation
   shell: "{{ postInstallation }}" 
-  sudo: true
+  become: true
   when: ansible_distribution == 'CentOS' and postInstallation is defined
  
 - name: conditional shell copy command 
   shell: "{{ cliCopy.run }}"
-  sudo: true
+  become: true
   run_once: true
   args:
     creates: "{{ cliCopy.check }}"
diff --git a/roles/jasons_ssh_ca/handlers/main.yml b/roles/jasons_ssh_ca/handlers/main.yml
index 53ca43d6f6ad089c3324ad822ddbd1244e0c52bd..5ae58a8c4223af952565ceaeebca0389e255dc49 100644
--- a/roles/jasons_ssh_ca/handlers/main.yml
+++ b/roles/jasons_ssh_ca/handlers/main.yml
@@ -3,4 +3,4 @@
 
 - name: restart ssh
   service: name={{ sshd_name }} state=restarted
-  sudo: true
+  become: true
diff --git a/roles/jasons_ssh_ca/tasks/main.yml b/roles/jasons_ssh_ca/tasks/main.yml
index 9f71db4d4da6e41560d41e6eaf5646da55d843b2..40ba4484813bf64228b2358f3ff03d3341229dcd 100644
--- a/roles/jasons_ssh_ca/tasks/main.yml
+++ b/roles/jasons_ssh_ca/tasks/main.yml
@@ -3,7 +3,7 @@
 
 - name: copy ca cert
   copy: src=server_ca.pub dest=/etc/ssh/server_ca.pub owner=root group=root mode=644
-  sudo: true
+  become: true
 
 - name: edit sshd_config
   lineinfile:
@@ -11,5 +11,5 @@
     dest: /etc/ssh/sshd_config
     line: TrustedUserCAKeys /etc/ssh/server_ca.pub
     state: present
-  sudo: true
+  become: true
   notify: restart ssh
diff --git a/roles/karaage2.7/handlers/main.yml b/roles/karaage2.7/handlers/main.yml
index ed584cc2e94ad052af1e0f5e5e69b1073c6c8c1f..e36fe7610b3670c9cac8b576257b44777b5cf198 100644
--- a/roles/karaage2.7/handlers/main.yml
+++ b/roles/karaage2.7/handlers/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: restart apache
   service: name=apache2 state=restarted
-  sudo: true
+  become: true
 
 - name: restart postfix
   service: name=postfix state=restarted
-  sudo: true
+  become: true
diff --git a/roles/karaage2.7/tasks/install_via_apt.yml b/roles/karaage2.7/tasks/install_via_apt.yml
index 4e947e739f33a409ca6fbbfd1259fc7c2a7454c7..319175595b6fccfc5103e6b7a120daca179dba4f 100644
--- a/roles/karaage2.7/tasks/install_via_apt.yml
+++ b/roles/karaage2.7/tasks/install_via_apt.yml
@@ -6,23 +6,23 @@
 
 - name: add repo key
   shell: wget http://code.vpac.org/debian/vpac-debian-key.gpg -O - | apt-key add -
-  sudo: true
+  become: true
   when: repoConfigured|failed
 
 - name: template vpac.list
   template: src=vpac_list.j2 dest=/etc/apt/sources.list.d/vpac.list
-  sudo: true
+  become: true
   when: repoConfigured|failed
 
 - name: update cache
   apt: update_cache=true
-  sudo: true
+  become: true
   when: repoConfigured|failed
   
 
 - name: install karaage
   apt: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - karaage-admin
     - karaage-registration
diff --git a/roles/karaage2.7/tasks/main.yml b/roles/karaage2.7/tasks/main.yml
index 4512a69b07c8243dc057b1034c90a821d42331ce..26362b91497bcad5679aa6ef4e27e8ebf5a862f2 100644
--- a/roles/karaage2.7/tasks/main.yml
+++ b/roles/karaage2.7/tasks/main.yml
@@ -3,13 +3,13 @@
 
 - name: install system packages apt
   apt: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items: system_packages
   when: ansible_os_family == 'Debian'
 
 - name: install system packages yum
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items: system_packages
   when: ansible_os_family == 'RedHat'
 
@@ -20,13 +20,13 @@
 
 - name: check kg secret key
   shell: cat /etc/karaage/global_settings.py | grep "SECRET_KEY = '.*'"
-  sudo: true
+  become: true
   ignore_errors: true
   register: kg_secret_key_set
 
 - name: set kg secret key
   shell: kg_set_secret_key
-  sudo: true
+  become: true
   when: kg_secret_key_set|failed
 
 - name: mysql db
@@ -42,7 +42,7 @@
     regexp: "#ALLOW_REGISTRATIONS" 
     line: "ALLOW_REGISTRATIONS = True" 
     backrefs: yes
-  sudo: true
+  become: true
 
 # Why not template the whole of global_settings.py? 
 # Because I don't know what kg_set_secret_key does so I can't easily template my own secret key
@@ -54,7 +54,7 @@
     owner: root
     group: "{{ wwwgroup }}"
     mode: 0640
-  sudo: true
+  become: true
 
 - name: karaage settings db type
   lineinfile: 
@@ -63,7 +63,7 @@
     regexp: "        'ENGINE': 'django.db.backends.'," 
     line: "        'ENGINE': 'django.db.backends.mysql'," 
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: karaage settings db db
   lineinfile: 
@@ -72,7 +72,7 @@
     regexp: "       'NAME': ''," 
     line: "        'NAME': 'karaage'," 
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: karaage settings db user
   lineinfile: 
@@ -81,7 +81,7 @@
     regexp: "       'USER': ''," 
     line: "        'USER': 'karaage'," 
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: karaage settings db password
   lineinfile: 
@@ -90,7 +90,7 @@
     regexp: "       'PASSWORD': ''," 
     line: "        'PASSWORD': '{{ karaageSqlPassword }}'," 
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: ldap url
   lineinfile:
@@ -99,7 +99,7 @@
     regexp: "LDAP_URL = "
     line: "LDAP_URL = '{{ ldapURI }}'"
     backrefs: yes
-  sudo: true
+  become: true
 
 - include_vars: "roles/ldapserver/vars/main.yml"
 
@@ -110,7 +110,7 @@
     regexp: "LDAP_BASE ="
     line: "LDAP_BASE = '{{ ldapBase }}'"
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: ldap user base
   lineinfile:
@@ -119,7 +119,7 @@
     regexp: "LDAP_USER_BASE="
     line: "LDAP_USER_BASE = '{{ ldapUserBase }}'"
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: ldap group base
   lineinfile:
@@ -128,7 +128,7 @@
     regexp: "LDAP_GROUP_BASE="
     line: "LDAP_GROUP_BASE = '{{ ldapGroupBase }}'"
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: ldap admin user
   lineinfile:
@@ -137,7 +137,7 @@
     regexp: "LDAP_ADMIN_USER ="
     line: "LDAP_ADMIN_USER = '{{ ldapManager }}'"
     backrefs: yes
-  sudo: true
+  become: true
   
 
 - name: ldap admin passwd
@@ -147,7 +147,7 @@
     regexp: "LDAP_ADMIN_PASSWORD ="
     line: "LDAP_ADMIN_PASSWORD = '{{ ldapManagerPassword }}'"
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: ldap use TLS CA
   lineinfile:
@@ -156,7 +156,7 @@
     regexp: "LDAP_USE_TLS ="
     line: "LDAP_USE_TLS = True"
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: ldap TLS CA
   lineinfile:
@@ -165,7 +165,7 @@
     insertafter: "LDAP_USE_TLS ="
     line: "LDAP_TLS_CA = '/etc/ssl/certs/ca.crt'"
     state: present
-  sudo: true
+  become: true
 
 - name: check karaage tables exist
   shell: echo 'describe auth_user' | mysql -u karaage --password={{ karaageSqlPassword }} karaage
@@ -174,16 +174,16 @@
   
 - name: template ldap.conf
   template: src=ldap_conf.j2 dest=/etc/ldap/ldap.conf
-  sudo: true
+  become: true
 
 #- name: karaage sql db setup
 #  shell: kg-manage syncdb --noinput
-#  sudo: true
+#  become: true
 #  when: karaageTablesCreated|failed
 #
 #- name: karaage sql db migrate
 #  shell: yes n | kg-manage migrate --all
-#  sudo: true
+#  become: true
 #
 # I had to use syncdb --all --noinput migrate --fake then
       #  sudo vi ./dist-packages/tldap/transaction.py
@@ -193,12 +193,12 @@
 
 - name: karaage sql syncdb
   shell: kg-manage syncdb --all --noinput
-  sudo: true
+  become: true
   when: karaageTablesCreated|failed
 
 - name: karaage sql db migrate
   shell: kg-manage migrate --fake
-  sudo: true
+  become: true
   when: karaageTablesCreated|failed
 
 - name: fix up karaage transactions.py
@@ -208,7 +208,7 @@
     insertafter: import tldap
     state: present
     dest: /usr/lib/python2.7/dist-packages/tldap/transaction.py
-  sudo: true
+  become: true
 
 - name: fix up karaage tldap/manager.py
   lineinfile: 
@@ -217,15 +217,15 @@
     insertafter: import tldap
     state: present
     dest: /usr/lib/python2.7/dist-packages/tldap/manager.py
-  sudo: true
+  become: true
 
 - name: enable ssl
   shell: a2enmod ssl
-  sudo: true
+  become: true
 
 - name: enable wsgi
   shell: a2enmod wsgi
-  sudo: true
+  become: true
 
 
 
@@ -233,45 +233,45 @@
   command: ln -s /etc/karaage/kgadmin-apache.conf /etc/apache2/conf.d/karaage-admin.conf
   args:
     creates: /etc/apache2/conf.d/karaage-admin.conf
-  sudo: true
+  become: true
   notify: restart apache
 
 - name: enable karaage registration
   command: ln -s /etc/karaage/kgreg-apache.conf /etc/apache2/conf.d/karaage-registration.conf
   args:
     creates: /etc/apache2/conf.d/karaage-registration.conf
-  sudo: true
+  become: true
   notify: restart apache
 
 - name: make ssl directory
   file: name=/etc/apache2/ssl state=directory
-  sudo: true
+  become: true
 
 - name: copy ssl key
   command: cp /etc/ssl/private/server.key /etc/apache2/ssl/server.key
   args:
     creates: /etc/apache2/ssl/server.key
-  sudo: true
+  become: true
 
 - name: chmod ssl key
   file: path=/etc/apache2/ssl/server.key mode=600 owner={{ wwwuser }}
-  sudo: true
+  become: true
 
 - name: copy cert
   command: cp /etc/ssl/certs/server.crt /etc/apache2/ssl/server.pem
-  sudo: true
+  become: true
 
 - name: enable ssl
   command: ln -s /etc/apache2/sites-available/default-ssl /etc/apache2/sites-enabled/default-ssl
   args:
     creates: /etc/apache2/sites-enabled/default-ssl
-  sudo: true
+  become: true
   notify: restart apache
 
 
 - name: configure postfix
   template: src=main_cf.j2 dest=/etc/postfix/main.cf
-  sudo: true
+  become: true
   notify: restart postfix
 
 - name: SSL Cert Chain
@@ -281,7 +281,7 @@
     regexp: ".*#SSLCertificateChainFile.*"
     line: "        SSLCertificateChainFile    /etc/ssl/certs/ca.crt"
     backrefs: yes
-  sudo: true
+  become: true
   notify: restart apache
 
 - name: SSL Cert
@@ -291,7 +291,7 @@
     regexp: ".*SSLCertificateFile    /etc/ssl/certs/ssl-cert-snakeoil.pem"
     line: "        SSLCertificateFile    /etc/apache2/ssl/server.pem"
     backrefs: yes
-  sudo: true
+  become: true
   notify: restart apache
 
 - name: SSL Key
@@ -301,5 +301,5 @@
     regexp: ".*SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key"
     line: "        SSLCertificateKeyFile /etc/apache2/ssl/server.key"
     backrefs: yes
-  sudo: true
+  become: true
   notify: restart apache
diff --git a/roles/karaage2.7/tasks/set_mysql_root_password.yml b/roles/karaage2.7/tasks/set_mysql_root_password.yml
index 365a18df55de8a5e52ac9262a72aac0d1d1fa679..70c413584920fdc685f7b981a580fff85be838b7 100644
--- a/roles/karaage2.7/tasks/set_mysql_root_password.yml
+++ b/roles/karaage2.7/tasks/set_mysql_root_password.yml
@@ -2,11 +2,11 @@
 
 - name: template secure script
   template: src=set_root_passwd_sql.j2 dest=/tmp/set_root_passwd.sql mode=600 owner=root
-  sudo: true
+  become: true
 
 - name: run script
   shell: cat /tmp/set_root_passwd.sql | mysql -u root
-  sudo: true
+  become: true
   ignore_errors: true
 
 - name: test passwd set
diff --git a/roles/karaage2.7_noppolicy/tasks/main.yml b/roles/karaage2.7_noppolicy/tasks/main.yml
index 9fbafca47eaa413ab8b6ca8f4fb2a08a479077c9..7e51106c4a9c68dad466eb3328be8b1c95222e1c 100644
--- a/roles/karaage2.7_noppolicy/tasks/main.yml
+++ b/roles/karaage2.7_noppolicy/tasks/main.yml
@@ -1,13 +1,13 @@
 ---
 - name: install defaultUnlocked mixin
   copy: dest=/usr/share/pyshared/placard/schemas/defaultUnlocked.py src=defaultUnlocked.py owner=root mode=644
-  sudo: true
+  become: true
 
 - name: link defaultUnlocked mixin
   shell: ln -s /usr/share/pyshared/placard/schemas/defaultUnlocked.py /usr/lib/python2.7/dist-packages/placard/schemas/defaultUnlocked.py ;  ln -s /usr/share/pyshared/placard/schemas/defaultUnlocked.py /usr/lib/python2.6/dist-packages/placard/schemas/defaultUnlocked.py 
   args:
     creates: /usr/lib/python2.6/dist-packages/placard/schemas/defaultUnlocked.py
-  sudo: true
+  become: true
 
 - name: configure ldap mixins 1
   lineinfile:  
@@ -16,7 +16,7 @@
     regexp: 'from placard.schemas.pwdpolicy import pwdPolicyMixin'
     line: "from placard.schemas.defaultUnlocked import defaultUnlockedMixin" 
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: configure ldap mixins 2
   lineinfile:  
@@ -25,7 +25,7 @@
     regexp: '    mixin_list = \[ common.personMixin, pwdPolicyMixin, kPersonMixin \]'
     line: "    mixin_list = [ common.personMixin, defaultUnlockedMixin, kPersonMixin ]"
     backrefs: yes
-  sudo: true
+  become: true
 
 - name: configure ldap mixins 3
   lineinfile:  
@@ -34,4 +34,4 @@
     regexp: 'class person\(rfc.person, rfc.organizationalPerson, rfc.inetOrgPerson, rfc.pwdPolicy, common.baseMixin\):'
     line: "class person(rfc.person, rfc.organizationalPerson, rfc.inetOrgPerson, common.baseMixin):"
     backrefs: yes
-  sudo: true
+  become: true
diff --git a/roles/karaage3.1.17/handlers/main.yml b/roles/karaage3.1.17/handlers/main.yml
index fb97f1971ff637b1d7e27713e2386c53603d6a8c..d0a0c30f115363c1b5c70c7f199f3f8262112bd4 100644
--- a/roles/karaage3.1.17/handlers/main.yml
+++ b/roles/karaage3.1.17/handlers/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: restart apache2 
   service: name=apache2 state=restarted
-  sudo: true
+  become: true
 
 - name: restart postfix 
   service: name=postfix state=restarted
-  sudo: true
+  become: true
diff --git a/roles/karaage3.1.17/tasks/karaage.yml b/roles/karaage3.1.17/tasks/karaage.yml
index aae0d0b95f190ee3d70b53f186386a32ce09b828..4daa387c22cb392145b4eb70af25a04df8229424 100644
--- a/roles/karaage3.1.17/tasks/karaage.yml
+++ b/roles/karaage3.1.17/tasks/karaage.yml
@@ -2,7 +2,7 @@
 -
  name: "Installing prerequisites Debian"
  apt: name={{ item }} update_cache=yes
- sudo: true
+ become: true
  with_items:
   - libxml2-dev
   - libxslt1-dev
@@ -27,7 +27,7 @@
 -
  name: "Installing prerequisites Redhat"
  yum: name={{ item }} state=present
- sudo: true
+ become: true
  with_items:
   - libxml2-devel
   - libxslt-devel
@@ -52,17 +52,17 @@
 -
  name: "Getting Karaage from Github"
  git: repo="https://github.com/monash-merc/karaage.git" dest="/root/karaage3.1.7" force=yes
- sudo: true
+ become: true
 
 - name: Copy dependence file
   copy: src=files/requirements.txt dest=/tmp/requirements.txt mode=644
 
 - name: "Installing Karaage Dependencies"
   pip: requirements=/tmp/requirements.txt
-  sudo: true
+  become: true
 -
  name: "Restrict Django version to 1.7.8"
- sudo: true
+ become: true
  replace: dest=/root/karaage3.1.7/setup.py regexp="Django >= 1.7" replace="Django == 1.7.8"
 -
  name: "Installing Karaage from source"
@@ -70,23 +70,23 @@
  args:
   chdir: /root/karaage3.1.7
   creates: /root/karaage3.1.7/build/bdist.linux-x86_64
- sudo: true
+ become: true
 -
  name: "Templating Karaage settings"
  template: src=settings.py.j2 dest=/etc/karaage3/settings.py owner=root group={{ apache_user }} mode=0640
- sudo: true
+ become: true
 -
  name: "Templating project conf"
  template: src=kginit.conf.j2 dest=/etc/karaage3/kginit.conf owner=root group={{ apache_user }} mode=0640
- sudo: true
+ become: true
 -
  name: "Templating project init script"
  template: src=kg_init.j2 dest=/usr/bin/kg_init owner=root mode=755
- sudo: true
+ become: true
 -
  name: "Templating adding admin role script"
  template: src=kg_add_admin.j2 dest=/usr/bin/kg_add_admin owner=root mode=755
- sudo: true
+ become: true
 -
  name: "Creating karaage3 in /var directories log, lib "
  file: path={{ item }} state=directory owner=root group={{ apache_user }} mode=0775
@@ -94,19 +94,19 @@
   - /var/log/karaage3
   - /var/lib/karaage3
   - /var/cache/karaage3
- sudo: true
+ become: true
 
 - name: install karaage3-wsgi.conf
   template: src=karaage3-wsgi.conf.j2 dest=/etc/apache2/conf-available/karaage3-wsgi.conf
-  sudo: true
+  become: true
 
 - name: install karaage3-wsgi.conf
   template: src=index.html.j2 dest=/var/www/index.html
-  sudo: true
+  become: true
 
 - name: install karaage3-wsgi.conf
   template: src=kg-idps.j2 dest=/usr/bin/kg-idps mode=755
-  sudo: true
+  become: true
 
 - name: install shibboleth cache file 
   template: src="files/{{ shibboleth_deploy }}_metadata.aaf.xml.j2" dest=/tmp/metadata.aaf.xml 
@@ -114,19 +114,19 @@
 -
  name: "enabling Karaage configuration"
  shell: a2enconf karaage3-wsgi
- sudo: true
+ become: true
  when: ansible_os_family == "Debian"
 -
  name: "enabling Karaage configuration"
  shell: cp -rvpf /root/karaage3.1.7/conf/karaage3-wsgi.conf /etc/httpd/conf.d/karaage3-wsgi.conf
- sudo: true
+ become: true
  when: ansible_os_family == "RedHat"
 
 
 -
  name: "Set Secret Key"
  lineinfile: dest=/etc/karaage3/settings.py regexp="SECRET_KEY = ''" line="SECRET_KEY = '{{ karaageSecretKey }}'" state=present
- sudo: true
+ become: true
 
 -
  name: "Check karaage DB has been initialized or not"
@@ -135,59 +135,59 @@
 
 - name: enable wsgi
   shell: a2enmod wsgi
-  sudo: true
+  become: true
 
 - name: enable shibboleth
   shell: a2enmod shib2
-  sudo: true
+  become: true
 
 -
  name: " Create DB tables"
  shell: kg-manage migrate 
- sudo: true
+ become: true
  when: karaage_db_init.stdout.find("0") == 0
 
 -
  name: "Create IDP institutes (disable it as cache is not available)"
  shell: kg-idps /tmp/metadata.aaf.xml 
- sudo: true
+ become: true
  when: karaage_db_init.stdout.find("0") == 0
 
 -
  name: "Create projects"
  shell: kg_init /etc/karaage3/kginit.conf {{ admin_password }}
- sudo: true
+ become: true
  when: karaage_db_init.stdout.find("0") == 0
 
 - name: install postfix
   apt: name=postfix state=present
-  sudo: true
+  become: true
 
 - name: configure postfix
   template: src=main_cf.j2 dest=/etc/postfix/main.cf
-  sudo: true
+  become: true
   notify: restart postfix
 
 -
  name: "Reloading apache"
  service: name=apache2 state=reloaded
- sudo: true
+ become: true
  when: ansible_os_family == "Debian"
 
 -
  name: "Reloading apache"
  service: name=httpd state=reloaded
- sudo: true
+ become: true
  when: ansible_os_family == "RedHat"
 
 - name: "Start cron job for creating idps"
   cron: name=idps job=/usr/bin/kg-idps user=root day=*/1 state=present
-  sudo: true
+  become: true
 
 -
  name: "Templating username list"
  template: src=files/{{ item }} dest=/{{ user_id_file_dir }}/{{ item }}
  with_items: user_id_file
- sudo: true
+ become: true
  when: user_id_file is defined and user_id_file_dir is defined
 
diff --git a/roles/karaage3.1.17/tasks/main.yml b/roles/karaage3.1.17/tasks/main.yml
index d8657d099b129c88e7ee30713d2bfeddb602ec26..3db8200eb2e6478e019a70106db32ffa467e1bfd 100644
--- a/roles/karaage3.1.17/tasks/main.yml
+++ b/roles/karaage3.1.17/tasks/main.yml
@@ -2,7 +2,7 @@
 
  - name: "Copying the ldap ca cert file"
    template: src="files/{{ ldap_TLSCARoot }}" dest="/etc/apache2/ssl/certs/ldap_ca.cert.pem" mode=0644
-   sudo: true
+   become: true
    when: apache_cert_file is defined
 
  - include: prerequisitesDebian.yml
diff --git a/roles/karaage3.1.17/tasks/prerequisitesDebian.yml b/roles/karaage3.1.17/tasks/prerequisitesDebian.yml
index 2b21c1c82aa3cd80563fe33ec5cebd6cf3ad68bb..40115776cd0d9aeda37d1c55ddd27e61adcf05e0 100644
--- a/roles/karaage3.1.17/tasks/prerequisitesDebian.yml
+++ b/roles/karaage3.1.17/tasks/prerequisitesDebian.yml
@@ -1,12 +1,12 @@
 ---
 -
   name: "Installing prereq packages"
-  sudo: true
+  become: true
   apt: name={{ item }} update_cache=yes
   with_items:
    - debian-keyring
 - 
   apt: update_cache=yes
   name: "Upgrading apt..."
-  sudo: true
+  become: true
 
diff --git a/roles/ldapclient/handlers/main.yml b/roles/ldapclient/handlers/main.yml
index 6005eec0b09c629769272934a285635147bb3e90..947a057a10954a52a463a9a9278ce915740c0bb5 100644
--- a/roles/ldapclient/handlers/main.yml
+++ b/roles/ldapclient/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
 - name: restart sssd 
   service: name=sssd state=restarted
-  sudo: true
+  become: true
diff --git a/roles/ldapclient/tasks/installOpenLdap.yml b/roles/ldapclient/tasks/installOpenLdap.yml
index f0db145ecbf8f310695cdcaebb672d10baacf9d9..371381e960b8399a38400a13a075695682314c58 100644
--- a/roles/ldapclient/tasks/installOpenLdap.yml
+++ b/roles/ldapclient/tasks/installOpenLdap.yml
@@ -3,7 +3,7 @@
   yum: 
     name: ['openldap', 'openldap-clients', 'sssd', 'sssd-common', 'sssd-client', 'nss', 'nss-tools']
     state: present
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name: "Install open ldap package apt"
@@ -12,5 +12,5 @@
     - ldap-utils
     - sssd 
     - libpam-sss 
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
diff --git a/roles/ldapserver/tasks/main.yml b/roles/ldapserver/tasks/main.yml
index f7e0edf00a711c9c12b7e35e7e7b7d1e96893d20..8620de3d25fb44620be85f5b2a88b6d29a7981b1 100644
--- a/roles/ldapserver/tasks/main.yml
+++ b/roles/ldapserver/tasks/main.yml
@@ -9,19 +9,19 @@
 
 - name: install system packages apt
   apt: name={{ item }} state=installed update_cache=true
-  sudo: true
+  become: true
   with_items: "{{ system_packages }}"
   when: ansible_os_family == 'Debian'
 
 - name: install system packages yum
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items: "{{ system_packages }}"
   when: ansible_os_family == 'RedHat'
 
 - name: Fixed default configuration 
   lineinfile: dest=/etc/default/slapd regexp='^SLAPD_SERVICES="ldap:/// ldapi:///"' line='SLAPD_SERVICES="ldaps:/// ldap:/// ldapi:///"'
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
 
 - name: hash password
@@ -72,66 +72,66 @@
 
 - name: template manager.ldif
   template: src=manager_ldif.j2 dest=/tmp/manager.ldif mode=600
-  sudo: true
+  become: true
 - name: template manager2.ldif
   template: src=manager_ldif2.j2 dest=/tmp/manager2.ldif mode=600
-  sudo: true
+  become: true
 - name: template manager3.ldif
   template: src=manager_ldif3.j2 dest=/tmp/manager3.ldif mode=600
-  sudo: true
+  become: true
 
 - name: make ldap certs dir
   file: path={{ ldapCertDest | dirname }} state=directory owner={{ ldapuser }} group={{ ldapgroup }}
-  sudo: true
+  become: true
 
 - name: make ldap private dir
   file: path={{ ldapKeyDest | dirname }} state=directory owner={{ ldapuser }} group={{ ldapgroup }} mode=700
-  sudo: true
+  become: true
 
 - name: copy cert
   copy: src="files/{{ ldapCert }}" dest="{{ ldapCertDest }}"
-  sudo: true
+  become: true
 
 - name: copy ca cert
   copy: src="files/{{ ldapCAChain }}" dest="{{ ldapCAChainDest }}"
-  sudo: true
+  become: true
 
 - name: copy ca root cert
   copy: src="files/{{ ldap_TLSCARoot }}" dest="{{ ldapCARootDest }}"
-  sudo: true
+  become: true
   when: ldap_TLSCARoot is defined
 
 - name: copy key
   copy: src="files/{{ ldapKey }}" dest="{{ ldapKeyDest }}" mode=600 owner={{ ldapuser }} group={{ ldapgroup }} 
-  sudo: true
+  become: true
 
 - name: enable ssl centos
   lineinfile: regexp="SLAPD_LDAPS=no" state=present line="SLAPD_LDAPS=yes" dest=/etc/sysconfig/ldap
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat' and ansible_distribution_major_version < '7'
 
 - name: enable ssl centos 7
   lineinfile: regexp="^SLAPD_URLS=" state=present line="SLAPD_URLS='ldaps:/// ldapi:/// ldap:///'" dest=/etc/sysconfig/slapd
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat' and ansible_distribution_major_version >= '7'
 
 - name: check TLS config
   shell: "slapcat -b cn=config | grep 'olcTLSCertificateKeyFile: {{ ldapKeyDest }}'"
   ignore_errors: true
-  sudo: true
+  become: true
   register: tlsConfigured
 
 - name: copy db config
   copy: src=files/DB_CONFIG dest=/var/lib/ldap/DB_CONFIG owner=ldap group=ldap mode=644
-  sudo: true
+  become: true
 
 - name: start ldap
   service: name=slapd state=restarted
-  sudo: true
+  become: true
 
 - name: initialise server ssl
   shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/ssl.ldif -D cn=config 
-  sudo: true
+  become: true
   when: tlsConfigured|failed
 
 - name: Initialise cosine and ppolicy
@@ -142,7 +142,7 @@
    - nis
    - inetorgperson
   ignore_errors: true
-  sudo: true
+  become: true
 
 - name: copy the auxposixgroup schema
   copy: src="files/auxposixgroup.ldif" dest="{{ ldapDir }}/schema/auxposixgroup.ldif"
@@ -156,46 +156,46 @@
       
 - name: check ppolicy module loaded
   shell: slapcat -b cn=config | grep "olcModuleLoad. {.*}ppolicy"
-  sudo: true
+  become: true
   ignore_errors: true
   register: ppolicyModuleLoaded
 
 - name: load ppolicy module
   shell: ldapadd -Y EXTERNAL -H ldapi:/// -f /tmp/load_modules.ldif -D cn=config 
-  sudo: true
+  become: true
   when: ppolicyModuleLoaded|failed
 
 - name: check ppolicy overlay config
   shell: "slapcat -b cn=config | grep 'dn: olcOverlay={.*}ppolicy,olcDatabase={.*}.db,cn=config'"
   ignore_errors: true
-  sudo: true
+  become: true
   register: ppolicyOverlayConfigured
 
 - name: add ppolicy overlay
   shell: ldapadd -Y EXTERNAL -H ldapi:/// -f /tmp/ppolicy_overlay.ldif -D cn=config 
-  sudo: true
+  become: true
   when: ppolicyOverlayConfigured|failed
 
 - name: check refint module loaded
   shell: slapcat -b cn=config | grep "olcModuleLoad. {.*}refint"
-  sudo: true
+  become: true
   ignore_errors: true
   register: refintModuleLoaded
 
 - name: load refint module
   shell: ldapadd -Y EXTERNAL -H ldapi:/// -f /tmp/load_refint.ldif -D cn=config 
-  sudo: true
+  become: true
   when: refintModuleLoaded|failed
 
 - name: check memberof module loaded
   shell: slapcat -b cn=config | grep "olcModuleLoad. {.*}memberof"
-  sudo: true
+  become: true
   ignore_errors: true
   register: memberofModuleLoaded
 
 - name: load memberof module
   shell: ldapadd -Y EXTERNAL -H ldapi:/// -f /tmp/load_memberof.ldif -D cn=config 
-  sudo: true
+  become: true
   when: memberofModuleLoaded|failed
 
 - name: check member of config
@@ -219,28 +219,28 @@
 - name: check Manager config
   shell: "slapcat -b cn=config | grep 'olcRootDN: {{ ldapManager }}'"
   ignore_errors: true
-  sudo: true
+  become: true
   register: managerConfigured
 
 - name: initialise server manager
   shell:  ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/manager.ldif -D cn=config
-  sudo: true
+  become: true
   when: managerConfigured|failed
 - name: initialise server manager
   shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/manager2.ldif -D cn=config 
-  sudo: true
+  become: true
   ignore_errors: true
   when: managerConfigured|failed
 - name: initialise server manager
   shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/manager3.ldif -D cn=config 
-  sudo: true
+  become: true
   when: managerConfigured|failed
 
 # slapcat does a line wrap at character 78. Don't attempt to match on {{ ldapManager }} as it will cross two lines
 - name: check ACL config
   shell: "slapcat -b cn=config | grep 'olcAccess:' | grep 'cn=Manager'"
   ignore_errors: true
-  sudo: true
+  become: true
   register: aclConfigured
 
 - name: template acls.ldif
@@ -248,7 +248,7 @@
 
 - name: initialise server acls
   shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/acls.ldif -D cn=config
-  sudo: true
+  become: true
   when: aclConfigured|failed
 
 - name: check DIT config
@@ -309,11 +309,11 @@
 
 - name: template binddn.ldif
   template: src=binddn_ldif.j2 dest=/tmp/binddn.ldif mode=600
-  sudo: true
+  become: true
 
 - name: add binddn
   shell: ldapadd -x -D {{ ldapManager }} -w {{ ldapManagerPassword }} -x -H ldap://localhost -f /tmp/binddn.ldif
-  sudo: true
+  become: true
   when: binddnConfigured|failed
 
 - name: check pwpolicies config
diff --git a/roles/link_directories/tasks/main.yml b/roles/link_directories/tasks/main.yml
index 091d49c29e80bcb470bd909a41c1ff84f2ab3b75..fd9f2f83f4c1f56b2ee912ec6116513fdd5bfeb2 100644
--- a/roles/link_directories/tasks/main.yml
+++ b/roles/link_directories/tasks/main.yml
@@ -10,7 +10,7 @@
 - name: mv
   command: mv "{{ dest }}" "{{ dest }}_old"
   when: stat_r.stat.exists and stat_r.stat.isdir
-  sudo: true
+  become: true
 
 - name: stat 
   stat: path={{ dest }}
@@ -22,4 +22,4 @@
 - name: link
   file: src="{{ src }}" dest="{{ dest }}" state=link
   when: not stat_r.stat.exists
-  sudo: true
+  become: true
diff --git a/roles/lmod/tasks/main.yml b/roles/lmod/tasks/main.yml
index 0b7aec7b6ed8404ceed8ec60bf18f0e006468ce0..9e2ac4af909db08388e570ca586bec83e0889118 100644
--- a/roles/lmod/tasks/main.yml
+++ b/roles/lmod/tasks/main.yml
@@ -11,8 +11,8 @@
     - rsync
     - gcc
     - lua-devel
-  sudo: true
-  when: 
+  become: true
+  when:
    - '"CentOS" in ansible_distribution'
 
 - name: install lua RHEL7
@@ -25,11 +25,11 @@
     - rsync
     - gcc
     - lua-devel
-  sudo: true
-  when:
   when:
    - '"DGX" in ansible_product_name'
    - '"RedHat" in ansible_distribution'
+  become: true
+
 
 - name: install lua debian
   apt: name={{ item }} state=installed
@@ -42,7 +42,7 @@
     - liblua5.2-0
     - liblua5.2-dev
     - tcl
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
 
 - name: stat lmod
@@ -53,7 +53,7 @@
 - name: Download LMOD
   get_url:
     url=http://consistency0/src/Lmod-{{ lmod_version }}.tar.bz2
-    dest={{source_dir}}/Lmod-{{ lmod_version }}.tar.bz2
+    dest={{ source_dir }}/Lmod-{{ lmod_version }}.tar.bz2
     mode=0444
   when: not lmodstat.stat.exists
 
@@ -62,11 +62,11 @@
     src={{ source_dir }}/Lmod-{{ lmod_version }}.tar.bz2
     dest={{ source_dir }}
     copy=no
-    creates={{source_dir}}/Lmod-{{ lmod_version }}/README
+    creates={{ source_dir }}/Lmod-{{ lmod_version }}/README
   when: not lmodstat.stat.exists
 
 - name: Compile and install Lmod
   shell: cd {{ source_dir }}/Lmod-{{ lmod_version }}; ./configure --prefix={{ soft_dir }} --with-mpathSearch=YES --with-caseIndependentSorting=YES && make install LUA_INCLUDE={{ lua_include }}
   args:
     creates: "{{ soft_dir }}/lmod/{{ lmod_version }}"
-  sudo: true
+  become: true
diff --git a/roles/lustre-client/tasks/main.yml b/roles/lustre-client/tasks/main.yml
index 6bc03f0e956977421c18da2528b5a06ea1838bb8..0729e5ecd38faa7ee7592341c517914d336aeea2 100644
--- a/roles/lustre-client/tasks/main.yml
+++ b/roles/lustre-client/tasks/main.yml
@@ -9,17 +9,17 @@
 
 #- name: install rpms
 #  yum: name="/tmp/{{ item }}"
-#  sudo: true
+#  become: true
 #  with_items: "{{ lustre_pkgs }}"
 
 - name: install rpms
   yum:  name=/tmp/lustre-client-modules-2.7.65-3.10.0_327.4.4.el7.x86_64_gab38c3a.x86_64.rpm
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: install rpms
   yum:  name=/tmp/lustre-client-2.7.65-3.10.0_327.4.4.el7.x86_64_gab38c3a.x86_64.rpm
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 # instructions to build these debs:
@@ -44,22 +44,22 @@
 
 - name: install  linux-patch-lustre_2.7.62-1_all.deb
   apt: deb="/tmp/linux-patch-lustre_2.7.62-1_all.deb"
-  sudo: true
+  become: true
   when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version == "14"
 
 - name: install lustre-client-modules-3.13.0-83-generic_2.7.62-1_amd64.deb
   apt: deb="/tmp/lustre-client-modules-3.13.0-83-generic_2.7.62-1_amd64.deb"
-  sudo: true
+  become: true
   when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version == "14"
 
 - name: install lustre-utils_2.7.62-1_amd64.deb
   apt: deb="/tmp/lustre-utils_2.7.62-1_amd64.deb"
-  sudo: true
+  become: true
   when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version == "14"
 
 - name: "Mount lustre filesystems"
   mount: name="{{ item.mntpt }}" src="{{ item.servers }}"/"{{ item.src }}" state="mounted" fstype="lustre" opts="_netdev,flock" 
-  sudo: true
+  become: true
   with_items: "{{ mntlist }}"
 
 
diff --git a/roles/make_filesystems/tasks/main.yml b/roles/make_filesystems/tasks/main.yml
index deab4d7bd64144ab6ae1ab53736a0632fab155bd..41c096e1d48f0428e522151be604107f3af67d80 100644
--- a/roles/make_filesystems/tasks/main.yml
+++ b/roles/make_filesystems/tasks/main.yml
@@ -10,44 +10,46 @@
 - name: format volumes
   filesystem: fstype={{ item.fstype }} dev={{ hostvars[inventory_hostname]['ansible_host_volumes'][item.name]['dev'] }}
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
+  become_user: root
   when: cinder
 
 - name: format volumes
   mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[inventory_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
+  become_user: root
   when: cinder
 
 - name: format volumes
   filesystem: fstype={{ item.fstype }} dev={{ item.name }}
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
   when: not cinder
 
 - name: format volumes
   mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ item.name }} state=mounted
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
   when: not cinder
 
 - name: symlink volumes
   file: force=yes state=link src="{{ item.mntpt }}" path="{{ item.linkto }}"
   when: item.linkto is defined
   with_items: "{{ volumes }}"
-  sudo: true
+  become: true
 
 
 #- name: Format File Systems
 #  filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
 #  with_items: mkFileSystems
-#  sudo: true
+#  become: true
 #  when: mkFileSystems is defined
 #
 #- name: Mount device 
 #  mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
 #  with_items: mountFileSystems
-#  sudo: true
+#  become: true
 #  when: mountFileSystems is defined
 #
 #
diff --git a/roles/mellanox_drivers/tasks/main.yml b/roles/mellanox_drivers/tasks/main.yml
index 756c3b1aff462dcc22238fe7d05c6540cf2828cf..f8167fd22d2c9f44604484d170e64b41ca944249 100644
--- a/roles/mellanox_drivers/tasks/main.yml
+++ b/roles/mellanox_drivers/tasks/main.yml
@@ -15,6 +15,7 @@
   register: drivers_installed
   ignore_errors: true
   check_mode: no
+  changed_when: False
 
 - name: debug - print out installed driver
   debug: var=drivers_installed
@@ -41,7 +42,7 @@
   set_fact: 
     install_now: true 
     reboot_now: true
-  when: drivers_installed | failed
+  when: drivers_installed.failed
 
 - name: debug
   debug: var=driver_version
@@ -53,7 +54,7 @@
   set_fact: 
     install_now: true 
     reboot_now: true
-  when: driver_version | failed or not desired_driver_version.stdout in driver_version.stdout
+  when: driver_version.failed or not desired_driver_version.stdout in driver_version.stdout
 
 - name: debug - print out value of install_now
   debug: var=install_now
diff --git a/roles/mellanox_drivers/vars/mellanoxVars.yml b/roles/mellanox_drivers/vars/mellanoxVars.yml
index 6aa643d58ac88337df835db8fd81dd6e3b84fb17..4fb42fdb4a61d9550761bf4ba9616f83957b47b2 100644
--- a/roles/mellanox_drivers/vars/mellanoxVars.yml
+++ b/roles/mellanox_drivers/vars/mellanoxVars.yml
@@ -1,4 +1,5 @@
 ---
  #note. do not add '.tgz' to driver src. done in playbook
 #MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.4-1.0.0.0-rhel7.4-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
-MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.5-1.0.1.0-rhel7.6-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
+#MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.5-1.0.1.0-rhel7.6-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
+MELLANOX_DRIVER_SRC: "{% if ansible_os_family == 'RedHat'  %}MLNX_OFED_LINUX-4.7-3.2.9.0-rhel7.7-x86_64{% elif ansible_os_family == 'Debian' %}MLNX_OFED_LINUX-3.1-1.0.3-ubuntu14.04-x86_64{% endif %}"
diff --git a/roles/modulefiles/tasks/main.yml b/roles/modulefiles/tasks/main.yml
index 38a3a8c2b4420e54db8ca8dd854d913cb929b98c..05e8ca7af86ded06c96965e7eb9bdfa43ceb04d9 100644
--- a/roles/modulefiles/tasks/main.yml
+++ b/roles/modulefiles/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: install environment-modules
   yum: name=environment-modules state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name: install environment-modules
   apt: name=environment-modules state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
 
 - name: add /usr/local/Modules to the module file path
@@ -15,7 +15,7 @@
     dest: /usr/share/Modules/init/.modulespath
     line: /usr/local/Modules/modulefiles
   ignore_errors: true
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 # for some reason ubuntu uses lowercase modules
@@ -25,5 +25,5 @@
     dest: /usr/share/modules/init/.modulespath
     line: /usr/local/Modules/modulefiles
   ignore_errors: true
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
diff --git a/roles/move_homedir/tasks/main.yml b/roles/move_homedir/tasks/main.yml
index 61d6b03a2b82e920011c3c03a773509d3b77bfce..9493ebf98c8ec25cab3e7ae5d41942953ec9a26c 100644
--- a/roles/move_homedir/tasks/main.yml
+++ b/roles/move_homedir/tasks/main.yml
@@ -1,6 +1,6 @@
 - name: make /local_home
   file: path=/local_home owner=root group=root state=directory
-  sudo: true 
+  become: true 
 
 - name: stat the local_home path
   stat: path=/local_home/{{ ansible_user }}
@@ -9,7 +9,7 @@
 - name: copy the {{ ansible_user }} home
   shell: cp -ar /home/{{ ansible_user }} /local_home
   ignore_errors: true
-  sudo: true
+  become: true
   register: home_copied
   when: not local_home_path.stat.exists
 
@@ -21,7 +21,7 @@
     regexp: '{{ ansible_user }}:x:(.*):(.*):(.*):/home/{{ ansible_user }}:(.*)'
     line: '{{ ansible_user }}:x:\1:\2:\3:/local_home/{{ ansible_user }}:\4'
     backrefs: yes
-  sudo: true
+  become: true
   register: edit
 
 
diff --git a/roles/mysql/handlers/main.yml b/roles/mysql/handlers/main.yml
index be88a2c20338d12fe9604eed8b612f91d886e0e8..a934d6955d71ee34415abe3d39d478e296518a3f 100644
--- a/roles/mysql/handlers/main.yml
+++ b/roles/mysql/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
 - name: "Restart MySQL" 
   service: name={{ sqlServiceName }} state=restarted
-  sudo: true
+  become: true
diff --git a/roles/mysql/tasks/mysql_client.yml b/roles/mysql/tasks/mysql_client.yml
index bffd83e36077055f0a1224999eded1ae17444fdc..a5b50acc30f953279d58e918e2a0efd1849af943 100644
--- a/roles/mysql/tasks/mysql_client.yml
+++ b/roles/mysql/tasks/mysql_client.yml
@@ -2,11 +2,11 @@
 - name: "Installing MySQL Debian"
   apt: name="{{ item }}" update_cache=yes cache_valid_time=3600 state=present
   with_items: "{{ client_packages }}"
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: Installing MySQL RedHat
   yum: name="{{ item }}" state=present
   with_items: "{{ client_packages }}"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
diff --git a/roles/mysql/tasks/mysql_server.yml b/roles/mysql/tasks/mysql_server.yml
index 6a7452966172636039f9eca87c57b6032c563722..f8edd4e66ceed4323aa0ad83364ec890b93e80c1 100644
--- a/roles/mysql/tasks/mysql_server.yml
+++ b/roles/mysql/tasks/mysql_server.yml
@@ -2,40 +2,45 @@
 - name: "Installing MySQL Debian"
   apt: name="{{ item }}" update_cache=yes cache_valid_time=3600 state=present
   with_items: "{{ server_packages }}"
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: Installing MySQL RedHat
   yum: name={{ item }}
   with_items: "{{ server_packages }}"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
+  
+- name: make sure mysql conf directory exists
+  file: dest=/etc/mysql/conf.d state=directory
+  become: true
+  register: mysqldb_confdir_create
 
 - name: "Starting MySQL"
   service: name={{ sqlServiceName }} state=started enabled=true
-  sudo: true
-
-- name: make sure mysql conf directory exists
-  file: dest=/etc/mysql/conf.d state=directory
-  sudo: true
+  become: true
 
-- name: "Adding root"
-  sudo: true
-  mysql_user: name=root host="{{ item }}" password="{{ mysql_root_password }}" login_user=root login_password="{{ mysql_root_password }}" check_implicit_admin=yes
-  with_items:
-    - "{{ ansible_hostname }}"
-    - 127.0.0.1
-    - ::1
-    - localhost
+#- name: "Adding root"
+#  become: true
+#  mysql_user: name=root host="{{ item }}" password="{{ mysql_root_password }}" login_user=root login_password="{{ mysql_root_password }}" check_implicit_admin=yes
+#  with_items:
+#    - "{{ ansible_hostname }}"
+#    - 127.0.0.1
+#    - ::1
+#    - localhost
 
+- name: update mysql root password for all root accounts  # this will only work if a completely fresh db gets installed because it gets shipped with a blank root pw
+  mysql_user: name=root host=localhost password={{ mysql_root_password }} login_user=root
+  when: mysqldb_confdir_create.changed
+  
 - name: "Adding user database"
   mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }} 
 
 - name: "Giving priviliges to user"
-  mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL state=present
+  mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
   when: mysql_user_host is defined
 
 - name: "Giving priviliges to user"
-  mysql_user: name={{ mysql_user_name }} host={{ hostvars[item].ansible_fqdn }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL state=present
+  mysql_user: name={{ mysql_user_name }} host={{ hostvars[item].ansible_fqdn }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
   with_items: "{{ mysql_user_hosts_group }}"
   when: mysql_user_hosts_group is defined
diff --git a/roles/nagios_config/tasks/main.yml b/roles/nagios_config/tasks/main.yml
index 2743e363568ff55f7e7c3ecd68828d7cff9a86e9..5b42c49aaefe4a4f87f22021d25c974c03dc43a5 100644
--- a/roles/nagios_config/tasks/main.yml
+++ b/roles/nagios_config/tasks/main.yml
@@ -10,18 +10,18 @@
   - 'contacts'
   - 'generic-host'
   - 'generic-service'
-  sudo: true
+  become: true
 
 - name: remove unwanted configure files
   file: path=/etc/nagios3/conf.d/{{ item }}_nagios2.cfg state=absent
   with_items:
   - 'localhost'
   - 'extinfo'
-  sudo: true
+  become: true
 
 - name: change cgi config 
   copy: src=cgi.cfg dest=/etc/nagios3/cgi.cfg
-  sudo: true
+  become: true
 
 - name: change the default email command
   copy: src=commands.cfg dest=/etc/nagios3/commands.cfg
@@ -30,5 +30,5 @@
 
 - name: nagios restart
   service: name=nagios3 state=restarted
-  sudo: true
+  become: true
 
diff --git a/roles/nagios_monitored/tasks/main.yml b/roles/nagios_monitored/tasks/main.yml
index 8e4ac078a1d9fe3ad3d1e908ebb1aa236a180bf2..e69f82f9c1a3c377832745366586762724657cad 100644
--- a/roles/nagios_monitored/tasks/main.yml
+++ b/roles/nagios_monitored/tasks/main.yml
@@ -1,19 +1,19 @@
 ---
 - name: create nagios user
   user: name=nagios system=yes createhome=yes home={{ nagios_home }} shell=/bin/bash
-  sudo: true
+  become: true
 
 - name: create ssh directory
   file: path={{ nagios_home }}/.ssh state=directory owner=nagios mode=700
-  sudo: true
+  become: true
 
 - name: authorize_key
   authorized_key: user=nagios key="{{ lookup('file', 'files/nagios_public_key') }}" path="{{ nagios_home }}/.ssh/authorized_keys"
-  sudo: true
+  become: true
 
 - name: make scripts directory
   file: path={{ nagios_home }}/scripts state=directory owner=nagios mode=755
-  sudo: true
+  become: true
 
 - name: install monitor scripts
   copy: dest={{ nagios_home }}/scripts/{{ item }} src=files/scripts/{{ item }} mode=755
@@ -31,5 +31,5 @@
   - check_ldap                   
   - check_munge    
   - check_slurm     
-  sudo: true
+  become: true
   
diff --git a/roles/nagios_server/handlers/main.yml b/roles/nagios_server/handlers/main.yml
index fb97f1971ff637b1d7e27713e2386c53603d6a8c..d0a0c30f115363c1b5c70c7f199f3f8262112bd4 100644
--- a/roles/nagios_server/handlers/main.yml
+++ b/roles/nagios_server/handlers/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: restart apache2 
   service: name=apache2 state=restarted
-  sudo: true
+  become: true
 
 - name: restart postfix 
   service: name=postfix state=restarted
-  sudo: true
+  become: true
diff --git a/roles/nagios_server/tasks/main.yml b/roles/nagios_server/tasks/main.yml
index aea8c85d8c50a9c8644fdd5083406f33108d9d16..b4f8fc37f40e1bd92927bb8f396cf31e5fcf1d01 100644
--- a/roles/nagios_server/tasks/main.yml
+++ b/roles/nagios_server/tasks/main.yml
@@ -1,15 +1,15 @@
 ---
 - name: create directory
   file: dest=/var/lib/nagios/.ssh state=directory
-  sudo: true
+  become: true
 
 - name: create nagios user
   user: name=nagios system=yes createhome=yes home={{ nagios_home }} shell=/bin/bash
-  sudo: true
+  become: true
 
 - name: copy priv key
   template: src={{ monitor_privkey_file }} dest={{ nagios_home }}/.ssh/id_rsa mode=600 owner={{ nagios_username }}
-  sudo: true
+  become: true
 
 - name: install packages
   apt: name={{ item }} state=present
@@ -17,7 +17,7 @@
   - nagios3
   - python-passlib
   - libapache2-mod-webauthldap
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: enable apache modules
@@ -35,5 +35,5 @@
 
 - name: force restart
   service: name=nagios3 state=started
-  sudo: true
+  become: true
 
diff --git a/roles/nat_server/tasks/main.yml b/roles/nat_server/tasks/main.yml
index ecc647a7b59e5a527bd096b2e936aec18c44f940..1e7fd39b588a527b2c65db2246510e87afea19c8 100644
--- a/roles/nat_server/tasks/main.yml
+++ b/roles/nat_server/tasks/main.yml
@@ -41,6 +41,6 @@
   shell: iptables-restore
   become: true
   become_user: root
-  when: rule_changed | changed
+  when: rule_changed.changed
 
 # make sure ip forwarding is enabled
diff --git a/roles/nfs-client/handlers/main.yml b/roles/nfs-client/handlers/main.yml
index 76b3a6a028b2f97a38890188dbd0300056fd5d91..b05f9fbf8929597fb98eca15a29a52f843b32e65 100644
--- a/roles/nfs-client/handlers/main.yml
+++ b/roles/nfs-client/handlers/main.yml
@@ -1,9 +1,9 @@
 ---
 - name: restart rpcbind
   service: name=rpcbind state=restarted
-  sudo: true
+  become: true
 
 - name: restart idmap 
   service: name=rpcidmapd state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat" and ansible_distribution_major_version < 7
diff --git a/roles/nfs-client/tasks/main.yml b/roles/nfs-client/tasks/main.yml
index b08ac76f7fb9f047f97ce6cfe609e8d4d7b46c5a..1a3ea5fd54a102c95ec8276b8e59d6187f19ac7d 100644
--- a/roles/nfs-client/tasks/main.yml
+++ b/roles/nfs-client/tasks/main.yml
@@ -1,15 +1,16 @@
 ---
 - name: install dependencies
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - libnfsidmap 
     - nfs-utils
+    - nfstest.noarch
   when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7"
 
 - name: install dependencies
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - nfs-utils-lib
   when: ansible_os_family == "RedHat" and ansible_distribution_major_version < "7"
diff --git a/roles/nfs-client/tasks/mountFileSystem.yml b/roles/nfs-client/tasks/mountFileSystem.yml
index 80d53d356deaace73a106d551aacc32b54adbac6..80dc3cb332385fb6154fdef6ded63ca748a47689 100644
--- a/roles/nfs-client/tasks/mountFileSystem.yml
+++ b/roles/nfs-client/tasks/mountFileSystem.yml
@@ -3,7 +3,7 @@
 - name: "Mounting NFS mounts"
   mount: name={{ item.name }} src="{{ item.ipv4 }}:{{ item.src }}" fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
   with_items: "{{ nfsMounts }}"
-  sudo: true 
+  become: true 
   ignore_errors: true
   register: firstMount
   when: nfsMounts is defined
diff --git a/roles/nfs-common/tasks/aptPackages.yml b/roles/nfs-common/tasks/aptPackages.yml
index 3244c7d1890613902b95c7f572059d905619b79e..d8e07d6195e9f1012970c375cc8b6c5c236570cc 100644
--- a/roles/nfs-common/tasks/aptPackages.yml
+++ b/roles/nfs-common/tasks/aptPackages.yml
@@ -5,5 +5,5 @@
     - nfs-common
     - nfs-kernel-server
   apt: "name={{ item }} state=present"
-  sudo: true
+  become: true
 
diff --git a/roles/nfs-common/tasks/main.yml b/roles/nfs-common/tasks/main.yml
index 5f42cf171289e08d8787e24aa7017505f0050a4f..2595b4dc657fa6d2336158b2dcb9e6c6d4b88785 100644
--- a/roles/nfs-common/tasks/main.yml
+++ b/roles/nfs-common/tasks/main.yml
@@ -7,4 +7,4 @@
 
 - name: setup idmap.conf
   template: src=idmapd.conf.j2 dest=/etc/idmapd.conf
-  sudo: true
+  become: true
diff --git a/roles/nfs-common/tasks/yumPackages.yml b/roles/nfs-common/tasks/yumPackages.yml
index 36da380ff649f49f291e153a26e352651111b4c3..6a8fd12b59d258f9ad021653350345729ffcb2e2 100644
--- a/roles/nfs-common/tasks/yumPackages.yml
+++ b/roles/nfs-common/tasks/yumPackages.yml
@@ -5,4 +5,4 @@
     - bind-utils
     - nfs-utils
   yum: "name={{ item }} state=present"
-  sudo: true
+  become: true
diff --git a/roles/nfs-server/tasks/main.yml b/roles/nfs-server/tasks/main.yml
index c317d2ae0b5fe6668f288ec074ae9f8dc0eff25c..f8f3a10aff29b79ae5dfaea5ac8c6a60ea5594e9 100644
--- a/roles/nfs-server/tasks/main.yml
+++ b/roles/nfs-server/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: install nfs kernel server
   apt: name=nfs-kernel-server state=present
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - include: startServer.yml
diff --git a/roles/nfs-server/tasks/startServer.yml b/roles/nfs-server/tasks/startServer.yml
index 036058d3cec5f88a899f2afbdd5c9359f9564fcd..7ac79c0fa9ad39b43463dc2a3c4f6e8b2f1e0304 100644
--- a/roles/nfs-server/tasks/startServer.yml
+++ b/roles/nfs-server/tasks/startServer.yml
@@ -1,7 +1,7 @@
 --- 
 - name: "Create exports if necessary"
   file: dest={{ item.src }} state=directory mode=755 owner=root group=root
-  sudo: true
+  become: true
   with_items: "{{ exportList }}"
   ignore_errors: true
 
@@ -12,24 +12,24 @@
 
 - name: "Starting rpcbind"
   service: "name=rpcbind state=started enabled=true"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: "Start the Server"
   service: "name=nfs state=started"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: "Enable the Server at boot"
   service: "name=nfs enabled=true"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"  and ansible_distribution_major_version < "7"
 - name: "Enable the Server at boot"
   service: "name=nfs-server.service enabled=true"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"  and ansible_distribution_major_version == "7"
 
 - name: "Start the Server"
   service: "name=nfs-kernel-server state=started enabled=true"
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
diff --git a/roles/nhc/tasks/main.yml b/roles/nhc/tasks/main.yml
index 6b0aeb736af495424674e093b4111882614e45f2..fdc90302e8744e719ec7713a94ae5a4ce3d5ca21 100644
--- a/roles/nhc/tasks/main.yml
+++ b/roles/nhc/tasks/main.yml
@@ -33,22 +33,22 @@
 
 - name: install nhc 
   shell: make install
-  sudo: true
+  become: true
   args:
       chdir: /tmp/nhc-{{ nhc_version }}
   when: not nhc_binary.stat.exists
 
 - name: ensure sysconfig dir exists
   file: dest=/etc/sysconfig state=directory owner=root group=root mode=755
-  sudo: true
+  become: true
 
 - name: copy nhc sysconfig script
   template: dest=/etc/sysconfig/nhc src=nhc.sysconfig.j2 mode=644
-  sudo: true
+  become: true
 
 - name: copy nhc log rotate script
   template: dest=/etc/logrotate.d/nhc src=nhclog.j2 mode=644
-  sudo: true
+  become: true
 
 - name: install nhc config file
   copy: src=nhc.conf dest={{ nhc_dir }}/etc/nhc/{{ nhc_config_file }}
diff --git a/roles/openssh/handlers/main.yml b/roles/openssh/handlers/main.yml
index 484369b0bda79ad51a5cd93a13319259d25a6949..145dee27d85f4629d6472c67dec385c45a5835be 100644
--- a/roles/openssh/handlers/main.yml
+++ b/roles/openssh/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
   - name: restart openssh 
     service: name={{ sshd_name }} enabled=yes state=restarted
-    sudo: true
+    become: true
diff --git a/roles/openssh/tasks/installSsh.yml b/roles/openssh/tasks/installSsh.yml
index 3dd138834575ec3066e2bdf795bb74b65a3e4a48..7911f9fad6f0d266c96cc599bf260da80d6aae27 100644
--- a/roles/openssh/tasks/installSsh.yml
+++ b/roles/openssh/tasks/installSsh.yml
@@ -1,6 +1,6 @@
 - name: install deps
   apt: name={{ item }} state=installed update_cache=yes
-  sudo: true
+  become: true
   with_items:
     - gcc 
     - make 
@@ -29,19 +29,19 @@
 
 - name: install ssh 
   shell: make install
-  sudo: true
+  become: true
   args:
     chdir: /tmp/openssh-{{ ssh_version }}
     creates: "{{ ssh_dir }}/bin/ssh"
 
 - name: copy init script
   template: dest=/etc/init.d/{{ sshd_name }} src=ssh.initd.centos.j2 mode=755
-  sudo: true   
+  become: true   
   when: ansible_os_family == "RedHat"
 
 - name: copy init script
   template: dest=/etc/init.d/{{ sshd_name }} src=ssh.initd.debian.j2 mode=755
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: check config file
@@ -51,7 +51,7 @@
 
 - name: create config file link
   file: src={{ ssh_dir }}/etc dest=/etc/ssh state=link force=yes
-  sudo: true
+  become: true
   when: ssh_config.stat.exists == false
 
 - name: edit config file replace"
@@ -61,7 +61,7 @@
     - {line: "HostKey /etc/ssh/ssh_host_dsa_key", regexp: "^#HostKey /etc/ssh/ssh_host_dsa_key"}
     - {line: "#GSSAPIAuthentication yes", regexp: "^GSSAPIAuthentication yes"}
     - {line: "#GSSAPICleanupCredentials yes", regexp: "^GSSAPICleanupCredentials yes"}
-  sudo: true
+  become: true
 
 - name: edit config file attache lines"
   lineinfile: dest=/etc/ssh/sshd_config line="{{ item }}" 
@@ -69,5 +69,5 @@
     - "KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha1,diffie-hellman-group1-sha1"
     - "Ciphers aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com,aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc,aes192-cbc,aes256-cbc,arcfour"
   notify: restart openssh
-  sudo: true
+  become: true
   
diff --git a/roles/opensslCA/tasks/main.yml b/roles/opensslCA/tasks/main.yml
index 3655359c3c5323e8e417fc4cbea632b470ca3286..854bfb0368d3667ec98cd9454dfd9b24b9058a37 100644
--- a/roles/opensslCA/tasks/main.yml
+++ b/roles/opensslCA/tasks/main.yml
@@ -1,37 +1,37 @@
 ---
 - name : make ca dir
   file: path={{ x509cadir }} owner=root group=root state=directory
-  sudo: true
+  become: true
 
 - name : make newcerts dir
   file: path={{ x509cadir }}/newcerts owner=root group=root state=directory
-  sudo: true
+  become: true
 
 - name : make private dir
   file: path={{ x509cadir }}/private mode=700 owner=root group=root state=directory
-  sudo: true
+  become: true
 
 - name: initialise ca
   shell: echo 01 > serial ; touch index.txt
   args: 
     chdir: "{{ x509cadir }}"
     creates: index.txt
-  sudo: true
+  become: true
 
 - name: template openssl.cnf
   template: dest={{ x509cadir }}/openssl.cnf src=openssl_cnf.j2
-  sudo: true
+  become: true
 
 - name: generate key
   shell: openssl genrsa -out private/cakey.pem 2048
   args:
     chdir: "{{ x509cadir }}"
     creates: private/cakey.pem
-  sudo: true
+  become: true
 
 - name: generate cert
   shell: openssl req -new -x509 -key private/cakey.pem -out cacert.pem -days 3650 -config openssl.cnf
   args:
     chdir: "{{ x509cadir }}"
     creates: cacert.pem
-  sudo: true
+  become: true
diff --git a/roles/opensslServer/tasks/main.yml b/roles/opensslServer/tasks/main.yml
index d49e1a770e7fc29859a9f7bc5ecb30849ad44de8..75a0e782038812bef054146f85f43cedba45198a 100644
--- a/roles/opensslServer/tasks/main.yml
+++ b/roles/opensslServer/tasks/main.yml
@@ -2,32 +2,32 @@
 
 - name: install system packages apt
   apt: name=openssl state=installed update_cache=true
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
 
 - name: install system packages yum
   yum: name=openssl state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name : make csr dir
   file: path={{ csrdir }} owner=root group=root state=directory
-  sudo: true
+  become: true
 
 - name : make private dir
   file: path={{ csrdir }}/private mode=700 owner=root group=root state=directory
-  sudo: true
+  become: true
 
 - name: template openssl.cnf
   template: dest={{ csrdir }}/openssl.cnf src=openssl_cnf.j2
-  sudo: true
+  become: true
 
 - name: generate key
   shell: openssl genrsa -out private/key.pem 2048
   args:
     chdir: "{{ csrdir }}"
     creates: private/key.pem
-  sudo: true
+  become: true
   register: needCert
 
 - name: generate csr
@@ -35,7 +35,7 @@
   args:
     chdir: "{{ csrdir }}"
     creates: "{{ certname }}.csr"
-  sudo: true
+  become: true
   when: needCert|changed
 
 #
@@ -64,7 +64,7 @@
 
 - name: sign certs
   shell: yes | openssl ca -config {{ cadir }}/openssl.cnf -days 3650 -in /tmp/{{ certname }}.csr -out /tmp/{{ certname }}.cert
-  sudo: true
+  become: true
   delegate_to: "{{ cahost }}"
   when: needCert|changed
 
diff --git a/roles/pam_slurm/tasks/main.yml b/roles/pam_slurm/tasks/main.yml
deleted file mode 100644
index 8a13ab12a174e0fbb953a12ffcdaae7eccf8a060..0000000000000000000000000000000000000000
--- a/roles/pam_slurm/tasks/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: "Copy access.conf"
-  template: src=access.conf.j2 dest=/etc/security/access.conf
-  become: true
-  become_user: root
-
-- name: "Copy password sshd pam config"
-  template: src=sshd.j2 dest=/etc/pam.d/sshd
-  become: true
-  become_user: root
-
diff --git a/roles/pam_sshd/README.md b/roles/pam_sshd/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f4546b473b4943f5a5b484d3868fe739ffd004f
--- /dev/null
+++ b/roles/pam_sshd/README.md
@@ -0,0 +1,9 @@
+Install an sshd PAM config definition
+
+We leverage pam_access to ensure that the ec2-user and members of the systems group can always log in.
+
+We use nologin on the login nodes during maintenance to restrict user logins.
+
+We use pam_slurm_adopt on the compute nodes so that only users with running jobs can log in to a given node.
+
+The default is to configure the node as a login node. Set the variable computenodepam to configure it as a compute node (i.e. enable pam_slurm_adopt).
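+
+Example use in a play's role list (a sketch only; computenodepam matches the variable checked in tasks/main.yml, and host targeting is left to the site):
+- { role: pam_sshd }                        # login node (default): pam_access rules plus pam_nologin
+- { role: pam_sshd, computenodepam: true }  # compute node: installs the pam_slurm_adopt sshd config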
diff --git a/roles/pam_sshd/tasks/main.yml b/roles/pam_sshd/tasks/main.yml
index c445a9267c2fafc48cbdfa4bec31a1c455e7575d..25e9b257afaf38a05b7d675ae2556038ae84bd45 100644
--- a/roles/pam_sshd/tasks/main.yml
+++ b/roles/pam_sshd/tasks/main.yml
@@ -1,5 +1,18 @@
+---
+- name: "Copy access.conf"
+  template: src=access.conf.j2 dest=/etc/security/access.conf
+  become: true
+  become_user: root
+
 - name: "Copy password sshd pam config"
-  template: src=sshd.j2 dest=/etc/pam.d/sshd
+  template: src=loginnodes_sshd.j2 dest=/etc/pam.d/sshd
   become: true
   become_user: root
+  when: computenodepam is undefined or not computenodepam 
+
 
+- name: "Copy password sshd pam config"
+  template: src=computenodes_sshd.j2 dest=/etc/pam.d/sshd
+  become: true
+  become_user: root
+  when: computenodepam is defined and computenodepam 
diff --git a/roles/pam_slurm/templates/access.conf.j2 b/roles/pam_sshd/templates/access.conf.j2
similarity index 100%
rename from roles/pam_slurm/templates/access.conf.j2
rename to roles/pam_sshd/templates/access.conf.j2
diff --git a/roles/pam_slurm/templates/sshd.j2 b/roles/pam_sshd/templates/computenodes_sshd.j2
similarity index 100%
rename from roles/pam_slurm/templates/sshd.j2
rename to roles/pam_sshd/templates/computenodes_sshd.j2
diff --git a/roles/pam_sshd/templates/sshd.j2 b/roles/pam_sshd/templates/loginnodes_sshd.j2
similarity index 91%
rename from roles/pam_sshd/templates/sshd.j2
rename to roles/pam_sshd/templates/loginnodes_sshd.j2
index 0b73a8cf8b40633aab0a55f2be817562d6eb0391..b22b0bbf48e20d017775386ebe213732c954b612 100644
--- a/roles/pam_sshd/templates/sshd.j2
+++ b/roles/pam_sshd/templates/loginnodes_sshd.j2
@@ -4,7 +4,7 @@ auth       substack     password-auth
 auth       include      postlogin
 # Used with polkit to reauthorize users in remote sessions
 -auth      optional     pam_reauthorize.so prepare
-account [success=1 default=ignore] pam_succeed_if.so quiet user ingroup systems
+account    sufficient   pam_access.so
 account    required     pam_nologin.so
 account    include      password-auth
 password   include      password-auth
diff --git a/roles/postfix/handlers/main.yml b/roles/postfix/handlers/main.yml
index be706e39ee3209bb6bc551cafbf34bf6ad3eff5d..5b2be226ccd78907d173297c6e4dc49ed5a02d8e 100644
--- a/roles/postfix/handlers/main.yml
+++ b/roles/postfix/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
 - name: restart postfix 
   service: name=postfix state=restarted
-  sudo: true
+  become: true
diff --git a/roles/postfix/tasks/main.yml b/roles/postfix/tasks/main.yml
index 5c2345c380956921a6ac87e18c0e2a59dea486c1..10dd2f7f72a3c330aa516200f5ddf7a741aeac8e 100644
--- a/roles/postfix/tasks/main.yml
+++ b/roles/postfix/tasks/main.yml
@@ -1,15 +1,15 @@
 ---
 - name: install postfix
   apt: name=postfix state=present
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: configure postfix
   template: src=main_cf.j2 dest=/etc/postfix/main.cf
-  sudo: true
+  become: true
   notify: restart postfix
 
 - name: start postfix 
   service: name=postfix state=started
-  sudo: true
+  become: true
 
diff --git a/roles/provision/tasks/main.yml b/roles/provision/tasks/main.yml
index c5d13aadd3c4aea253aaec4c556f65acdfe7d5a9..adbf87ad770d261fcce8690466e2e40d1d78871e 100644
--- a/roles/provision/tasks/main.yml
+++ b/roles/provision/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: make  dir
   file: path="{{ provision | dirname }}" state=directory mode=755 owner=root
-  sudo: true
+  become: true
 
 - name: copy provision template 
   template: src=provision.sh.j2 dest={{ provision }} mode=755 owner=root
-  sudo: true
+  become: true
 
 - name: provision cron job 
   cron: name=provision job="/usr/bin/flock -x -n /tmp/provision.lck -c {{ provision }}" user=root minute=*/30 state=present
-  sudo: true
+  become: true
diff --git a/roles/provision_homedir/tasks/main.yml b/roles/provision_homedir/tasks/main.yml
index b9cbf86503c9d84fcf0053234cc55241c1c980f4..509d2ca9187f269f7636b9cc902819a8ea21f7ba 100644
--- a/roles/provision_homedir/tasks/main.yml
+++ b/roles/provision_homedir/tasks/main.yml
@@ -1,23 +1,23 @@
 ---
 - name: make  dir
   file: path="{{ provision_homedir | dirname }}" state=directory mode=755 owner=root
-  sudo: true
+  become: true
 
 - name: install python packages
   yum: name=python-ldap state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name: install python packages
   apt: name=python-ldap state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
 
 - name: copy provision_homedir template 
   template: src=provision_homedir.py.j2 dest={{ provision_homedir }} mode=700 owner=root
-  sudo: true
+  become: true
 
 # the lockfile for makeing home directories should be located on the shared directory where the home directories will be created. Otherwise it will be racey
 - name: provision_homedir cron job 
   cron: name=provision_homedir job="/usr/bin/flock -x -n {{ mnthome }}/home/provision.lck -c {{ provision_homedir }}" user=root minute=*/15 state=present
-  sudo: true
+  become: true
diff --git a/roles/provision_slurm/tasks/main.yml b/roles/provision_slurm/tasks/main.yml
index 807963771e0736ad81bf1451d97db9467ceeeb71..d93254c8711a5d79a17b516ae5143c199177ec98 100644
--- a/roles/provision_slurm/tasks/main.yml
+++ b/roles/provision_slurm/tasks/main.yml
@@ -1,25 +1,25 @@
 ---
 - name: make  dir
   file: path="{{ provision_slurm | dirname }}" state=directory mode=755 owner=root
-  sudo: true
+  become: true
   run_once: true
 
 - name: install python packages
   yum: name=python-ldap state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name: install python packages
   apt: name=python-ldap state=installed
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
 
 - name: copy provision_slurm template 
   template: src=provision_slurm.py.j2 dest={{ provision_slurm }} mode=700 owner=root
-  sudo: true
+  become: true
   run_once: true
 
 # the lockfile for makeing home directories should be located on the shared directory where the home directories will be created. Otherwise it will be racey
 - name: provision_slurm cron job 
   cron: name=provision_slurm job="/usr/bin/flock -x -n {{ lockpath }}/slurm_provision.lck -c {{ provision_slurm }}" user=root minute=*/20 state=present
-  sudo: true
+  become: true
diff --git a/roles/rsyslog_client/tasks/main.yml b/roles/rsyslog_client/tasks/main.yml
index 9b087381192f7818bd9a61467dea29614dab0ac7..eed6753ae301b63d5b9236c1b889f122e31b6cc4 100644
--- a/roles/rsyslog_client/tasks/main.yml
+++ b/roles/rsyslog_client/tasks/main.yml
@@ -7,7 +7,7 @@
   when: ansible_os_family == 'RedHat'
 
 - name: install rsyslog
-  yum: name=rsyslog state=installed
+  apt: name=rsyslog state=installed
   become: true
   become_user: root
   when: ansible_os_family == 'Debian'
diff --git a/roles/rsyslog_server/tasks/main.yml b/roles/rsyslog_server/tasks/main.yml
index 2aec4a9c6688a96994edb563b14c15b017cd599e..b7f54ec5a38be54cf969289f9fc7859fd75ea047 100644
--- a/roles/rsyslog_server/tasks/main.yml
+++ b/roles/rsyslog_server/tasks/main.yml
@@ -22,4 +22,4 @@
   service: name=rsyslog state=restarted
   become: true
   become_user: root
-  when: config_changed | changed
+  when: config_changed.changed
diff --git a/roles/set_timezone/README.md b/roles/set_timezone/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..43cb094b917b7d2cab5ec357b4de67fa9496ba7c
--- /dev/null
+++ b/roles/set_timezone/README.md
@@ -0,0 +1,13 @@
+This role sets the timezone on the desired server.
+- installs a templated file into /etc/ntp.conf. The variable NTP_SERVER sets the ntp server
+  - NTP_SERVER defaults to ntp.monash.edu.au
+- starts and enables the ntpd process
+- makes /etc/localtime a symlink to the path defined by the variable TIMEZONE_PATH
+  - TIMEZONE_PATH defaults to /usr/share/zoneinfo/Australia/Melbourne
+
+
+Examples of use:
+- { role: set_timezone } #sets to Melbourne time
+- { role: set_timezone, TIMEZONE_PATH: "/usr/share/zoneinfo/Australia/Perth" } #sets to Perth time
+- { role: set_timezone, TIMEZONE_PATH: "/usr/share/zoneinfo/Australia/Perth", NTP_SERVER: "time.google.com" } #sets to Perth time and using google ntp server
+
diff --git a/roles/set_timezone/tasks/main.yml b/roles/set_timezone/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..177969103af146ee970584e774bf2d4731209e77
--- /dev/null
+++ b/roles/set_timezone/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: install ntp.conf
+  template: src=ntp.conf.j2 dest=/etc/ntp.conf mode=644 owner=root group=root
+  become: true
+  become_user: root
+
+- name: restart ntpd
+  service: name=ntpd state=restarted 
+  become: true
+  become_user: root
+
+- name: ensure ntpd is enabled and started   
+  service: name=ntpd state=started enabled=yes   
+  become: true   
+  become_user: root
+
+- name: set local timezone
+  file: path=/etc/localtime state=link src={{ TIMEZONE_PATH }}
+  become: true
+  become_user: root
diff --git a/roles/set_timezone/templates/ntp.conf.j2 b/roles/set_timezone/templates/ntp.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..2717f9824e405f016299c2c292aeafd6dfe1cccb
--- /dev/null
+++ b/roles/set_timezone/templates/ntp.conf.j2
@@ -0,0 +1,55 @@
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface.  This could
+# be tightened as well, but to do so would effect some of
+# the administrative functions.
+restrict 127.0.0.1 
+restrict ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+server {{ NTP_SERVER }}
+
+#broadcast 192.168.1.255 autokey	# broadcast server
+#broadcastclient			# broadcast client
+#broadcast 224.0.1.1 autokey		# multicast server
+#multicastclient 224.0.1.1		# multicast client
+#manycastserver 239.255.254.254		# manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography. 
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
+
+# Disable the monitoring facility to prevent amplification attacks using ntpdc
+# monlist command when default restrict does not include the noquery flag. See
+# CVE-2013-5211 for more details.
+# Note: Monitoring will not be disabled with the limited restriction flag.
+disable monitor
diff --git a/roles/set_timezone/vars/main.yml b/roles/set_timezone/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..65f16b25f13e746fe3185f2f9789cf09367b79ae
--- /dev/null
+++ b/roles/set_timezone/vars/main.yml
@@ -0,0 +1,3 @@
+---
+TIMEZONE_PATH: "/usr/share/zoneinfo/Australia/Melbourne"
+NTP_SERVER: "ntp.monash.edu.au"
diff --git a/roles/shibboleth-sp/handlers/main.yml b/roles/shibboleth-sp/handlers/main.yml
index 0f9ae520be01b1ee5873911ecc1a3175a69aa21d..3070cd7df33df6b30af18596f14df572e6549abf 100644
--- a/roles/shibboleth-sp/handlers/main.yml
+++ b/roles/shibboleth-sp/handlers/main.yml
@@ -1,9 +1,9 @@
 ---
 - name: "Restarting Apache"  
   service: name=apache2 state=restarted
-  sudo: true
+  become: true
 
 - name: "Restarting shibboleth"  
   service: name=shibd state=restarted
-  sudo: true
+  become: true
 
diff --git a/roles/shibboleth-sp/tasks/shibbolethConfig.yml b/roles/shibboleth-sp/tasks/shibbolethConfig.yml
index d94f5dc70053db42194783b1145e67f5b508b141..3f08102b338829bc68abf5a24456d88ba9202199 100644
--- a/roles/shibboleth-sp/tasks/shibbolethConfig.yml
+++ b/roles/shibboleth-sp/tasks/shibbolethConfig.yml
@@ -3,11 +3,11 @@
 # name: "Copying the shibboleth files"
 # template: src=files/{{ item.src }} dest="{{ item.dest }}" mode=0644
 # with_items: shibboleth_file
-# sudo: true
+# become: true
 
 - 
   name: "Setting shibboleth2.xml sp.example.org"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -23,11 +23,11 @@
    regexp: '^(\s*)<SSO entityID="https://idp.example.org/idp/shibboleth"'
    line: '\1<SSO'
    backrefs: true
- sudo: true
+ become: true
 
 - 
   name: "Setting shibboleth2.xml handlerSSL"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -37,7 +37,7 @@
 
 - 
   name: "Setting shibboleth2.xml supportContact"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -47,7 +47,7 @@
 
 - 
   name: "Enabling MetadataProvider"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -56,7 +56,7 @@
    
 - 
   name: "Enabling MetadataProvider"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -64,7 +64,7 @@
    replace: '</MetadataProvider>'
 - 
   name: "Setting shibboleth2.xml Federation URI"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -73,7 +73,7 @@
    
 - 
   name: "Setting shibboleth2.xml backingFilePath"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -82,11 +82,11 @@
    
 - name: copy AAF metadata cert
   copy: src=files/{{ shib_metadata_cert }} dest=/etc/shibboleth/aaf-metadata-cert.pem mode=644
-  sudo: true
+  become: true
 
 
 - name: "Setting shibboleth2.xml aaf Certificate"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -95,7 +95,7 @@
    
 - 
   name: "Setting shibboleth2.xml AAF Discovery URL"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -104,23 +104,23 @@
    
 - name: make shib private directory
   file: path=/etc/shibboleth/private state=directory mode=700 owner="_shibd"
-  sudo: true
+  become: true
 
 - name: copy shib key
-  sudo: true
+  become: true
   copy: src=files/{{ shib_key }} dest=/etc/shibboleth/private/{{ shib_key }} owner="_shibd" mode=600
 
 - name: make shib certs directory
   file: path=/etc/shibboleth/certs state=directory mode=755 owner="_shibd"
-  sudo: true
+  become: true
 
 - name: copy shib cert
-  sudo: true
+  become: true
   copy: src=files/{{ shib_cert }} dest=/etc/shibboleth/certs/{{ shib_cert }} owner="_shibd" mode=644
 
 - 
   name: "Setting shibboleth2.xml Credential Resolver"
-  sudo: true
+  become: true
   replace: 
   args:
    dest: /etc/shibboleth/shibboleth2.xml 
@@ -129,7 +129,7 @@
 
 -
  name: "Templating attribute-map.xml"
- sudo: true
+ become: true
  template:
  args:
    src: attribute-map.xml.j2
@@ -139,5 +139,5 @@
    - Restarting shibboleth
 -
  name: "Starting shibboleth"
- sudo: true
+ become: true
  service: name=shibd state=started enabled=yes
diff --git a/roles/shibboleth-sp/tasks/shibbolethPrerequisites.yml b/roles/shibboleth-sp/tasks/shibbolethPrerequisites.yml
index f9e46165096e2fb9a3d5495ce41b998e99e78fab..fc2b0d411caa600dd391dd6e037d083f6b53ef85 100644
--- a/roles/shibboleth-sp/tasks/shibbolethPrerequisites.yml
+++ b/roles/shibboleth-sp/tasks/shibbolethPrerequisites.yml
@@ -1,7 +1,7 @@
 ---
 -
  name: Install base packages - Debian
- sudo: true
+ become: true
  apt: name={{ item }} state=present
  with_items:
   - shibboleth-sp2-schemas 
diff --git a/roles/slurm-common/handlers/main.yml b/roles/slurm-common/handlers/main.yml
index 57f0cb12be532609f9884170cf7aeadf7be037f9..b57d5bf21738f9ab035743b94a66e2225c56f3e4 100644
--- a/roles/slurm-common/handlers/main.yml
+++ b/roles/slurm-common/handlers/main.yml
@@ -1,18 +1,18 @@
 ---
   - name: restart munge
     service: name=munge state=restarted
-    sudo: true
+    become: true
 
   - name: restart slurm
     service: name=slurm state=restarted
-    sudo: true
+    become: true
 
   - name: restart slurmdbd
     service: name=slurmdbd state=restarted
-    sudo: true
+    become: true
 
   - name: scontrol reconfigure
     shell: sleep 10 ; scontrol reconfigure
-    sudo: true
+    become: true
     delegate_to: "{{ slurmctrl }}"
     run_once: true
diff --git a/roles/slurm-common/tasks/createSlurmDirectories.yml b/roles/slurm-common/tasks/createSlurmDirectories.yml
index 295aeadf3e1655c74e778d0b1263d5bc446757bb..738956823167ca062efe85940774a45c9a547423 100644
--- a/roles/slurm-common/tasks/createSlurmDirectories.yml
+++ b/roles/slurm-common/tasks/createSlurmDirectories.yml
@@ -1,15 +1,15 @@
 ---
 - name: make sure slurmctld and slurmdb log dir exists
   file: dest=/mnt/slurm-logs state=directory owner=root group=root mode=755
-  sudo: true
+  become: true
 
 - name: make sure slurm conf dir exists
   file: dest={{ slurm_dir }}/etc state=directory
-  sudo: true
+  become: true
 
 - name: make sure slurm lock dir exists
   file: dest=/var/lock/subsys state=directory owner=root group=root mode=755
-  sudo: true
+  become: true
 
 - name: stat run directory
   stat: path={{ slurmdatadir }} 
@@ -20,7 +20,7 @@
 
 - name: create data directory
   file: path={{ slurmdatadir }} state=directory owner=slurm group=slurm mode=755
-  sudo: true
+  become: true
   when: slurmdatadir is defined and not runstat.stat.exists
       
 - name: stat pid directory
@@ -32,23 +32,27 @@
 
 - name: create pid directory
   file: path={{ slurmpiddir }} state=directory owner=slurm group=slurm mode=755
-  sudo: true
+  become: true
   when: slurmpiddir is defined and not pidstat.stat.exists
 
+- name: create slurmdbdpiddir directory
+  file: path={{ slurmdbdpiddir }} state=directory owner=slurm group=slurm mode=755
+  become: true
+
 - name: create shared state directory
   file: path={{slurmsharedstatedir }} state=directory owner=slurm group=slurm mode=750
-  sudo: true
+  become: true
   run_once: true
   when: usesharedstatedir is defined and usesharedstatedir
 
 - name: symlink shared state dir
   file: path={{ slurmstatedir }} src={{ slurmsharedstatedir }} state=link
-  sudo: true
+  become: true
   when: usesharedstatedir is defined and usesharedstatedir
 
 - name: create state directory
   file: path={{ slurmstatedir }} state=directory owner=slurm group=slurm mode=750
-  sudo: true
+  become: true
   when: slurmstatedir is defined  and not usesharedstatedir
 
 - name: stat log directory
@@ -60,14 +64,14 @@
 
 - name: create log directory
   file: path={{ slurmlogdir }} state=directory owner=slurm group=slurm mode=750
-  sudo: true
+  become: true
   when: slurmlogdir is defined and not logstat.stat.exists
 
 - name: make sure slurm conf dir exists
   file: dest={{ slurm_dir }}/etc state=directory
-  sudo: true
+  become: true
 
 - name: create greps directory
   file: path={{ slurm_dir }}/etc/gres state=directory owner=slurm group=slurm mode=755
-  sudo: true
+  become: true
 
diff --git a/roles/slurm-common/tasks/installMungeFromSource.yml b/roles/slurm-common/tasks/installMungeFromSource.yml
index f2cbd81cc64d5ec89e07cf4eedb74156f4b7bce8..656d35c9ff04a253224e44c9031e2c37c67c777e 100644
--- a/roles/slurm-common/tasks/installMungeFromSource.yml
+++ b/roles/slurm-common/tasks/installMungeFromSource.yml
@@ -21,7 +21,7 @@
 
 - name: install munge
   shell: make install
-  sudo: true
+  become: true
   args:
     chdir: /tmp/munge-{{ munge_version }}
     creates: "{{ munge_dir }}/bin/munge"
@@ -34,16 +34,16 @@
 
 - name: copy init script
   template: dest=/etc/init.d/munge src=munge.initd.j2 mode=755
-  sudo: true
+  become: true
   register: systemd_script_installed
   when: use_systemd is not defined
 
 - name:  copy slurm init script if OS contains systemd
   template: dest=/etc/systemd/system/munge.service src=munge.service.j2 mode=644
-  sudo: true
+  become: true
   when: use_systemd is defined
 
 - name: reload systemd
   shell: systemctl daemon-reload
-  sudo: true
-  when: use_systemd is defined and systemd_script_installed|changed
+  become: true
+  when: use_systemd is defined and systemd_script_installed.changed
diff --git a/roles/slurm-common/tasks/installSlurmFromSource.yml b/roles/slurm-common/tasks/installSlurmFromSource.yml
index c1c7794f6d26dc78413edf578e0007a83d2ae116..9d1a326c634ede300ccbe6571b6123b88903cf50 100644
--- a/roles/slurm-common/tasks/installSlurmFromSource.yml
+++ b/roles/slurm-common/tasks/installSlurmFromSource.yml
@@ -1,11 +1,15 @@
 - name: remove all install
-  shell: rm -rf /tmp/slurm-{{ slurm_version }}
-  sudo: true 
+  file:
+    path: "/tmp/slurm-{{ slurm_version }}"
+    state: absent
+  become: true
   when: force_slurm_recompile is defined
 
 - name: remove all install
-  shell: rm -rf {{ slurm_dir }}
-  sudo: true 
+  file:
+    path: "{{ slurm_dir }}"
+    state: absent
+  become: true 
   when: force_slurm_recompile is defined
 
 - name: unarchive slurm
@@ -19,9 +23,6 @@
   stat: path="{{ slurm_dir }}/bin/srun"
   register: stat_srun
 
-- name: stat pam_slurm_adopt
-  stat: path="/lib64/security/pam_slurm_adopt.so"
-  register: stat_pam_slurm_adopt
 
 - name: configure slurm
   command: /tmp/slurm-{{ slurm_version }}/configure --prefix={{ slurm_dir }} --with-munge={{ munge_dir }} --enable-pam --with-pmix=/usr/local/pmix/latest
@@ -39,7 +40,7 @@
 
 - name: install slurm
   shell: make install
-  sudo: true
+  become: true
   args:
     chdir: /tmp/slurm-{{ slurm_version }}
     creates: "{{ slurm_dir }}/bin/srun"
@@ -53,7 +54,7 @@
 
 - name: install pmi
   shell: make install
-  sudo: true
+  become: true
   args:
     chdir: /tmp/slurm-{{ slurm_version }}/contribs/pmi
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
@@ -66,7 +67,7 @@
 
 - name: install pmi2
   shell: make install
-  sudo: true
+  become: true
   args:
     chdir: /tmp/slurm-{{ slurm_version }}/contribs/pmi2
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
@@ -79,7 +80,7 @@
 
 - name: install pam_slurm
   shell: make install
-  sudo: true
+  become: true
   args:
     chdir: /tmp/slurm-{{ slurm_version }}/contribs/pam
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
@@ -87,25 +88,30 @@
 - name: build pam_slurm_adopt
   make:
     chdir: /tmp/slurm-{{ slurm_version }}/contribs/pam_slurm_adopt
-  when: force_slurm_recompile is defined or not stat_pam_slurm_adopt.stat.exists
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
 
 - name: install pam_slurm_adopt
   make:
     chdir: /tmp/slurm-{{ slurm_version }}/contribs/pam_slurm_adopt
     target: install
-  when: force_slurm_recompile is defined or not stat_pam_slurm_adopt.stat.exists
-  sudo: true
+  when: force_slurm_recompile is defined or not stat_srun.stat.exists
+  become: true
 
 - name: remove exist-slurm-latest-link
-  shell: rm -f  /opt/slurm-latest
-  sudo: true
+  file:
+    path: /opt/slurm-latest
+    state: absent
+  become: true
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
 
 - name: put slurm-latest-link
-  shell: ln -s  {{ slurm_dir }}  /opt/slurm-latest
-  sudo: true
+  file:
+    src: "{{ slurm_dir }}"
+    dest: /opt/slurm-latest
+    state: link
+  become: true
   when: force_slurm_recompile is defined or not stat_srun.stat.exists
 
 - name: add slurm log rotate config
   template: src=slurmlog.j2 dest=/etc/logrotate.d/slurm mode=644
-  sudo: true
+  become: true
diff --git a/roles/slurm-common/tasks/main.yml b/roles/slurm-common/tasks/main.yml
index dfbed6ddb0e7466248d341962a06ab381968a6a0..d2351af627d7d6b32aa7d720d236c3a5139d84d5 100644
--- a/roles/slurm-common/tasks/main.yml
+++ b/roles/slurm-common/tasks/main.yml
@@ -1,19 +1,19 @@
 ---
 - name: create munge group
   group: name=munge system=yes gid=498
-  sudo: true
+  become: true
 
 - name: create munge user
   user: name=munge group=munge system=yes createhome=no uid=498
-  sudo: true
+  become: true
 
 - name: create slurm group
   group: name=slurm system=yes gid=497
-  sudo: true
+  become: true
 
 - name: create slurm user
   user: name=slurm group=slurm system=yes createhome=no uid=497
-  sudo: true
+  become: true
 
 - include: createSlurmDirectories.yml
 
@@ -35,12 +35,12 @@
     - hwloc-devel
     - lua
     - lua-devel
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: install deps
   apt: name={{ item }} state=installed update_cache=yes
-  sudo: true
+  become: true
   with_items:
     - gcc
     - wget
@@ -59,19 +59,19 @@
 
 - name: chown mungedir
   file: path={{ munge_dir }} state=directory owner=munge recurse=yes
-  sudo: true
+  become: true
 
 - name: make munge logdir
   file: path={{ munge_dir }}/var/log/munge state=directory owner=munge mode=700
-  sudo: true
+  become: true
 
 - name: install munge key
   template: src=munge_key.j2 dest={{ munge_dir }}/etc/munge/munge.key owner=munge mode=600
-  sudo: true
+  become: true
 
 - name: enable munge on boot
   service: name=munge enabled=yes
-  sudo: true
+  become: true
 
 
 - include: installSlurmFromSource.yml
@@ -84,23 +84,26 @@
   ignore_errors: true
   when: slurm_gres_check is defined
   check_mode: no
+  changed_when: False
 
 - name: Gres - Test for Nvidia devices
   script: scripts/nvidia-probe.py
   register: probeOutput
   check_mode: no
+  changed_when: False
 
 - name: get cpu count
   shell: 'lscpu | grep "On-line CPU" | cut -f 2 -d ":" | sed "s/\ *//g"'
   register: cpucount
   check_mode: no
+  changed_when: False
 
 - name: "set nvidiaprobe slurm_gres_list"
   set_fact: "slurm_gres_list={{  probeOutput.stdout }}"
 
 - name: template gres.conf file
   template: src="gres.conf.j2" dest={{ slurm_dir }}/etc/gres.conf mode=644
-  sudo: true
+  become: true
 
 - name: make slurm prolog dir 
   file: path=/opt/slurm/etc state=directory mode=755
@@ -109,36 +112,36 @@
 
 - name: install slurm prolog
   template: src=slurm.prolog.j2 dest=/opt/slurm/etc/slurm.prolog mode=755
-  sudo: true
+  become: true
 
 - name: install slurm epilog
   template: src=slurm.epilog.j2 dest=/opt/slurm/etc/slurm.epilog mode=755
-  sudo: true
+  become: true
 
 - name: install slurm.conf
   copy: src=files/slurm.conf dest={{ slurm_dir }}/etc/slurm.conf
-  sudo: true
+  become: true
   when: slurm_use_vpn==False 
 
 - name: install slurm.conf
   template: src=slurm-vpn.conf.j2 dest={{ slurm_dir }}/etc/slurm.conf
-  sudo: true
+  become: true
   when: slurm_use_vpn==True
 
 #- name: install job_submit.lua
 #  copy: src=files/job_submit.lua dest={{ slurm_dir }}/etc/job_submit.lua
-#  sudo: true
+#  become: true
 #  when: slurm_use_vpn==False
 
 - name: setup envirnment variables 
   template: src=slurm_setup.sh.j2 dest=/etc/profile.d/slurm_setup.sh
-  sudo: true
+  become: true
 
 - name: setup plugin
   template: src=job_submit.lua.j2 dest={{ slurm_dir }}/etc/job_submit.lua mode=755
   #delegate_to: "{{ slurmctrl }}"
   #run_once: true
-  sudo: true
+  become: true
   when: slurm_lua==True
 
 - include: installCgroup.yml
diff --git a/roles/slurm-start/tasks/main.yml b/roles/slurm-start/tasks/main.yml
index e053d09d543da6d4d633bf97a85f80d02c263e99..5bd124c036f53a36c5132a2e21a835f4bcf0189e 100644
--- a/roles/slurm-start/tasks/main.yml
+++ b/roles/slurm-start/tasks/main.yml
@@ -12,73 +12,73 @@
 
 - name: install slurmdbd init
   template: src=slurmdbd.initd.j2 dest=/etc/init.d/slurmdbd mode=755
-  sudo: true
+  become: true
   when: use_systemd is not defined and start_slurmdbd is defined
 
 - name:  copy slurmdbd init script if OS contains systemd
   template: dest=/etc/systemd/system/slurmdbd.service src=slurmdbd.service.j2 mode=644
-  sudo: true
+  become: true
   when: use_systemd is defined and start_slurmdbd is defined
   register: slurmdbd_service_installed
 
 - name: copy slurm init script
   template: dest=/etc/init.d/slurm src=slurm.initd.j2  mode=755
-  sudo: true
+  become: true
   when: use_systemd is not defined
 
 - name:  copy slurmd.service
   template: dest=/etc/systemd/system/slurmd.service src=slurmd.service.j2 mode=644
-  sudo: true
+  become: true
   when: use_systemd is defined and start_slurmd is defined
   register: slurmd_service_installed
 
 - name: slurmctld.service
   template: dest=/etc/systemd/system/slurmctld.service src=slurmctld.service.j2 mode=644
-  sudo: true
+  become: true
   when: use_systemd is defined and start_slurmctld is defined
   register: slurmctld_service_installed
 
 - name: reload systemd
   shell: systemctl daemon-reload
-  sudo: true
-  when: use_systemd is defined and start_slurmd is defined and slurmd_service_installed | changed
+  become: true
+  when: use_systemd is defined and start_slurmd is defined and slurmd_service_installed.changed
 
 - name: reload systemd
   shell: systemctl daemon-reload
-  sudo: true
-  when: use_systemd is defined and start_slurmctld is defined and slurmctld_service_installed | changed
+  become: true
+  when: use_systemd is defined and start_slurmctld is defined and slurmctld_service_installed.changed
 
 - name: reload systemd
   shell: systemctl daemon-reload
-  sudo: true
-  when: use_systemd is defined and start_slurmdbd is defined and slurmdbd_service_installed | changed
+  become: true
+  when: use_systemd is defined and start_slurmdbd is defined and slurmdbd_service_installed.changed
 
 - name: start munge
-  service: name=munge state=started enabled=yes
-  sudo: true
+  service: name=munge state=restarted enabled=yes
+  become: true
 
 - name: start slurmdbd
-  service: name=slurmdbd state=started enabled=no
-  sudo: true
+  service: name=slurmdbd state=restarted enabled=no
+  become: true
   when: start_slurmdbd is defined
 
 
 - name: "create cluster in slurm db"
   shell:  "{{slurm_dir}}/bin/sacctmgr -i create cluster {{ clustername }}"
-  sudo: true
+  become: true
   ignore_errors: true
 
 - name: start slurmctl
-  service: name=slurmctld state=started enabled=no
-  sudo: true
+  service: name=slurmctld state=restarted enabled=no
+  become: true
   when: use_systemd is defined and start_slurmctld is defined
 
 - name: start slurmd
-  service: name=slurmd state=started enabled={{ slurmd_enabled }}
-  sudo: true
+  service: name=slurmd state=restarted enabled={{ slurmd_enabled }}
+  become: true
   when: use_systemd is defined and start_slurmd is defined
 
 - name: start slurm
-  service: name=slurm state=started enabled={{ slurmd_enabled }}
-  sudo: true
+  service: name=slurm state=restarted enabled={{ slurmd_enabled }}
+  become: true
   when: use_systemd is not defined and ( start_slurmd is defined or start_slurmctld is defined )
diff --git a/roles/slurm-start/templates/slurmdbd.service.j2 b/roles/slurm-start/templates/slurmdbd.service.j2
index 49a4b1b3b3722006a7b36e11626d132618dec3f2..cc48193f09d95e8d2886855b9a68002793410950 100644
--- a/roles/slurm-start/templates/slurmdbd.service.j2
+++ b/roles/slurm-start/templates/slurmdbd.service.j2
@@ -7,7 +7,7 @@ ConditionPathExists={{ slurm_dir }}/etc/slurmdbd.conf
 [Service]
 Type=forking
 ExecStart={{ slurm_dir }}/sbin/slurmdbd
-PIDFile={{ slurmpiddir }}/slurmdbd.pid
+PIDFile={{ slurmdbdpiddir }}/slurmdbd.pid
 
 [Install]
 WantedBy=multi-user.target
diff --git a/roles/slurm_config/tasks/main.yml b/roles/slurm_config/tasks/main.yml
index feec10209a05172fcf9f887384233a553444b5e5..93912a851dda2ccb18c18cb26b6c84b2f684c481 100644
--- a/roles/slurm_config/tasks/main.yml
+++ b/roles/slurm_config/tasks/main.yml
@@ -1,8 +1,13 @@
 ---
 - name: install slurm.conf
   copy: src=files/slurm.conf dest={{ slurm_dir }}/etc/slurm.conf
-  sudo: true
+  become: true
+  become_user: root
+
+- name: setup plugin
+  template: src=job_submit.lua.j2 dest={{ slurm_dir }}/etc/job_submit.lua mode=755
+  run_once: true
+  become: true
+  become_user: root
+  when: slurm_lua is defined
 
-- name: install job_submit.lua
-  copy: src=files/job_submit.lua dest={{ slurm_dir }}/etc/job_submit.lua
-  sudo: true
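Note: for the templated job_submit.lua to be loaded by slurmctld, the cluster's slurm.conf must enable the lua job-submit plugin. The standard Slurm setting is the single line below; whether files/slurm.conf already contains it is not visible in this diff, so this is shown only as a reminder:

    JobSubmitPlugins=lua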
diff --git a/roles/slurm_config/templates/job_submit.lua.j2 b/roles/slurm_config/templates/job_submit.lua.j2
new file mode 100644
index 0000000000000000000000000000000000000000..22b05df79c76d4e33a0aae386ac6f5102454ee32
--- /dev/null
+++ b/roles/slurm_config/templates/job_submit.lua.j2
@@ -0,0 +1,70 @@
+--[[
+
+ Example lua script demonstrating the SLURM job_submit/lua interface.
+ This is only an example, not meant for use in its current form.
+
+ Leave the function names, arguments, local variables and setmetatable
+ set up logic in each function unchanged. Change only the logic after
+ the line containing "*** YOUR LOGIC GOES BELOW ***".
+
+ For use, this script should be copied into a file named "job_submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
+
+
+--]]
+
+function slurm_job_submit(job_desc, part_list, submit_uid)
+
+
+-- Check no default account
+
+if job_desc.account == "default" then
+   slurm.log_user("You have to specify your project ID as part of your job submission. The account=default is now deprecated on M3 job scheduler.")
+   return slurm.ERROR
+end
+
+
+-- Check Desktop requests with more than one node
+
+if ((job_desc.name == "desktop") and (job_desc.min_nodes > 1 )) then
+   slurm.log_user("The current M3 Desktop applications are unable to utilise more than one node, please select one node instead")
+   return slurm.ERROR
+end
+
+
+
+-- Check for gres.gpu requirements in m3c, m3h and m3g, else move job to comp
+
+if ((job_desc.partition == "m3c" ) or (job_desc.partition == "m3h" ) or (job_desc.partition == "m3g" ))  then
+   local partition = ""
+   if (job_desc.gres == nil) then
+      partition = "comp"
+      slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
+      job_desc.partition = partition
+   end
+   return slurm.SUCCESS
+end
+
+
+-- Check for QOS rtq in m3c, m3h , m3g and partition=nil, then forward job to rtqp,comp,m3g
+
+if ((job_desc.qos == "rtq") and (job_desc.partition == nil)) then
+   local partition = ""
+   partition = "rtqp,comp,m3g"
+   slurm.log_info("slurm_job_submit: for user: %u, partition: %s", submit_uid, partition)
+   job_desc.partition = partition
+   return slurm.SUCCESS
+end
+
+
+
+end
+
+
+
+function slurm_job_modify(job_desc, job_rec, part_list, modify_uid)
+       return slurm.SUCCESS
+end
+
+slurm.log_info("initialized")
+return slurm.SUCCESS
diff --git a/roles/slurmdb-config/tasks/main.yml b/roles/slurmdb-config/tasks/main.yml
index 3e23046fe6d1011f6bb23b4c937089c8724ec053..c189183bab51ca97da66ddbae06aba5c73931bed 100644
--- a/roles/slurmdb-config/tasks/main.yml
+++ b/roles/slurmdb-config/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: install deps in control node
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - mysql
     - mysql-devel
@@ -10,7 +10,7 @@
 
 - name: install deps in control node
   apt: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items:
     - mysql-client
     - libmysqlclient-dev
@@ -19,7 +19,20 @@
 
 - name: make sure config dir exists
   file: path="{{ slurm_dir }}/etc" state=directory
-  sudo: true
+  become: true
+
+- name: create slurm group
+  group: name=slurm system=yes gid=497
+  become: true
+
+- name: create slurm user # this is duplicated from slurm-common
+  user: 
+    name: slurm 
+    group: slurm 
+    system: yes 
+    createhome: no
+    uid: 497
+  become: true
 
 - name: install slurmdb.conf
   copy: 
@@ -28,7 +41,7 @@
     owner: slurm
     group: slurm
     mode: u+rw,g-wx,o-rwx
-  sudo: true
+  become: true
   when: slurm_dir is defined
 
 
@@ -39,10 +52,10 @@
     owner: slurm
     group: slurm
     mode: u+rw,g-wx,o-rwx
-  sudo: true
+  become: true
   when: slurm_dir is not defined
 
 - name: add slurm db log rotate config
   template: src=slurmdblog.j2 dest=/etc/logrotate.d/slurmdb mode=644
-  sudo: true
+  become: true
 
diff --git a/roles/slurmdb-config/templates/slurmdblog.j2 b/roles/slurmdb-config/templates/slurmdblog.j2
index 0fee942fd1470228d2a029243fe55ecac68d58cc..9bdb068e43169fd8977fe2c4076b88800e4cef66 100644
--- a/roles/slurmdb-config/templates/slurmdblog.j2
+++ b/roles/slurmdb-config/templates/slurmdblog.j2
@@ -14,7 +14,7 @@
  create 640 slurm root 
 {% if ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' %}
   postrotate
-	systemctl kill -s HUP --kill-who=main slurmdbd
+        pkill -x --signal SIGUSR2 slurmdbd
 {% else %}
  postrotate /etc/init.d/slurmdbd reconfig 
 {% endif %}
diff --git a/roles/smux/tasks/main.yml b/roles/smux/tasks/main.yml
index 62ccd783ccaa3c4e6079e80a09fe18fb8d5c935f..2e3633f72f660a95f750b73c21d3b41793f03520 100644
--- a/roles/smux/tasks/main.yml
+++ b/roles/smux/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
 - name: install tmux
   yum: name=tmux state=present
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
diff --git a/roles/ssh-keepalive/handlers/main.yml b/roles/ssh-keepalive/handlers/main.yml
index df0d3deeca457c10a9805a439cb4a61087cac8d3..c9546663caba8d4f511dea25f245a06c5071e64b 100644
--- a/roles/ssh-keepalive/handlers/main.yml
+++ b/roles/ssh-keepalive/handlers/main.yml
@@ -1,9 +1,9 @@
 - name: "restart sshd"
   service: name=sshd state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: "restart ssh"
   service: name=ssh state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
diff --git a/roles/ssh-keepalive/tasks/main.yml b/roles/ssh-keepalive/tasks/main.yml
index e7c2627f782b74aa8393666adb00b06adac29007..c58c27bd4e3a743cd8255e20c986cfbe8e96b713 100644
--- a/roles/ssh-keepalive/tasks/main.yml
+++ b/roles/ssh-keepalive/tasks/main.yml
@@ -5,7 +5,7 @@
     regexp: "#?ClientAliveInterval [0-9]+"
     line: "ClientAliveInterval 60" 
     backrefs: yes
-  sudo: true
+  become: true
   notify: 
   - restart sshd
   - restart ssh
@@ -17,7 +17,7 @@
     regexp: "#?ClientAliveCountMax [0-9]+"
     line: "ClientAliveCountMax 5" 
     backrefs: yes
-  sudo: true
+  become: true
   notify: 
   - restart sshd
   - restart ssh
diff --git a/roles/ssh-nopassword-login/handlers/main.yml b/roles/ssh-nopassword-login/handlers/main.yml
index df0d3deeca457c10a9805a439cb4a61087cac8d3..c9546663caba8d4f511dea25f245a06c5071e64b 100644
--- a/roles/ssh-nopassword-login/handlers/main.yml
+++ b/roles/ssh-nopassword-login/handlers/main.yml
@@ -1,9 +1,9 @@
 - name: "restart sshd"
   service: name=sshd state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: "restart ssh"
   service: name=ssh state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
diff --git a/roles/ssh-nopassword-login/tasks/main.yml b/roles/ssh-nopassword-login/tasks/main.yml
index f8594e1902a904b5be06ab3575c1ae697532b854..59fb47f69eee03427dd442252563893e47a45040 100644
--- a/roles/ssh-nopassword-login/tasks/main.yml
+++ b/roles/ssh-nopassword-login/tasks/main.yml
@@ -5,7 +5,7 @@
     regexp: "ChallengeResponseAuthentication yes"
     line: "ChallengeResponseAuthentication no" 
     backrefs: yes
-  sudo: true
+  become: true
   notify: 
   - restart sshd
   - restart ssh
@@ -17,7 +17,7 @@
     regexp: "PasswordAuthentication yes"
     line: "PasswordAuthentication no"
     backrefs: yes
-  sudo: true
+  become: true
   notify: 
   - restart sshd
   - restart ssh
diff --git a/roles/ssh-nopassword-login/tasks/main.yml~ b/roles/ssh-nopassword-login/tasks/main.yml~
deleted file mode 100644
index f8594e1902a904b5be06ab3575c1ae697532b854..0000000000000000000000000000000000000000
--- a/roles/ssh-nopassword-login/tasks/main.yml~
+++ /dev/null
@@ -1,24 +0,0 @@
-- name: "Disable Challenge Response"
-  lineinfile:
-  args:
-    dest: /etc/ssh/sshd_config
-    regexp: "ChallengeResponseAuthentication yes"
-    line: "ChallengeResponseAuthentication no" 
-    backrefs: yes
-  sudo: true
-  notify: 
-  - restart sshd
-  - restart ssh
-
-- name: "Disable Password"
-  lineinfile:
-  args:
-    dest: /etc/ssh/sshd_config
-    regexp: "PasswordAuthentication yes"
-    line: "PasswordAuthentication no"
-    backrefs: yes
-  sudo: true
-  notify: 
-  - restart sshd
-  - restart ssh
-
diff --git a/roles/ssh-password-login/handlers/main.yml b/roles/ssh-password-login/handlers/main.yml
index df0d3deeca457c10a9805a439cb4a61087cac8d3..c9546663caba8d4f511dea25f245a06c5071e64b 100644
--- a/roles/ssh-password-login/handlers/main.yml
+++ b/roles/ssh-password-login/handlers/main.yml
@@ -1,9 +1,9 @@
 - name: "restart sshd"
   service: name=sshd state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: "restart ssh"
   service: name=ssh state=restarted
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
diff --git a/roles/ssh-password-login/tasks/main.yml b/roles/ssh-password-login/tasks/main.yml
index d2f18c544ad304ab52049b0dbc73f5f9b4f3e948..7c133001c777cee39ad6fa519d11537d49bc928d 100644
--- a/roles/ssh-password-login/tasks/main.yml
+++ b/roles/ssh-password-login/tasks/main.yml
@@ -5,7 +5,7 @@
     regexp: "ChallengeResponseAuthentication no"
     line: "ChallengeResponseAuthentication yes" 
     backrefs: yes
-  sudo: true
+  become: true
   notify: 
   - restart sshd
   - restart ssh
@@ -17,7 +17,7 @@
     regexp: "PasswordAuthentication yes"
     line: "PasswordAuthentication no"
     backrefs: yes
-  sudo: true
+  become: true
   notify: 
   - restart sshd
   - restart ssh
diff --git a/roles/strudel_build/tasks/main.yml b/roles/strudel_build/tasks/main.yml
index 21c8b806d06ffc8492b32d6e814aa2f72edef6ab..e7e755d73e3e51481349bf014d01b514e69eea12 100644
--- a/roles/strudel_build/tasks/main.yml
+++ b/roles/strudel_build/tasks/main.yml
@@ -5,20 +5,20 @@
 
 - name: add epel on CentOS 7
   shell: rpm -iUvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
-  sudo: true
+  become: true
   when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
   ignore_errors: true
 
 
 - name: install system packages apt
   apt: name={{ item }} state=installed update_cache=true
-  sudo: true
+  become: true
   with_items: system_packages
   when: ansible_os_family == 'Debian'
 
 - name: install system packages yum
   yum: name={{ item }} state=installed
-  sudo: true
+  become: true
   with_items: system_packages
   when: ansible_os_family == 'RedHat'
 
@@ -31,13 +31,13 @@
 
 - name: install pip packages
   command: pip install {{ item }}
-  sudo: true
+  become: true
   with_items: pip_packages
   when: pip_packages is defined
 
 #- name: install pkg deps
 #  shell: /tmp/strudel/system_build_scripts/{{ install_prerequisites }} chdir=/tmp/strudel
-#  sudo: true
+#  become: true
 #  when: ansible_os_family == 'RedHat'
 
 - name: build
diff --git a/roles/strudel_test/tasks/main.yml b/roles/strudel_test/tasks/main.yml
index 2810aa0a4dfc02243d8cbd4de956cb6218232f24..6dfbeac2bfdd2eed22de38df3b3203cdec19ce2c 100644
--- a/roles/strudel_test/tasks/main.yml
+++ b/roles/strudel_test/tasks/main.yml
@@ -7,7 +7,7 @@
 
 - name: install turbovnc
   apt: deb=turbovnc_1.2.3_amd64.deb
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: get turbovnc
@@ -16,7 +16,7 @@
 
 - name: install turbovnc
   yum: src=turbovnc-1.2.3.x86_64.rpm
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
 - name: copy launcher
@@ -24,11 +24,11 @@
 
 - name: install launcher
   apt: deb=/tmp/{{ dest_pkg_name }} 
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian"
 
 - name: install launcher
   yum: src=/tmp/{{ dest_pkg_name }}
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat"
 
diff --git a/roles/syncExports/handlers/main.yml b/roles/syncExports/handlers/main.yml
index 967ec246c1ccf327d7e57a86e54b855907703a45..51c3b0bff89519f6219b5c593e4db1de11127e21 100644
--- a/roles/syncExports/handlers/main.yml
+++ b/roles/syncExports/handlers/main.yml
@@ -2,4 +2,4 @@
 - name: "Reload exports" 
   command: exportfs -ra
   delegate_to: "{{ nfs_server }}"
-  sudo: true
+  become: true
diff --git a/roles/syncExports/tasks/addExports.yml b/roles/syncExports/tasks/addExports.yml
index 6edbd85aa12381f8606ecfa7fdda754012dac4c8..b4c73bb69d948cba5b216582e1f2fd85b4fbe8ee 100644
--- a/roles/syncExports/tasks/addExports.yml
+++ b/roles/syncExports/tasks/addExports.yml
@@ -1,17 +1,17 @@
 ---
 - name: "Templating /etc/exports"
   copy: src=files/etcExports dest=/etc/exports owner=root group=root mode=644
-  sudo: true
+  become: true
   register: exports
 
 - name: "Start the Server"
   service: "name=nfs state=restarted"
-  sudo: true
+  become: true
   when: ansible_os_family == "RedHat" and exports.changed
 
 - name: "Start the Server"
   service: "name=nfs-kernel-server state=restarted"
-  sudo: true
+  become: true
   when: ansible_os_family == "Debian" and exports.changed
 
 - name : "Pause ... clients sometimes have errors"
diff --git a/roles/sysctl_network/files/90-network.conf b/roles/sysctl_network/files/90-network.conf
new file mode 100644
index 0000000000000000000000000000000000000000..21e6a6c57da25781087afe29356172b951b75fc5
--- /dev/null
+++ b/roles/sysctl_network/files/90-network.conf
@@ -0,0 +1,30 @@
+net.ipv4.tcp_max_syn_backlog=30000
+net.ipv4.conf.all.accept_redirects=0
+net.ipv4.udp_rmem_min=8192
+net.ipv4.tcp_congestion_control=htcp
+net.core.default_qdisc=fq_codel
+net.ipv4.tcp_rmem=4096  87380   33554432
+net.ipv4.tcp_tw_recycle=1
+net.ipv4.tcp_tw_reuse=1
+net.core.optmem_max=4194304
+net.ipv4.tcp_slow_start_after_idle=0
+net.core.wmem_max=33554432
+net.ipv4.conf.all.send_redirects=0
+net.core.netdev_budget=600
+net.ipv4.tcp_fack=1
+net.netfilter.nf_conntrack_max=1024000
+net.ipv4.tcp_fastopen=1
+net.ipv4.conf.all.log_martians=0
+net.core.netdev_max_backlog=50000
+net.ipv4.tcp_ecn=1
+net.ipv4.tcp_timestamps=1
+net.ipv4.tcp_mtu_probing=1
+net.ipv4.tcp_wmem=4096  65536   33554432
+net.nf_conntrack_max=1024000
+net.core.somaxconn=1024
+net.ipv4.tcp_fin_timeout=10
+net.ipv4.tcp_sack=1
+kernel.pid_max=4194303
+net.core.rmem_max=33554432
+net.ipv4.udp_wmem_min=8192
+net.ipv4.tcp_dsack=1
diff --git a/roles/sysctl_network/tasks/main.yml b/roles/sysctl_network/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c7824dc35aa39d588c8743a802dd290fe5052d7f
--- /dev/null
+++ b/roles/sysctl_network/tasks/main.yml
@@ -0,0 +1,16 @@
+- name: copying networking config for sysctl
+  copy:
+    mode: '640'
+    src:  90-network.conf
+    dest: '/etc/sysctl.d'
+  become: true
+  become_user: root
+  register: sysctl_network_conf
+
+# The sysctl module manages entries in sysctl.conf; its "reload: yes" option runs /sbin/sysctl -p only when that file changes, and it needs the name of a specific sysctl variable to work on.
+# Here we only drop a file into /etc/sysctl.d and need the settings applied, so that module does not fit; a shell task reloads all sysctl configuration files instead.
+- name: Reloading sysctl
+  shell: sysctl --system
+  become: true
+  become_user: root
+  when: sysctl_network_conf.changed
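For contrast, when individual keys are written to /etc/sysctl.conf rather than shipped as a drop-in file, the stock sysctl module can set and reload them one key at a time. A minimal sketch, reusing one key/value pair from 90-network.conf above (purely illustrative, not part of this role):

    - name: set a single network sysctl key (illustrative alternative)
      sysctl:
        name: net.core.somaxconn
        value: '1024'
        reload: yes
      become: true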
diff --git a/roles/telegraf/files/telegraf_mountstats.py b/roles/telegraf/files/telegraf_mountstats.py
new file mode 100644
index 0000000000000000000000000000000000000000..45fcf8a7fb21443eec754d5015afa3f541d839c1
--- /dev/null
+++ b/roles/telegraf/files/telegraf_mountstats.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+xprtudpcounters="proto,port,bind_count,rpcsends,rpcreceives,badxids,inflightsends,backlogutil".split(',')
+xprttcpcounters="proto,port,bind_count,connect_count,connect_time,idle_time,rpcsends,rpcreceives,badxids,inflightsends,backlogutil".split(',')
+xprtrdmacounters="proto,port,bind_count,connect_count,idle_time,rpcsends,rpcreceives,badxids,backlogutil,read_chunks,write_chunks,reply_chunks,total_rdma_req',total_rdma_rep,pullup,fixup,hardway,failed_marshal,bad_reply".split(',')
+NfsOpCounters="operations,transmissions,major_timeouts,bytes_sent,bytes_recv,queue_time,response_time,request_time".split(',')
+OPS="WRITE,OPEN,CLOSE,ACCESS,RENAME,SYMLINK,CREATE".split(',')
+
+NfsEventCounters = [
+    'inoderevalidates',
+    'dentryrevalidates',
+    'datainvalidates',
+    'attrinvalidates',
+    'vfsopen',
+    'vfslookup',
+    'vfspermission',
+    'vfsupdatepage',
+    'vfsreadpage',
+    'vfsreadpages',
+    'vfswritepage',
+    'vfswritepages',
+    'vfsreaddir',
+    'vfssetattr',
+    'vfsflush',
+    'vfsfsync',
+    'vfslock',
+    'vfsrelease',
+    'congestionwait',
+    'setattrtrunc',
+    'extendwrite',
+    'sillyrenames',
+    'shortreads',
+    'shortwrites',
+    'delay'
+]
+
+NfsByteCounters = [
+    'normalreadbytes',
+    'normalwritebytes',
+    'directreadbytes',
+    'directwritebytes',
+    'serverreadbytes',
+    'serverwritebytes',
+    'readpages',
+    'writepages'
+]
+
+class DeviceData:
+    """DeviceData objects provide methods for parsing and displaying
+    data for a single mount grabbed from /proc/self/mountstats
+    """
+    def __init__(self):
+        self.__nfs = dict()
+        self.__nfs_device = dict()
+
+    def fstype(self):
+        return self.__nfs_device['fstype']
+
+    def tags(self):
+        return ",".join(["{}={}".format(key,value) for key,value in self.__nfs_device.items()])
+
+    def values(self):
+        try:
+            values = ",".join(["{}={}".format(key,value) for key,value in self.__nfs['bytes']])
+            values +=","
+            values += ",".join(["{}={}".format(key,value) for key,value in self.__nfs['events']])
+        except KeyError as e:
+            # key error occurs if we haven't filtered the lustre mount points from the NFS mount points yet
+            return None
+        return values
+
+    def opvalues(self,op):
+        return ",".join(["{}={}".format(key,value) for key,value in self.__nfs[op]])
+
+
+    def __parse_device_line(self, words):
+        if words[0] == 'device':
+            self.__nfs_device['export'] = words[1]
+            self.__nfs_device['mountpoint'] = words[4]
+            self.__nfs_device['fstype'] = words[7]
+
+    def __parse_bytes_line(self, words):
+        if words[0] == 'bytes:':
+            self.__nfs['bytes'] = zip(NfsByteCounters,[ int(x) for x in words[1:]])
+
+    def __parse_events_line(self,words):
+        if words[0] == 'events:':
+            self.__nfs['events'] = zip(NfsEventCounters,[int(x) for x in words[1:]])
+
+    def __parse_ops_line(self,words):
+        if words[0][:-1] in OPS:
+            self.__nfs[words[0][:-1]] = zip(NfsOpCounters, [ int(x) for x in words[1:]])
+
+    def __parse_xprt_line(self, words):
+        if words[0] == 'xprt:':
+            if words[1] == 'udp':
+                self._rpc = zip(xprtudpcounters, words[1:11])
+            if words[1] == 'tcp':
+                self._rpc = zip(xprttcpcounters, words[1:11])
+            if words[1] == 'rdma':
+                self._rpc = zip(xprtrdmacounters, words[1:11])
+
+
+    def parse_stats(self, lines):
+        """Turn a list of lines from a mount stat file into a 
+        dictionary full of stats, keyed by name
+        """
+        foundnfs = False
+        foundrpc = False
+        for line in lines:
+            words = line.split()
+            if len(words) == 0:
+                continue
+            self.__parse_device_line(words)
+            self.__parse_bytes_line(words)
+            self.__parse_events_line(words)
+            self.__parse_ops_line(words)
+            self.__parse_xprt_line(words)
+
+
+def parse_stats_file(filename):
+    """pop the contents of a mountstats file into a dictionary,
+    keyed by mount point.  each value object is a list of the
+    lines in the mountstats file corresponding to the mount
+    point named in the key.
+    """
+    ms_dict = dict()
+    key = ''
+
+    f = open(filename)
+    for line in f.readlines():
+        words = line.split()
+        if len(words) == 0:
+            continue
+        if line.startswith("no device mounted") :
+            continue
+        if words[0] == 'device':
+            key = words[4]
+            new = [ line.strip() ]
+        elif 'nfs' in words or 'nfs4' in words:
+            key = words[3]
+            new = [ line.strip() ]
+        else:
+            new += [ line.strip() ]
+        ms_dict[key] = new
+    f.close()
+
+    return ms_dict
+
+
+def iostats(mountstats):
+    stats = {}
+    for device in mountstats:
+        stats[device] = DeviceData()
+        stats[device].parse_stats(mountstats[device])
+    return stats
+
+def print_influx_line_proto(device,stats):
+
+    try:
+        if not 'nfs' in stats.fstype():
+            return
+        print("mountstats,{} {}".format(stats.tags(), stats.values()))
+        for op in OPS:
+            print("nfsops,{},op={} {}".format(stats.tags(),op,stats.opvalues(op)))
+    except:
+        return
+
+mountstats = parse_stats_file("/proc/self/mountstats")
+stats = iostats(mountstats)
+for device in stats:
+    print_influx_line_proto(device,stats[device])
+
diff --git a/roles/telegraf/files/telegraf_slurmstats.py b/roles/telegraf/files/telegraf_slurmstats.py
new file mode 100644
index 0000000000000000000000000000000000000000..962a2080abbeb1a5532045bce4beb5a51f0033b7
--- /dev/null
+++ b/roles/telegraf/files/telegraf_slurmstats.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+
+class SlurmStats:
+    def __init__(self):
+        self._values = {'backfill':1}
+
+
+    def values(self):
+        values = ",".join(["{}={}".format(key,value) for key,value in self._values.items()])
+        return values
+
+
+def print_stats(stats):
+    print("slurmstats {}".format(stats.values()))
+
+def get_stats():
+    import subprocess
+    p = subprocess.Popen(['sdiag'],stdout = subprocess.PIPE,stderr=subprocess.PIPE)
+    (stdout,stderr) = p.communicate()
+    for l in stdout.decode().splitlines():
+        if "Total backfilled jobs (since last stats cycle start):" in l:
+            v = l.split(':')[1]
+    stats = SlurmStats()
+    stats._values = {'backfill':int(v)}
+    return stats
+
+    
+
+slurmstats = get_stats()
+print_stats(slurmstats)
+
diff --git a/roles/telegraf/handlers/main.yml b/roles/telegraf/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..358f66089d37426ab8e5f1e5e882c367d424a135
--- /dev/null
+++ b/roles/telegraf/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: restart telegraf
+  service:
+    name: telegraf
+    state: restarted
+  become: true
diff --git a/roles/telegraf/tasks/main.yml b/roles/telegraf/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..13701898cd1ae4c091aa148bc8928d091834b0b6
--- /dev/null
+++ b/roles/telegraf/tasks/main.yml
@@ -0,0 +1,65 @@
+- name: Install Telegraf from URL [RHEL/CentOS]
+  yum:
+    name: "{{ telegraf_install_rpm_url }}"
+    state: present
+  when: ansible_os_family == "RedHat"
+  become: true
+  become_user: root
+
+- name: Download Telegraf package via URL [Debian/Ubuntu]
+  get_url:
+    url: "{{ telegraf_install_url }}"
+    dest: /tmp/telegraf-ansible-download.deb
+  when: ansible_os_family == "Debian"
+  become: true
+  become_user: root
+
+- name: Install Telegraf package
+  apt:
+    deb: /tmp/telegraf-ansible-download.deb
+    state: present
+  when: ansible_os_family == "Debian"
+  become: true
+  become_user: root
+
+- name: Make a directory for extra files
+  file: 
+    state: directory
+    mode: 'u=rwx,g=rx,o=rx'
+    owner: 'telegraf'
+    group: 'telegraf'
+    path: '/opt/telegraf/bin'
+  become: true
+  become_user: root
+
+- name: copy mountstats plugin
+  copy:
+    mode: 'u=rwx,g=rx,o=rx'
+    src: telegraf_mountstats.py
+    dest: '/opt/telegraf/bin/telegraf_mountstats.py'
+  become: true
+  become_user: root
+
+- name: copy slurmstats plugin
+  copy:
+    mode: 'u=rwx,g=rx,o=rx'
+    src: telegraf_slurmstats.py
+    dest: '/opt/telegraf/bin/telegraf_slurmstats.py'
+  become: true
+  become_user: root
+
+
+
+- name: Install Telegraf config
+  template:
+    src: telegraf.conf.j2
+    dest: /etc/telegraf/telegraf.conf
+    owner: telegraf
+    group: telegraf
+    mode: '640'
+  notify:
+    - "restart telegraf"
+  become: true
+  become_user: root
+
+
diff --git a/roles/telegraf/templates/telegraf.conf.j2 b/roles/telegraf/templates/telegraf.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..52c2fae9c6f193000249ccd1bdcc64574b72d103
--- /dev/null
+++ b/roles/telegraf/templates/telegraf.conf.j2
@@ -0,0 +1,153 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[tags]
+  hostgroup = "{{ hostgroup | default('undefined') }}"
+  cluster = "{{ clustername | default('undefined') }}"
+  computenodeclass = "{{ computenodeclass | default('undefined') }}"
+  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+  # rack = "1a"
+
+# Configuration for telegraf agent
+[agent]
+  # Default data collection interval for all plugins
+  interval = "10s"
+  # Rounds collection interval to 'interval'
+  # ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  # Default data flushing interval for all outputs. You should not set this below
+  # interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "60s"
+  # Jitter the flush interval by a random amount. This is primarily to avoid
+  # large write spikes for users running a large number of telegraf instances.
+  # ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "5s"
+
+  # Run telegraf in debug mode
+  debug = false
+  # Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://localhost:8089"] # UDP endpoint example
+  urls = ["{{ influxdb_server }}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "telegraf" # required
+  # Precision of writes, valid values are n, u, ms, s, m, and h
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  # Connection timeout (for the connection with InfluxDB), formatted as a string.
+  # If not provided, will default to 0 (no timeout)
+  # timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  username = "{{ influxdb_user }}" 
+  password = "{{ influxdb_password }}"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+  [outputs.influxdb.tagdrop]
+       influxdb_database = ["*"]
+
+[[outputs.influxdb]]
+  urls = ["{{ influxdb_server }}"] # required
+  database = "slurm" # required
+  precision = "s"
+  username = "{{ influxdb_user }}" 
+  password = "{{ influxdb_password }}"
+  [outputs.influxdb.tagpass]
+       influxdb_database = ["slurm"]
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+  # Whether to report per-cpu stats or not
+  percpu = true
+  # Whether to report total system cpu stats or not
+  totalcpu = true
+  # Comment this line if you want the raw CPU time metrics
+  drop = ["time_*"]
+
+# Read metrics about disk usage by mount point
+[[inputs.disk]]
+  # By default, telegraf gather stats for all mountpoints.
+  # Setting mountpoints will restrict the stats to the specified mountpoints.
+  # mount_points=["/"]
+
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+  # By default, telegraf will gather stats for all devices including
+  # disk partitions.
+  # Setting devices will restrict the stats to the specified devices.
+  # devices = ["sda", "sdb"]
+  # Uncomment the following line if you do not need disk serial numbers.
+  # skip_serial_number = true
+
+# Read metrics about memory usage
+[[inputs.mem]]
+  # no configuration
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+  # no configuration
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+  # no configuration
+
+[[inputs.net]]
+  # no configuration
+ 
+[[inputs.netstat]]
+  # no configuration
+
+[[inputs.exec]]
+  commands = [
+    "/opt/telegraf/bin/telegraf_mountstats.py"
+  ]
+  data_format = "influx"
+  timeout="4s"
+  interval="300s"
+
+# Both Slurm ManagementNodes will log sdiag stats, but no Compute or Login nodes will
+{% if 'ManagementNodes' in group_names %}
+[[inputs.exec]]
+  commands = [
+    "/opt/telegraf/bin/telegraf_slurmstats.py"
+  ]
+  data_format = "influx"
+  timeout="4s"
+  interval="60s"
+  [inputs.exec.tags]
+    influxdb_database="slurm"
+{% endif %}
+
+
+###############################################################################
+#                              SERVICE INPUTS                                 #
+###############################################################################
diff --git a/roles/telegraf/vars/main.yml b/roles/telegraf/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9e2c5c5a63f1ee5de9e6f2d53201c615997dc187
--- /dev/null
+++ b/roles/telegraf/vars/main.yml
@@ -0,0 +1,2 @@
+telegraf_install_rpm_url: https://dl.influxdata.com/telegraf/releases/telegraf-1.12.6-1.x86_64.rpm
+telegraf_install_deb_url: https://dl.influxdata.com/telegraf/releases/telegraf_1.12.6-1_amd64.deb
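The telegraf role's template references several variables that are not defined in the role itself (influxdb_server, influxdb_user, influxdb_password, plus the optional hostgroup, clustername and computenodeclass tags). A hedged sketch of a playbook supplying them; the host group matches the one used in the template conditional, but the endpoint and credential values below are placeholders, not values from this repo:

    - hosts: ManagementNodes
      roles:
        - telegraf
      vars:
        influxdb_server: "http://influxdb.example.org:8086"  # placeholder endpoint
        influxdb_user: telegraf                               # placeholder credentials
        influxdb_password: "{{ vault_influxdb_password }}"    # assumed to come from a vault file
        hostgroup: management
        clustername: m3
        computenodeclass: none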
diff --git a/roles/upgrade-lustre/tasks/main.yml b/roles/upgrade-lustre/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..697d68933bd41807e73da81deac23a771cebb4ad
--- /dev/null
+++ b/roles/upgrade-lustre/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: ensure a list of packages installed
+  yum:
+    name: "{{ packages }}"
+    state: latest
+  vars:
+    packages:
+    - lustre-client
+    - kmod-lustre-client
+  become: true
+  become_user: root
+  register: update_done
+
+- name: reboot the machine if an upgrade occurred
+  reboot:
+  when: update_done.changed
+  become: true
+  become_user: root
diff --git a/roles/upgrade/tasks/main.yml b/roles/upgrade/tasks/main.yml
index 0d0a6041a98f6d641e0e4aecd38479e6063f444e..85255ce6e8c1be524eb118ff7846767f1304d8f8 100644
--- a/roles/upgrade/tasks/main.yml
+++ b/roles/upgrade/tasks/main.yml
@@ -8,7 +8,7 @@
 
 - name: apt-get upgrade
   apt: upgrade=safe
-  sudo: true
+  become: true
   when: ansible_os_family=="Debian"
 
 - name: yum remove
@@ -55,6 +55,13 @@
   changed_when: False
   args:  
     warn: False
+    
+- name: get installed kernel package versions (Debian)
+  shell: dpkg -l linux-image* | grep "^ii" | grep "linux-image-[0-9]" | sed 's/\ \ */ /g' | cut -f 2 -d " " | cut -f 3-5 -d "-"
+  register: dpkg_l_output
+  when: ansible_os_family=="Debian"
+  check_mode: no
+  changed_when: False    
 
 - name: get kernel version
   shell: uname -r
@@ -73,10 +80,14 @@
   debug: var=uname_r_output
 
 - name: set reboot when kernel has changed
-  set_fact: 
+  set_fact:
     reboot_now: true
-  when: not uname_r_output.stdout in rpm_q_output.stdout and ansible_os_family=="RedHat"
+  when: ansible_os_family=="RedHat" and not uname_r_output.stdout in rpm_q_output.stdout
 
+- name: set reboot when kernel has changed
+  set_fact:
+    reboot_now: true
+  when: ansible_os_family=="Debian" and not uname_r_output.stdout in dpkg_l_output.stdout 
 
 - name: debug3
   debug: var=reboot_now
diff --git a/roles/usedornot.py b/roles/usedornot.py
new file mode 100644
index 0000000000000000000000000000000000000000..2699c2d0625b33823b8a00b49c590b81ae6a9b91
--- /dev/null
+++ b/roles/usedornot.py
@@ -0,0 +1,10 @@
+def used(role):
+    import subprocess
+    # grep exits 0 when the role name appears in any file in the current directory
+    p = subprocess.call("grep -q {} *".format(role), shell=True)
+    return p == 0
+
+
+with open('roles.txt') as f:
+    roles = [line.strip() for line in f]
+for r in roles:
+    print("{},{}".format(r, used(r)))
diff --git a/roles/vncserver/tasks/main.yml b/roles/vncserver/tasks/main.yml
index c79336e6f512de9b7a99590092bce78a04edbcf0..1a0078fb7dc9b2f4870f3fae247148bbb9abcb79 100644
--- a/roles/vncserver/tasks/main.yml
+++ b/roles/vncserver/tasks/main.yml
@@ -6,16 +6,16 @@
 
 - name: install system packages apt
   apt: name={{ item }} state=present update_cache=true  force=yes
-  sudo: true
+  become: true
   with_items: system_packages
   when: ansible_os_family == 'Debian'
 
 - name: force the use of KDE desktop
   template: src=vncserver.centos dest=/bin/vncserver
-  sudo: true
+  become: true
   when: ansible_os_family == 'RedHat'
 
 - name: force the use of mate desktop
   template: src=vncserver.ubuntu dest=/usr/bin/vncserver
-  sudo: true
+  become: true
   when: ansible_os_family == 'Debian'
diff --git a/scripts/make_inventory.py b/scripts/make_inventory.py
new file mode 100755
index 0000000000000000000000000000000000000000..48bd21d85e1a7314d0982d062227c33ac2b87783
--- /dev/null
+++ b/scripts/make_inventory.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+import sys, os, string, socket, re
+import shlex, multiprocessing, time, shutil, json
+from novaclient import client as nvclient
+from cinderclient import client as cdclient
+import novaclient.exceptions as nvexceptions
+from keystoneclient import client as ksclient
+from joblib import Parallel, delayed
+from multiprocessing import Process, Manager, Pool
+
+def gatherInfo(md_key,md_value,authDict,project_id,inventory):
+    ## Fetch the Nova Object
+    from keystoneclient import client as ksclient
+    from keystoneauth1.identity import v3
+    from keystoneauth1 import session
+    auth = v3.Password(project_id=project_id,**authDict)
+    sess = session.Session(auth=auth)
+    nc = nvclient.Client('2.0',session=sess)
+    cc = cdclient.Client('2.0',session=sess)
+        
+    for server in nc.servers.list():
+        if server.metadata and \
+        'ansible_host_groups' in server.metadata and \
+        md_key in  server.metadata:
+            if server.metadata[md_key].strip() != md_value.strip(): continue
+            unwantedChars = """][")("""
+            rgx = re.compile('[%s]' % unwantedChars)
+            ansible_groups = rgx.sub('', server.metadata['ansible_host_groups']).split(',')
+            hostname = server.name
+            novaVolumes = cc.volumes.list(server.id)
+            # Set Ansible Host Group
+            for group in ansible_groups:
+                groupName = group.strip()
+                if groupName not in inventory: inventory[groupName] = []
+                inventory[groupName].append(hostname)
+            # Add other metadata
+            if not hostname in inventory['_meta']['hostvars']:
+                inventory['_meta']['hostvars'][hostname] = {}
+            for md in server.metadata.items():
+                if md[0] not in (md_key,'ansible_host_groups'):
+                    inventory['_meta']['hostvars'][hostname].update({ md[0]:md[1] })
+            if novaVolumes:
+                volDict = {}    
+                for volume in novaVolumes:
+                    try:
+                        if volume.attachments[0]['server_id'] == server.id:
+                            volDict[volume.name] = {'dev':'/dev/disk/by-id/virtio-' + volume.id[:20],'uuid':volume.id}
+                    except IndexError:
+                        continue
+                if volDict: inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'] = volDict
+            network_name=None
+            if len(list(server.networks.keys())) > 1:
+                for nn in server.networks.keys():
+                    if 'internal' in nn:
+                        network_name = nn
+                    else:
+                        inventory['_meta']['hostvars'][hostname]['public_host'] = server.networks[nn][0]
+            if network_name is None:
+                network_name = list(server.networks.keys())[0]
+
+            inventory['_meta']['hostvars'][hostname]['ansible_host'] = server.networks[network_name][0]
+
+        else:
+            continue
+    return inventory
+
+def merge(i,j):
+    for k in i.keys():
+        v=i[k]
+        if k in j:
+            if isinstance(v,list):
+                j[k].extend(v)
+            if isinstance(v,dict):
+                merge(i[k],j[k])
+        else:
+            j[k]=i[k]
+
+
+if __name__ == "__main__":
+    inventory = {}
+    inventory['_meta'] = { 'hostvars': {} }
+    authDict={}
+    try:
+        authDict['auth_url'] = os.environ['OS_AUTH_URL']
+        authDict['username'] = os.environ['OS_USERNAME']
+        authDict['password'] = os.environ['OS_PASSWORD']
+        authDict['user_domain_name'] = os.environ['OS_USER_DOMAIN_NAME']
+    except KeyError:
+        print("Env Variables not set, Please run: source <openstack rc file>")
+        sys.exit()
+    if sys.argv[1] == "static":
+        static=True
+        md_key="project_name"
+        md_value=sys.argv[2]
+    else:
+        static=False
+        md_key="project_name"
+        md_value=sys.argv[1]
+    from keystoneclient import client as ksclient
+    import keystoneclient
+    from keystoneauth1.identity import v3
+    from keystoneauth1 import session
+#    auth = v3.Password(username=userName, password=passwd, auth_url=authUrl,user_domain_name=domainName)
+    auth = v3.Password(unscoped=True,**authDict)
+    sess = session.Session(auth=auth)
+    kc = ksclient.Client(session=sess)
+    kc.include_metadata = False
+    authmgr = keystoneclient.v3.auth.AuthManager(kc)
+    projects = authmgr.projects()
+    enabled_projects = [ x for x in projects if x.enabled ]
+
+    inventory_list = Parallel(n_jobs=len(projects))(delayed(gatherInfo) (md_key,md_value, authDict, proj.id, inventory) for proj in enabled_projects)
+    inventory={}
+
+    for i in inventory_list:
+        merge(i,inventory)
+    if not inventory['_meta']['hostvars']:
+        print("I could not find any resouces tagged with {}: {}".format(md_key,md_value))
+    else:
+        if static:
+            print( "#!/bin/bash\necho '"+json.dumps(inventory,indent=4)+"'")
+        else:
+            print(json.dumps(inventory))
diff --git a/syncNFS.yml b/syncNFS.yml
index be34ef3dda026b5bf54e90196a7e1f0dafa4683f..9095bfc8008c18aa940c5a63e760685b67f56fae 100644
--- a/syncNFS.yml
+++ b/syncNFS.yml
@@ -5,7 +5,7 @@
   roles:
     - syncExports
     - nfs-client
-  sudo: true
+  become: true
   vars: 
     nfs_server: "vm-118-138-240-224.erc.monash.edu.au"
     openvpn_servers: