diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 24b4dcbd65298b92768dc13857f9c679750b3a9f..466caa3b592a36aa762098fc86c78164dc377d7f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -13,6 +13,7 @@ stages:
   - ansible_create_cluster_stage
   - push_button_spawn_cluster
   - tests
+
   - integration_test #https://docs.gitlab.com/ee/ci/triggers/
   - clean
   
@@ -23,8 +24,10 @@ trigger_pipeline_in_Clusterbuild:
   - ansible
   script:
   - echo ${CI_JOB_TOKEN}
-  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=aciab_upstream https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # ID is from clusterbuild
-
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # ID is from clusterbuild
+  only:
+    variables:
+      - $CI_PROJECT_NAME == "ansible_cluster_in_a_box"
+
 
 trigger_pipeline_in_monarch:
   stage: integration_test
@@ -32,8 +35,9 @@ trigger_pipeline_in_monarch:
   - ansible
   script:
   - echo ${CI_JOB_TOKEN}
-  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=cicd https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # ID is from monarch
-
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # ID is from monarch
+  only:
+    variables:
+      - $CI_PROJECT_NAME == "ansible_cluster_in_a_box"
 
 yamllint:
   stage: lint
@@ -70,7 +74,6 @@ ansiblelint:
     - cd CICD
     - python3 ansiblelint/run_lint.py --targets master_playbook.yml
     
-
 build_cluster_cicd:
   stage: heat
   allow_failure: false
@@ -108,13 +111,15 @@ ansible_create_cluster_stage:
     - echo "ansible_create_cluster_stage"
     - bash -x ./CICD/ansible_create_cluster_script.sh
     - cd CICD
-    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags monitoring master_playbook.yml
+    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags SiteSpecific master_playbook.yml
     - sleep 15
     - echo uglyuglyfix
     - ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -b -a "systemctl restart slurmdbd" ManagementNodes
-    - sleep 60
-    - echo do it again
-    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags monitoring master_playbook.yml
+    - cd plays
+    - ansible-playbook -i ../files/inventory.$STACKNAME --key-file ../../gc_key.pem --skip-tags monitoring computenodes.yml | tee nochange.log
+    - echo [ `grep changed= ./nochange.log -c` = `grep changed=0 ./nochange.log -c` ] > bashtest.sh   # crude idempotency check: every line reporting changed= must also report changed=0
+    - bash ./bashtest.sh
+    - ansible-playbook -i ../files/inventory.$STACKNAME --key-file ../../gc_key.pem --skip-tags monitoring --check computenodes.yml
     
 
 tests:
@@ -134,9 +139,10 @@ tests:
     - grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME   #fail if inventory file is empty
     - ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
     - ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
-    
-    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sinfo" ManagementNodes
-    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "squeue" ManagementNodes
+    - echo -e '[defaults]\nallow_world_readable_tmpfiles = True' > ansible.cfg
+    # Need to find a better check for sinfo
+    #- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name sinfo -type f" ManagementNodes
+    #- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name squeue -type f" ManagementNodes
     - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
     - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
     - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
@@ -147,6 +153,7 @@ tests:
     - bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"
     
 extended:
   stage: extended
@@ -159,7 +166,7 @@ extended:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME ${CI_PROJECT_NAME}
   only:
     variables:
       - $EXTENDED != null
@@ -180,7 +187,7 @@ manual_cluster_spawn:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME ${CI_PROJECT_NAME}
     - openstack stack list
     - export STACKNAME=$MANUAL_STACKNAME
     - sleep 25
@@ -228,3 +235,4 @@ clean:
 #   after_script:
 #     - sleep 20 # artifically wait a bit to make sure it is really dead
 
+
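Note: the ansible_create_cluster_stage job above generates bashtest.sh on the fly to assert idempotence. A standalone sketch of the same check, assuming nochange.log holds ansible-playbook output whose PLAY RECAP lines carry one changed=N token per host:

    #!/bin/bash
    # fail when any host reports a nonzero changed= count, i.e. the second
    # playbook run was not idempotent
    total=$(grep -c 'changed=' nochange.log)      # lines reporting any changed= count
    unchanged=$(grep -c 'changed=0' nochange.log) # lines reporting changed=0
    if [ "$total" -ne "$unchanged" ]; then
        echo "not idempotent: $((total - unchanged)) host(s) reported changes" >&2
        exit 1
    fi
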
diff --git a/CICD/files/.gitignore b/CICD/files/.gitignore
index 37e22cdfa443a09339be8c5dc62c492e2914cce0..de782f9443327185343652401b63a61d64ec76ea 100644
--- a/CICD/files/.gitignore
+++ b/CICD/files/.gitignore
@@ -1,4 +1,6 @@
+nhc.conf
 ssh_known_hosts
-*.conf
+slurm.conf
+slurmdbd.conf
 etcHosts
 inventory.*
diff --git a/CICD/heat/gc_HOT.yaml b/CICD/heat/gc_HOT.yaml
index 7b44445e1831e0766a21dda38da6c7ea4093ad1c..4c7224d1512deac0778cbc86cddb13045ad90f76 100644
--- a/CICD/heat/gc_HOT.yaml
+++ b/CICD/heat/gc_HOT.yaml
@@ -2,6 +2,7 @@
 heat_template_version: 2013-05-23
 description: "A simple template to boot a cluster of desktops (LoginNode, ManagementNodes and Desktop Nodes)"
 # avz parameters disabled. they are working but I want just more options than monash-02. I would like to have a parameter that says "I don't care"
+#This requires gc_secgroups to be called beforehand
 
 parameters:
   ubuntu_1804_image_id:
@@ -33,76 +34,33 @@ parameters:
   Flavour:
     type: string
     default: m3.xsmall
-
+  SlurmSecGroupID:
+    type: string
+    label: Resource ID
+    default: 6e7a09b0-981c-424f-a9b7-9fd4f4d8f416
+  NFSSecGroupID:
+    type: string
+    label: Resource ID
+    default: b07a75a3-830c-4778-96c6-8a3732ec7d6c
+  MySQLSecGroupID:
+    type: string
+    label: Resource ID
+    default: 4478f245-de5c-4177-bcbd-6fa661032cbe
+  SSHMonashSecGroupID:
+    type: string
+    label: Resource ID
+    default: c15081f4-c756-4c57-b8cf-388dd7fdbcd4
+  HTTPsSecGroupID:
+    type: string
+    label: Resource ID
+    default: 2d4510c3-ae73-44ea-9700-b6f0a00bf7aa
+  PublicSSHSecGroupID:
+    type: string
+    label: Resource ID
+    default: 8a029c04-08ce-40f1-a705-d45a2077e27d
 
 resources:
 
-  SlurmSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "heatslurmsecgroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 12000,
-               port_range_max: 12999},
-              { protocol: tcp,
-               port_range_min: 6817,
-               port_range_max: 6819},
-              { protocol: tcp,
-               port_range_min: 1019,
-               port_range_max: 1019}]
-  NFSSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "heatnfssecgroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 2049,
-               port_range_max: 2049},
-              { protocol: tcp,
-               port_range_min: 111,
-               port_range_max: 111},
-              { protocol: udp,
-               port_range_min: 2049,
-               port_range_max: 2049},
-              { protocol: udp,
-               port_range_min: 111,
-               port_range_max: 111}]
-  MySQLSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "heatmysqlsecgroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 3306,
-               port_range_max: 3306,
-               remote_mode: "remote_group_id"} ]
-  SSHMonashSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "SSHMonashSecGroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 118.138.240.0/21
-     } ]
-#  SSHInternalSecGroup:
-#   type: "OS::Neutron::SecurityGroup"
-#   properties:
-#     name: "SSHInternalSecGroup"
-#     rules: [ { protocol: tcp,
-#               port_range_min: 22,
-#               port_range_max: 22,
-#               direction: ingress} ]
-               #remote_ip_prefix: { get_param: REMOTE_IP }, direction: ingress
-  webaccess:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "webaccess"
-     rules: [ { protocol: tcp,
-               port_range_min: 80,
-               port_range_max: 80},
-              { protocol: tcp,
-               port_range_min: 443,
-               port_range_max: 443} ]
 
   SQLNode0:
    type: "OS::Nova::Server"
@@ -113,7 +71,7 @@ resources:
     flavor: m3.xsmall
     image: { get_param: centos_7_image_id }
     key_name: { get_param: ssh_key }
-    security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: MySQLSecGroup }, { get_resource: NFSSecGroup } ]
+    security_groups: [ { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: MySQLSecGroupID }, { get_param: NFSSecGroupID } ]
     metadata:
      ansible_host_groups: [ SQLNodes, NFSNodes ]
      ansible_ssh_user: ec2-user
@@ -157,10 +115,10 @@ resources:
     volume_id: { get_resource: DBVolume }
     instance_uuid: { get_resource: SQLNode0 }
 
-  MgmtNodes:
+  MgmtNodesCentos7:
    type: "OS::Heat::ResourceGroup"
    properties:
-    count: 2
+    count: 1
     resource_def:
       type: My::Server::MgmtNode
       properties:
@@ -170,10 +128,26 @@ resources:
         mynodename:
          list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmt%index%' ]]
         ssh_key: { get_param: ssh_key }
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
         project_name: { get_param: project_name }
 
-  LoginNodes:
+  MgmtNodesU:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 1
+    resource_def:
+      type: My::Server::MgmtNode
+      properties:
+        #avz: { get_param: avz }
+        image: { get_param: ubuntu_1804_image_id }
+        ansible_ssh_user: ubuntu
+        mynodename:
+         list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmtU%index%' ]]
+        ssh_key: { get_param: ssh_key }
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
+        project_name: { get_param: project_name }
+
+  LoginNodesC:
    type: "OS::Heat::ResourceGroup"
    properties:
     count: 1
@@ -186,7 +160,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'login%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: PublicSSHSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ LoginNodes ]
        ansible_ssh_user: ec2-user
@@ -194,6 +168,27 @@ resources:
       networks:
        - network: { get_param: NetID }
 
+  LoginNodesU:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 1
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: ubuntu_1804_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'loginU%index%' ]]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
+      metadata:
+       ansible_host_groups: [ LoginNodes ]
+       ansible_ssh_user: ubuntu
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
   DesktopNodes:
    type: "OS::Heat::ResourceGroup"
    properties:
@@ -207,7 +202,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopc%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ DesktopNodes, VisNodes, ComputeNodes ]
        ansible_ssh_user: ec2-user
@@ -215,7 +210,28 @@ resources:
       networks:
        - network: { get_param: NetID }
 
-  ComputeNodes:
+  ComputeNodesU:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 0
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: m3.xsmall
+      image: { get_param: ubuntu_1804_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computeU%index%' ]]
+      security_groups: [ default, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: SSHMonashSecGroupID } ]
+      metadata:
+       ansible_host_groups: [ ComputeNodes ]
+       ansible_ssh_user: ubuntu
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
+  ComputeNodesCentos7:
    type: "OS::Heat::ResourceGroup"
    properties:
     count: 1
@@ -227,8 +243,8 @@ resources:
       image: { get_param: centos_7_image_id }
       key_name: { get_param: ssh_key }
       name:
-       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec7%index%' ]]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ ComputeNodes ]
        ansible_ssh_user: ec2-user
@@ -244,19 +260,40 @@ resources:
      type: "OS::Nova::Server"
      properties:
       #availability_zone: { get_param: avz }
-      flavor: m3.xsmall
+      flavor: mon.c10r35.gpu-k2
       image: { get_param: ubuntu_1804_image_id }
       key_name: { get_param: ssh_key }
       name:
-       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopu%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopu%index%' ]]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
-       ansible_host_groups: [ DesktopNodes ]
+       ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, VisNodes ]
        ansible_ssh_user: ubuntu
        project_name: { get_param: project_name }
       networks:
        - network: { get_param: NetID }
 
+  CentosDesktopNodes:
+   type: "OS::Heat::ResourceGroup"
+   properties:
+    count: 0
+    resource_def:
+     type: "OS::Nova::Server"
+     properties:
+      #availability_zone: { get_param: avz }
+      flavor: mon.c10r35.gpu-k2
+      image: { get_param: centos_7_image_id }
+      key_name: { get_param: ssh_key }
+      name:
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopc%index%' ]]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
+      metadata:
+       ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, K1, VisNodes ]
+       ansible_ssh_user: ec2-user
+       project_name: { get_param: project_name }
+      networks:
+       - network: { get_param: NetID }
+
   ComputeNodeRHEL:
    type: "OS::Heat::ResourceGroup"
    properties:
@@ -270,27 +307,10 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computerhel%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ DGXRHELNodes ]
        ansible_ssh_user: cloud-user
        project_name: { get_param: project_name }
       networks:
        - network: { get_param: NetID }
-
-#  PySSHauthz:
-#   type: "OS::Nova::Server"
-#   properties:
-#    name:
-#     list_join: [ '-', [ { get_param: "OS::stack_name" }, 'pysshautz' ]]
-#    availability_zone: { get_param: avz }
-#    flavor: m3.xsmall
-#    image: { get_param: ubuntu_1804_image_id }
-#    key_name: { get_param: ssh_key }
-#    security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: webaccess } ]
-#    metadata:
-#     ansible_host_groups: [ PySSHauthz ]
-#     ansible_ssh_user: ubuntu
-#     project_name: { get_param: project_name }
-#    networks:
-#      - network: { get_param: NetID }
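Note: with the security groups moved out of this template, the *SecGroupID parameter defaults above are one tenant's UUIDs and will not resolve elsewhere. A sketch of overriding them at create time by looking the IDs up from the group names defined in gc_secgroups.hot (stack name and paths mirror heatcicdwrapper.sh; this is illustrative, the wrapper normally performs the create):

    openstack stack create --wait \
      --template ./heat/gc_HOT.yaml \
      -e ./heat/resource_registry.yaml \
      --parameter "project_name=$STACKNAME" \
      --parameter "SlurmSecGroupID=$(openstack security group show heatslurmsecgroup -f value -c id)" \
      --parameter "NFSSecGroupID=$(openstack security group show heatnfssecgroup -f value -c id)" \
      --parameter "MySQLSecGroupID=$(openstack security group show heatmysqlsecgroup -f value -c id)" \
      --parameter "SSHMonashSecGroupID=$(openstack security group show SSHMonashSecGroup -f value -c id)" \
      --parameter "PublicSSHSecGroupID=$(openstack security group show SSHSecGroup -f value -c id)" \
      "$STACKNAME"
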
diff --git a/CICD/heat/gc_secgroups.hot b/CICD/heat/gc_secgroups.hot
new file mode 100644
index 0000000000000000000000000000000000000000..fe7d4b35f50549c7e81f7f65fc6d73ff0e9fd0f7
--- /dev/null
+++ b/CICD/heat/gc_secgroups.hot
@@ -0,0 +1,197 @@
+---
+heat_template_version: 2013-05-23
+description: "Security groups for the CICD cluster"  # call with openstack stack [update | create] --wait --template gc_secgroups.hot SecurityGroupStack
+
+resources:
+
+  SlurmSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatslurmsecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 12000,
+               port_range_max: 12999,
+               remote_mode: "remote_group_id"},
+              { protocol: tcp,
+               port_range_min: 6817,
+               port_range_max: 6819,
+               remote_mode: "remote_group_id"},
+              { protocol: tcp,
+               port_range_min: 1019,
+               port_range_max: 1019,
+               remote_mode: "remote_group_id"}]
+  NFSSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatnfssecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 2049,
+               port_range_max: 2049,
+               remote_mode: "remote_group_id"},
+              { protocol: tcp,
+               port_range_min: 111,
+               port_range_max: 111,
+               remote_mode: "remote_group_id"},
+              { protocol: udp,
+               port_range_min: 2049,
+               port_range_max: 2049,
+               remote_mode: "remote_group_id"},
+              { protocol: udp,
+               port_range_min: 111,
+               port_range_max: 111,
+               remote_mode: "remote_group_id"} ]
+  MySQLSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatmysqlsecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 3306,
+               port_range_max: 3306,
+               remote_mode: "remote_group_id"} ]
+  PublicSSHSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "SSHSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22} ]
+  SSHMonashSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "SSHMonashSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 59.191.208.0/20
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 114.30.64.0/21
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 118.138.0.0/16
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 118.139.0.0/17
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 130.194.0.0/16
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 203.0.141.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 203.6.141.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 203.23.136.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 59.191.192.0/20
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 202.158.212.32/27
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 130.194.13.96/27
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 49.127.0.0/16
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 202.58.246.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 202.94.69.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 103.35.228.0/22
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 43.246.232.0/22
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 59.191.220.0/22
+     } ]
+  HTTPsSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "HTTPsSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 80,
+               port_range_max: 80},
+              { protocol: tcp,
+               port_range_min: 443,
+               port_range_max: 443} ]
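Note: per the comment on its description line, this template is managed as its own stack, created once before any cluster stack. A sketch of the round trip, creating the groups and then listing the IDs that feed gc_HOT.yaml's parameters:

    # create (or later refresh) the shared security groups
    openstack stack create --wait --template gc_secgroups.hot SecurityGroupStack
    openstack stack update --wait --template gc_secgroups.hot SecurityGroupStack
    # recover the generated IDs for use as gc_HOT.yaml parameter values
    openstack security group list -f value -c ID -c Name
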
diff --git a/CICD/heat/heatcicdwrapper.sh b/CICD/heat/heatcicdwrapper.sh
index 26afdebda88b5ba150f9e947b997f21e7f6b461d..ff8aa04e3307ebccad82258f487cab01e03e3f7b 100644
--- a/CICD/heat/heatcicdwrapper.sh
+++ b/CICD/heat/heatcicdwrapper.sh
@@ -8,14 +8,8 @@ function usage {
     exit 1
 }
 
-if [ "$#" -ne 2 ]; then
-    echo "Illegal number of parameters expecting 2"
-    usage
-fi
-
 STACKNAME=$2
 
-
 if [[ "$STACKNAME" == "CICD"* ]]; then
   echo "CICD found in stackname. doing nothing"
 else
@@ -83,6 +77,8 @@ case "$1" in
                    echo "I cannot update a stack which does not exist"
                    exit -45
             fi
+            openstack stack check --wait $STACKNAME
+            sleep 2
             openstack stack update --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
             ret=$?
             exit $ret
@@ -90,6 +86,8 @@ case "$1" in
         create_or_update)
             if  check_stack_exists
                then
+               openstack stack check --wait $STACKNAME
+               sleep 2
                openstack stack update --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
                ret=$?
                exit $ret
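Note: dropping the two-argument guard above lets the new three-argument call sites in .gitlab-ci.yml through, but also silences genuine misuse. A sketch of a guard matching the new calling convention (action, stack name, optional project name), should one be wanted back:

    if [ "$#" -lt 2 ] || [ "$#" -gt 3 ]; then
        echo "Illegal number of parameters, expecting 2 or 3: ACTION STACKNAME [PROJECT_NAME]"
        usage
    fi
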
diff --git a/CICD/heat/resource_registry.yaml b/CICD/heat/resource_registry.yaml
index 0638b887c8c09d5d6a98f51a34d3b4eeb6e9aafb..421a309d5ce769fdaa0cfcf590fc927a4104eab1 100644
--- a/CICD/heat/resource_registry.yaml
+++ b/CICD/heat/resource_registry.yaml
@@ -1,2 +1,2 @@
 resource_registry:
-  My::Server::MgmtNode: mgmtnode_HOT.yaml
+  My::Server::MgmtNode: ./mgmtnode_HOT.yaml
diff --git a/CICD/plays/allnodes.yml b/CICD/plays/allnodes.yml
index d6eed3ef32a32d9b702b20534e2f916652c3c3e4..3245f995d793ddd598ae7d45c3637b976b1198c3 100644
--- a/CICD/plays/allnodes.yml
+++ b/CICD/plays/allnodes.yml
@@ -24,7 +24,7 @@
 #  - { role: disable_selinux, tags: [ disableselinux ] }
   - { role: etcHosts, tags: [ networking ] }
   - { role: config_repos, tags: [ repos ] }
-  - { role: upgrade }
+  - { role: upgrade, tags: [ upgrade ]}
   - { role: set_password }
 
 
@@ -46,4 +46,5 @@
   - { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
   - { role: SSHKnownHosts, tags: [ known_hosts ] }
   - { role: jasons_ssh_ca, tags: [ ssh_ca ] }
+  - { role: ntp }
   - { role: set_timezone }
diff --git a/CICD/plays/computenodes.yml b/CICD/plays/computenodes.yml
index fa6a749c96c8341915132e8cd577ba780abc5f5e..8dd5617c05bf15fd8e1d5049ef1d1db6a2fd1fd8 100644
--- a/CICD/plays/computenodes.yml
+++ b/CICD/plays/computenodes.yml
@@ -1,5 +1,6 @@
 
 - hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
+  gather_facts: True
   vars_files: 
   - vars/passwords.yml 
   - vars/names.yml
@@ -8,10 +9,17 @@
   - vars/slurm.yml 
   - vars/vars.yml 
   tasks:
+  - include_vars: vars/passwords.yml 
+  - include_vars: vars/names.yml
+  - include_vars: vars/ldapConfig.yml
+  - include_vars: vars/filesystems.yml 
+  - include_vars: vars/slurm.yml 
+  - include_vars: vars/vars.yml 
   - { name: set use shared state, set_fact: usesharedstatedir=False }
   tags: [ always ]
 
 - hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
+  gather_facts: False
   vars_files: 
   - vars/passwords.yml 
   - vars/names.yml
@@ -24,22 +32,20 @@
   - { role: move_homedir, tags: [ authentication, filesystems ] }
   - { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
   - { role: slurm-common, tags: [ slurm, slurm-common ] }
-  - { role: lmod, tags: [ other ] }
+  #- { role: lmod, tags: [ other ] } # actually preferred on Ubuntu but mutually exclusive with environment-modules
   - { role: enable_modules, default_modules: "modulecmd", tags: [ other ] }
   - { role: postfix, tags: [ mail, other ] }
+  - { role: set_semaphore_count, tags: [ semaphore ] }
 
 - hosts: 'VisNodes'
+  gather_facts: False
   vars_files:
-  - vars/passwords.yml
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml
-  - vars/slurm.yml
   - vars/vars.yml
   roles:
   - { role: gpu, tags: [ gpu ] }
 
 - hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
+  gather_facts: False
   vars_files:
   - vars/passwords.yml
   - vars/names.yml
@@ -51,6 +57,7 @@
   - { role: slurm_config, tags: [slurm, slurm_config] }
 
 - hosts: 'DesktopNodes,ComputeNodes'
+  gather_facts: False
   vars_files:
   - vars/passwords.yml
   - vars/names.yml
@@ -61,4 +68,35 @@
   strategy: free
   roles:
   - { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
-  #- { role: mate-de-install, tags: [ mate-de-install ] }   # TODO this crashes for everything except cmca
\ No newline at end of file
+  #- { role: mate-de-install, tags: [ mate-de-install ] }   # TODO this crashes for everything except cmca
+
+- hosts: 'K1Nodes'
+  tasks:
+  - { name: set nvidia driver version, set_fact: nvidia_version='367.130' }
+  tags: [ always ]
+
+- hosts: 'VisNodes'
+  tasks:
+  - { name: set cuda monitoring, set_fact: cudamonitor=true }
+  tags: [ always ]
+
+- hosts: 'ComputeNodes'
+  vars_files:
+  - vars/slurm.yml
+  roles:
+  - { role: slurm_config, tags: [ slurm_config, slurm ] }
+  - { role: calculateNhcConfig, tags: [ nhc, slurm ] }
+  - { role: nhc, tags: [ nhc, slurm ] }
+  - { role: slurm-start, start_slurmd: True, tags: [ slurm, slurm-start ] }
+  - { role: vncserver, tags: [ other ] }
+  - { role: jasons_ssh_ca, tags: [ other ] }
+  #- { role: extra_packages, tags: [ other, extra_packages ] } # commented because it takes forever! good enough if this gets tested on clusterbuild
+  - { role: telegraf, telegraf_install_rpm_url: 'http://consistency0/src/telegraf-1.12.6-1.x86_64.rpm', tags: [ monitoring,SiteSpecific ] }
+
+- hosts: 'VisNodes'
+  roles:
+  - { role: systemd-nvidia-uvm, tags: [ uvm,SiteSpecific ] }
+
+- hosts: 'VisNodes'
+  roles:
+  - { role: deploy-xorg, tags: [ deploy-xorg ] }
\ No newline at end of file
diff --git a/CICD/plays/mgmtnodes.yml b/CICD/plays/mgmtnodes.yml
index 5d4241194324fe13739e074b4ee749c969935dfb..50442355f2c9c1b1114a613c0280e6d8c2249a3c 100644
--- a/CICD/plays/mgmtnodes.yml
+++ b/CICD/plays/mgmtnodes.yml
@@ -29,16 +29,13 @@
 #  - { role: ldapclient, tags: [ authentication ] }
 #  - { role: ssh-password-login }
 #  - { role: enable_sudo_group }
-#  - { role: make_filesystems, volumes: "{{ glustervolumes }}" }
-#  - { role: gluster_server, volname: "gv", brickmnt: '/gbrick', gluster_servers: "{{ groups['ManagementNodes'] }}", replicas: 2, tags: [ gluster_server ]  }
-#  - { role: gluster_volcreate, volname: "gv", gluster_servers: "{{ groups['ManagementNodes'] }}", brickmnt: '/gbrick', replicas: 2 }
-#  - { role: gluster_client, volname: "gv", gluster_servers: ['mgmt0','mgmt1','sql0'], volmnt: '/glusterVolume' }
+
   - { role: nfs-client, nfsMounts: "{{ mgmtNfsMounts }}", tags: [ nfs ] }
   - { role: slurmdb-config, tags: [ slurm, slurmdb-config ] }
   - { role: slurm-common, tags: [ slurm, slurm-common ]  }
   - { role: slurm_config, tags: [ slurm, slurm-config ] }
   - { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ]  }
-  - { role: telegraf, tags: [ monitoring ] }
+  - { role: telegraf, tags: [ monitoring, SiteSpecific ] }
 #  - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ]  }
 #  - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
 
diff --git a/CICD/tests/ComputeNodes/modules.sh b/CICD/tests/ComputeNodes/modules.sh
index 4b1c6121afd2949f702bcc55d22ac47e2a2c117f..254e7cb6144babae9bc9f7771d0c6abb4de39d44 100755
--- a/CICD/tests/ComputeNodes/modules.sh
+++ b/CICD/tests/ComputeNodes/modules.sh
@@ -1,6 +1,10 @@
 #!/bin/bash
+#source /etc/profile.d/modulecmd.sh
+#source /etc/profile.d/modules.sh 
+
+#Ubuntu is very picky so let's skip it
+/bin/grep Ubuntu -q /etc/issue && exit 0 
 module purge
 module load gcc/8.1.0
 module list
 gcc --version | grep 8.1.0
-
diff --git a/CICD/tests/all/check.yml b/CICD/tests/all/check.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fd95357394f0c3d582043aace19cd80b3dad9dd5
--- /dev/null
+++ b/CICD/tests/all/check.yml
@@ -0,0 +1,16 @@
+---
+- hosts: ManagementNodes
+  gather_facts: false
+  tasks:
+  - name: have ssh running
+    service:
+      name: sshd
+      state: started
+
+- hosts: ComputeNodes
+  gather_facts: false
+  tasks:
+  - name: have munge service running
+    service:
+      name: munge
+      state: started
\ No newline at end of file
diff --git a/CICD/tests/run_tests.sh b/CICD/tests/run_tests.sh
index d063e98d1d7e4617882bb14a5e1c51d9e8cda381..bfb8278ee1a8f2a8534236990d6ac11455ebb7e7 100644
--- a/CICD/tests/run_tests.sh
+++ b/CICD/tests/run_tests.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 function usage {
-    echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql}" INVENTORY_FILE KEY
+    echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql, slurm}" INVENTORY_FILE KEY
     exit 1
 }
 
@@ -23,22 +23,4 @@ function run_them ()
     done
 }
 
-# I think I am just checking the if $1 is one of the listes strings (see usage) not proud of this at all but works
-case "$1" in
-        all)
-        ;;
-        ComputeNodes)
-        ;;
-        ManagementNodes)
-        ;;
-        NFSNodes)
-        ;;
-        SQLNodes)
-        ;;
-        LoginNodes)
-        ;;
-        *)
-        usage
-esac
-
 run_them $1 $2 $3
\ No newline at end of file
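Note: with the case-statement whitelist gone, the first argument reaches run_them unvalidated; presumably any name with a matching tests/ subdirectory works. The CI jobs above invoke it as, for example:

    bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
    bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"
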
diff --git a/CICD/tests/slurm/srunHostname.yml b/CICD/tests/slurm/srunHostname.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4f05fb580a3c0fec10e06026779cb0f0deeb04f3
--- /dev/null
+++ b/CICD/tests/slurm/srunHostname.yml
@@ -0,0 +1,55 @@
+---
+- hosts: ManagementNodes,LoginNodes,ComputeNodes
+  gather_facts: false
+  tasks:
+  - name: add user hpctest
+    user:
+      name: hpctest
+      shell: /bin/bash
+    become: true
+
+- hosts: ManagementNodes
+  gather_facts: false
+  tasks:
+  - name: Create a parent account
+    command: ./sacctmgr -i add account parentAccount cluster=cicd Description="Test parent account" Organization="Monash"
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true
+    register: result 
+    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
+    
+  - name: Create a project associated with a given parent
+    command: ./sacctmgr -i add account testProject parent=parentAccount cluster=cicd Organization="Monash"
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true
+    register: result 
+    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
+    
+  - name: Create a user and associate them with a project
+    command: ./sacctmgr -i create user hpctest cluster=cicd account=testProject partition=batch
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true 
+    register: result 
+    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
+
+#sudo `which sacctmgr` modify user where name=hpctest set maxjobs=200
+#sudo `which sacctmgr` update account hpctest set qos=normal
+#sudo `which sacctmgr` update account testProject set qos=normal
+
+- hosts: LoginNodes
+  gather_facts: false
+  tasks:
+  - name: make sure munge is running
+    service:
+      name: munge
+      state: started
+    become: true
+  - name: simple srun test
+    command: ./srun --ntasks=1  --partition=batch  hostname
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true       
+    become_user: hpctest
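Note: the sacctmgr tasks above stay rerunnable by accepting " Nothing new added." as success. The same guard as plain shell, useful when poking at the cluster by hand (a sketch; the path follows the play's /opt/slurm-latest assumption):

    out=$(/opt/slurm-latest/bin/sacctmgr -i add account testProject parent=parentAccount cluster=cicd Organization="Monash")
    rc=$?
    # mirror the play's failed_when: rc != 0 and stdout != " Nothing new added."
    if [ "$rc" -ne 0 ] && [ "$out" != " Nothing new added." ]; then
        echo "sacctmgr failed (rc=$rc): $out" >&2
        exit 1
    fi
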
diff --git a/CICD/vars/names.yml b/CICD/vars/names.yml
index fa7063762a3477f082cd454fce0101dbcb8a0bbc..f3142ad8e778d6b6426f53c8af66cbd0f6fb2094 100644
--- a/CICD/vars/names.yml
+++ b/CICD/vars/names.yml
@@ -1,3 +1,3 @@
 ---
-domain: massive.org.au
+domain: cicd.test.au
 smtp_smarthost: smtp.monash.edu.au
diff --git a/CICD/vars/slurm.yml b/CICD/vars/slurm.yml
index 65def4d949685d32b7f6b705a6390c9a6dfdab2a..b58c6162058d4571f6f2709e1b6083974c66e000 100644
--- a/CICD/vars/slurm.yml
+++ b/CICD/vars/slurm.yml
@@ -1,10 +1,10 @@
 ---
 desktopNodeList:
   - { name : 'DesktopNodes', interface : 'eth0' }
-clustername: "m3"
-projectname: "m3"
-slurm_version: 19.05.3-2
-munge_version: 0.5.11
+clustername: "cicd"
+projectname: "cicd"
+slurm_version: 19.05.4
+munge_version: 0.5.13
 nhc_version: 1.4.2
 munge_dir: /opt/munge-{{ munge_version }}
 slurm_dir: /opt/slurm-{{ slurm_version }}
diff --git a/CICD/vars/vars.yml b/CICD/vars/vars.yml
index 83485426b7e370a91d2fd15a5083156c483a1f4e..7def1ce714e85c7a5325fa6ccfe5ce3a9141a508 100644
--- a/CICD/vars/vars.yml
+++ b/CICD/vars/vars.yml
@@ -1,7 +1,7 @@
 ---
 sudo_group: systems
 nagios_home: "/var/lib/nagios"
-nvidia_version: "390.46"
+nvidia_version: "367.134"
 
 yumdisablerepo: 
  - 'base'
@@ -16,6 +16,7 @@ yumenablerepo:
 
 gpumap:
  'K1': 'K1'
+ 'K2': 'K2'
  'K80': 'K80'
  'P100-PCIE-16GB': 'P100'
  'V100-PCIE-16GB': 'V100'
diff --git a/buildCert.yml b/buildCert.yml
deleted file mode 100644
index eb6a72f3154d9632effe3b54a6c008ecf1b836c1..0000000000000000000000000000000000000000
--- a/buildCert.yml
+++ /dev/null
@@ -1,88 +0,0 @@
---- 
-- name: "Check client ca certificate"
-  register: ca_cert
-  stat: "path={{ x509_cacert_file }}"
-
-- name: "Check certificate and key"
-  shell: (openssl x509 -noout -modulus -in {{ x509_cert_file }}  | openssl md5 ; openssl rsa -noout -modulus -in {{ x509_key_file }} | openssl md5) | uniq | wc -l
-  register: certcheck
-
-- name: "Check certificate"
-  register: cert
-  stat: "path={{ x509_cert_file }}"
-
-- name: "Check key"
-  register: key
-  stat: "path={{ x509_key_file }}"
-  become: true
-
-- name: "Default: we don't need a new certificate"
-  set_fact: needcert=False
-
-- name: "Set need cert if key is missing"
-  set_fact: needcert=True
-  when: key.stat.exists == false
-
-- name: "set needcert if cert is missing"
-  set_fact: needcert=True
-  when: cert.stat.exists == false
-
-- name: "set needcert if cert doesn't match key"
-  set_fact: needcert=True
-  when: certcheck.stdout == '2'
-
-
-- name: "Creating Keypair"
-  shell: "echo noop when using easy-rsa"
-  when: needcert
-
-- name: "Creating CSR"
-  shell: " cd /etc/easy-rsa/2.0; source ./vars; export EASY_RSA=\"${EASY_RSA:-.}\"; \"$EASY_RSA\"/pkitool --csr {{ x509_csr_args }} {{ common_name }}"
-  when: needcert
-  become: true
-
-- name: "Copy CSR to ansible host"
-  fetch: "src=/etc/easy-rsa/2.0/keys/{{ common_name }}.csr dest=/tmp/{{ common_name }}/ fail_on_missing=yes validate_md5=yes flat=yes"
-  become: true
-  when: needcert
-
-- name: "Copy CSR to CA"
-  delegate_to: "{{ x509_ca_server }}"
-  copy: "src=/tmp/{{ ansible_fqdn }}/{{ common_name }}.csr dest=/etc/easy-rsa/2.0/keys/{{ common_name }}.csr force=yes"
-  when: needcert
-  become: true
-
-- name: "Sign Certificate"
-  delegate_to: "{{ x509_ca_server }}"
-  shell:    "source ./vars; export EASY_RSA=\"${EASY_RSA:-.}\" ;\"$EASY_RSA\"/pkitool --sign {{ common_name }}"
-  args:
-    chdir: "/etc/easy-rsa/2.0"
-  become: true
-  when: needcert
-
-- name: "Copy the Certificate to ansible host"
-  delegate_to: "{{ x509_ca_server }}"
-  fetch: "src=/etc/easy-rsa/2.0/keys/{{ common_name }}.crt dest=/tmp/{{ common_name }}/ fail_on_missing=yes validate_md5=yes flat=yes"
-  become: true
-  when: needcert
-
-- name: "Copy the CA Certificate to the ansible host"
-  delegate_to: "{{ x509_ca_server }}"
-  fetch: "src=/etc/easy-rsa/2.0/keys/ca.crt dest=/tmp/ca.crt fail_on_missing=yes validate_md5=yes flat=yes"
-  become: true
-  when: "ca_cert.stat.exists == false"
-
-- name: "Copy the certificate to the node"
-  copy: "src=/tmp/{{ common_name }}/{{ common_name }}.crt dest={{ x509_cert_file }} force=yes"
-  become: true
-  when: needcert
-
-- name: "Copy the CA certificate to the node"
-  copy: "src=/tmp/ca.crt dest={{ x509_cacert_file }}"
-  become: true
-  when: "ca_cert.stat.exists == false"
-
-- name: "Copy the key to the correct location"
-  shell: "mkdir -p `dirname {{ x509_key_file }}` ; chmod 700 `dirname {{ x509_key_file }}` ; cp /etc/easy-rsa/2.0/keys/{{ common_name }}.key {{ x509_key_file }}"
-  become: true
-  when: needcert
diff --git a/buildKaraage3.x.yml b/buildKaraage3.x.yml
deleted file mode 100644
index fcd336022770c1aace87d490ab52404741fb7bdd..0000000000000000000000000000000000000000
--- a/buildKaraage3.x.yml
+++ /dev/null
@@ -1,216 +0,0 @@
----
--
-  hosts: ldap-server
-  pre_tasks:
-    - sysctl: name=kernel.hostname value={{ inventory_hostname }} state=present
-      ignore_errors: yes
-    - service: name=network state=restarted
-      when: ansible_os_family == 'RedHat'
-  roles:
-    - etcHosts
-    - easy-rsa-CA
-    - easy-rsa-certificate
-    - ldapserver
-  become: true
-  vars:
-   - x509_ca_server: "{% for host in groups['ldap-server'] %}{{ hostvars[host]['ansible_fqdn'] }}{% endfor %}"
-   - countryName: "AU"
-   - reginalName: "Victoria"
-   - cityName: "Melbourne"
-   - organizationName: "Monash University"
-   - emailAddress: "shahaan@gmail.com"
-   - organizationUnit: "defaultUnit"
-   - ldapDomain: "dc=monash,dc=edu,dc=au"
-   - ldapManager: "cn=Manager,dc=monash,dc=edu,dc=au"
-   - ldapBindDN: "cn=ldapuser,ou=users,dc=monash,dc=edu,dc=au"
-   - ldapUserBase: "ou=users,dc=monash,dc=edu,dc=au"
-   - ldapGroupBase: "ou=groups,dc=monash,dc=edu,dc=au"
-   - ldapBase: "dc=monash,dc=edu,dc=au"
-   - ldapURI: "{% for host in groups['ldap-server'] %}ldaps://{{ hostvars[host]['ansible_fqdn'] }}{% endfor %}"
-   - smtp_smarthost: "{{ ansible_hostname }}"
-   - ldapManagerPassword: "imldap"
-   - ldapBindDNPassword: "imbinddn"
-   - domain: ""
-   - karaage_sql_password: "imkaraage"
-   - mysql_root_password: "immysql"
-   - x509_key_file: "/etc/ssl/private/server.key"
-   - x509_cert_file: "/etc/ssl/certs/server.crt"
-   - x509_cacert_file: "/etc/ssl/certs/ca.crt"
-   - x509_csr_args: ""
-   - x509_sign_args: "{{ x509_csr_args }}"
-   - x509_common_name: "{{ inventory_hostname }}"
-- 
-  hosts: karaage-server
-  pre_tasks:
-    - sysctl: name=kernel.hostname value={{ inventory_hostname }} state=present
-      ignore_errors: yes
-    - service: name=network state=restarted
-      when: ansible_os_family == 'RedHat'
-  roles:
-    - etcHosts
-    - easy-rsa-certificate
-    - karaage3.1.17
-    - shibboleth-sp
-  become: true
-  vars:
-   - x509_ca_server: "{% for host in groups['ldap-server'] %}{{ hostvars[host]['ansible_fqdn'] }}{% endfor %}"
-   - countryName: "AU"
-   - reginalName: "Victoria"
-   - cityName: "Melbourne"
-   - organizationName: "Monash University"
-   - emailAddress: "shahaan@gmail.com"
-   - organizationUnit: "defaultUnit"
-   - ldapDomain: "dc=monash,dc=edu,dc=au"
-   - ldapManager: "cn=Manager,dc=monash,dc=edu,dc=au"
-   - ldapBindDN: "cn=ldapuser,ou=users,dc=monash,dc=edu,dc=au"
-   - ldapUserBase: "ou=users,dc=monash,dc=edu,dc=au"
-   - ldapGroupBase: "ou=groups,dc=monash,dc=edu,dc=au"
-   - ldapBase: "dc=monash,dc=edu,dc=au"
-   - ldapURI: "{% for host in groups['ldap-server'] %}ldaps://{{ hostvars[host]['ansible_fqdn'] }}{% endfor %}"
-   - smtp_smarthost: "{{ ansible_hostname }}"
-   - ldapManagerPassword: "imldap"
-   - ldapBindDNPassword: "imbinddn"
-   - domain: ""
-   - karaage_sql_password: "imkaraage"
-   - mysql_root_password: "immysql"
-   - x509_key_file: "/etc/ssl/private/server.key"
-   - x509_cert_file: "/etc/ssl/certs/server.crt"
-   - x509_cacert_file: "/etc/ssl/certs/ca.crt"
-   - x509_csr_args: ""
-   - x509_sign_args: "{{ x509_csr_args }}"
-   - x509_common_name: "{{ inventory_hostname }}"
-   - aaf_federation_url: "https://ds.test.aaf.edu.au/distribution/metadata"
-   - aaf_discovery_url: "https://ds.test.aaf.edu.au/discovery/DS"
-   - admin_email: "shahaan@gmail.com"
-   - aaf_metadata_xml: '<EntityDescriptor entityID="https://vm-118-138-241-159.erc.monash.edu.au/shibboleth" xmlns="urn:oasis:names:tc:SAML:2.0:metadata" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:shibmd="urn:mace:shibboleth:metadata:1.0" xmlns:ds="http://www.w3.org/2000/09/xmldsig#" xsi:schemaLocation="urn:oasis:names:tc:SAML:2.0:metadata saml-schema-metadata-2.0.xsd urn:mace:shibboleth:metadata:1.0 shibboleth-metadata-1.0.xsd http://www.w3.org/2000/09/xmldsig# xmldsig-core-schema.xsd">
-  <SPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
-    <Extensions>
-      <dsr:DiscoveryResponse xmlns:dsr="urn:oasis:names:tc:SAML:profiles:SSO:idp-discovery-protocol" Binding="urn:oasis:names:tc:SAML:profiles:SSO:idp-discovery-protocol" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/Login" index="0" isDefault="true" />
-    </Extensions>
-    <KeyDescriptor use="signing">
-      <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
-        <ds:X509Data>
-          <ds:X509Certificate>
-MIIFDDCCA/SgAwIBAgIJALO1/Blx64tvMA0GCSqGSIb3DQEBCwUAMIG0MQswCQYD
-VQQGEwJBVTEMMAoGA1UECBMDVklDMRIwEAYDVQQHEwlNZWxib3VybmUxDTALBgNV
-BAoTBE1lUkMxETAPBgNVBAsTCG9wZW5sZGFwMS0wKwYDVQQDEyR2bS0xMTgtMTM4
-LTI0MS0xNTkuZXJjLm1vbmFzaC5lZHUuYXUxEDAOBgNVBCkTB0Vhc3lSU0ExIDAe
-BgkqhkiG9w0BCQEWEXNoYWhhYW5AZ21haWwuY29tMB4XDTE1MDMyMzEyMjYzOFoX
-DTI1MDMyMDEyMjYzOFowgbQxCzAJBgNVBAYTAkFVMQwwCgYDVQQIEwNWSUMxEjAQ
-BgNVBAcTCU1lbGJvdXJuZTENMAsGA1UEChMETWVSQzERMA8GA1UECxMIb3Blbmxk
-YXAxLTArBgNVBAMTJHZtLTExOC0xMzgtMjQxLTE1OS5lcmMubW9uYXNoLmVkdS5h
-dTEQMA4GA1UEKRMHRWFzeVJTQTEgMB4GCSqGSIb3DQEJARYRc2hhaGFhbkBnbWFp
-bC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTcsIqn/HKgeRK
-gj4rXYu8V/kTkv63d2Rtmv6zSlRwtjKBCvePEo/4ZpwOK235kBfX9KZKU9wlyFhf
-DdmOvIBYvhrLqtIYNfMWLt8iUFkdt2N/dNmftu7WUXuZezsRXMqbPG7dLjMLyJ7D
-7UCox1IB2SYzHx0K9w7PtCleV5A/o9Eg/7G8/FvOCB5askY/YywzEWLrxIYYn6Cr
-Gsioh5hXxac9p3KuO6dvbMLIMHVZ4u7mbLrdp/e6TZTlyZN+Tfbjta0VYBw0beuS
-KpwZc8Toow2B22O3K15o6tr0nvVSTEj2Qrd+LPolFSFBKVaD+9G/i0FMLHNOuQVP
-Cw/62vEnAgMBAAGjggEdMIIBGTAdBgNVHQ4EFgQUouRhu/Wc+jU1rfUd+kiqbtg/
-q3cwgekGA1UdIwSB4TCB3oAUouRhu/Wc+jU1rfUd+kiqbtg/q3ehgbqkgbcwgbQx
-CzAJBgNVBAYTAkFVMQwwCgYDVQQIEwNWSUMxEjAQBgNVBAcTCU1lbGJvdXJuZTEN
-MAsGA1UEChMETWVSQzERMA8GA1UECxMIb3BlbmxkYXAxLTArBgNVBAMTJHZtLTEx
-OC0xMzgtMjQxLTE1OS5lcmMubW9uYXNoLmVkdS5hdTEQMA4GA1UEKRMHRWFzeVJT
-QTEgMB4GCSqGSIb3DQEJARYRc2hhaGFhbkBnbWFpbC5jb22CCQCztfwZceuLbzAM
-BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQDFKPmj1TGpUZsdviOwMjU/
-IHqZ+3RwFcvkfBu8JmwxaO86GrC1mwZyQExvQLQF6LLaGHyVlZa3PxUkmcqq1for
-ZcYYyVRip4fgtOI6WcKg+nWI9+rDX5fU5gZAYm3er4MNZ/R7sTmgHEemOcuSiatQ
-hDoUkv9GOZKoxw4uJJq/yUumAkziAIuMWoTHYrR9cqOkoKQiFUjqmhI3m4phtoV4
-OaeVf3hkhXakbk1OkAAAzPxsrpAaUM5eLC75SV5Hopid9ltpFjpD457TXKdE+IyB
-oBDUnCaHSkrDmbeX6iSUHLWjjcOs0MI0UOXH+XNKNR3kUUvS+0ZCwRIPXc11/AFN
-</ds:X509Certificate>
-        </ds:X509Data>
-      </ds:KeyInfo>
-    </KeyDescriptor>
-    <KeyDescriptor use="encryption">
-      <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
-        <ds:X509Data>
-          <ds:X509Certificate>
-MIIFDDCCA/SgAwIBAgIJALO1/Blx64tvMA0GCSqGSIb3DQEBCwUAMIG0MQswCQYD
-VQQGEwJBVTEMMAoGA1UECBMDVklDMRIwEAYDVQQHEwlNZWxib3VybmUxDTALBgNV
-BAoTBE1lUkMxETAPBgNVBAsTCG9wZW5sZGFwMS0wKwYDVQQDEyR2bS0xMTgtMTM4
-LTI0MS0xNTkuZXJjLm1vbmFzaC5lZHUuYXUxEDAOBgNVBCkTB0Vhc3lSU0ExIDAe
-BgkqhkiG9w0BCQEWEXNoYWhhYW5AZ21haWwuY29tMB4XDTE1MDMyMzEyMjYzOFoX
-DTI1MDMyMDEyMjYzOFowgbQxCzAJBgNVBAYTAkFVMQwwCgYDVQQIEwNWSUMxEjAQ
-BgNVBAcTCU1lbGJvdXJuZTENMAsGA1UEChMETWVSQzERMA8GA1UECxMIb3Blbmxk
-YXAxLTArBgNVBAMTJHZtLTExOC0xMzgtMjQxLTE1OS5lcmMubW9uYXNoLmVkdS5h
-dTEQMA4GA1UEKRMHRWFzeVJTQTEgMB4GCSqGSIb3DQEJARYRc2hhaGFhbkBnbWFp
-bC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTcsIqn/HKgeRK
-gj4rXYu8V/kTkv63d2Rtmv6zSlRwtjKBCvePEo/4ZpwOK235kBfX9KZKU9wlyFhf
-DdmOvIBYvhrLqtIYNfMWLt8iUFkdt2N/dNmftu7WUXuZezsRXMqbPG7dLjMLyJ7D
-7UCox1IB2SYzHx0K9w7PtCleV5A/o9Eg/7G8/FvOCB5askY/YywzEWLrxIYYn6Cr
-Gsioh5hXxac9p3KuO6dvbMLIMHVZ4u7mbLrdp/e6TZTlyZN+Tfbjta0VYBw0beuS
-KpwZc8Toow2B22O3K15o6tr0nvVSTEj2Qrd+LPolFSFBKVaD+9G/i0FMLHNOuQVP
-Cw/62vEnAgMBAAGjggEdMIIBGTAdBgNVHQ4EFgQUouRhu/Wc+jU1rfUd+kiqbtg/
-q3cwgekGA1UdIwSB4TCB3oAUouRhu/Wc+jU1rfUd+kiqbtg/q3ehgbqkgbcwgbQx
-CzAJBgNVBAYTAkFVMQwwCgYDVQQIEwNWSUMxEjAQBgNVBAcTCU1lbGJvdXJuZTEN
-MAsGA1UEChMETWVSQzERMA8GA1UECxMIb3BlbmxkYXAxLTArBgNVBAMTJHZtLTEx
-OC0xMzgtMjQxLTE1OS5lcmMubW9uYXNoLmVkdS5hdTEQMA4GA1UEKRMHRWFzeVJT
-QTEgMB4GCSqGSIb3DQEJARYRc2hhaGFhbkBnbWFpbC5jb22CCQCztfwZceuLbzAM
-BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQDFKPmj1TGpUZsdviOwMjU/
-IHqZ+3RwFcvkfBu8JmwxaO86GrC1mwZyQExvQLQF6LLaGHyVlZa3PxUkmcqq1for
-ZcYYyVRip4fgtOI6WcKg+nWI9+rDX5fU5gZAYm3er4MNZ/R7sTmgHEemOcuSiatQ
-hDoUkv9GOZKoxw4uJJq/yUumAkziAIuMWoTHYrR9cqOkoKQiFUjqmhI3m4phtoV4
-OaeVf3hkhXakbk1OkAAAzPxsrpAaUM5eLC75SV5Hopid9ltpFjpD457TXKdE+IyB
-oBDUnCaHSkrDmbeX6iSUHLWjjcOs0MI0UOXH+XNKNR3kUUvS+0ZCwRIPXc11/AFN
-</ds:X509Certificate>
-        </ds:X509Data>
-      </ds:KeyInfo>
-    </KeyDescriptor>
-    <ContactPerson contactType="technical">
-      <Company>Monash University</Company>
-      <GivenName>Shahaan</GivenName>
-      <SurName>Ayyub</SurName>
-      <EmailAddress>mailto:shahaan.ayyub@monash.edu</EmailAddress>
-    </ContactPerson>
-    <SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/SLO/Artifact" />
-    <SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/SLO/POST" />
-    <SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:SOAP" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/SLO/SOAP" />
-    <SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/SLO/Redirect" />
-    <ManageNameIDService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/NIM/POST" />
-    <ManageNameIDService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/NIM/Redirect" />
-    <ManageNameIDService Binding="urn:oasis:names:tc:SAML:2.0:bindings:SOAP" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/NIM/SOAP" />
-    <ManageNameIDService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/NIM/Artifact" />
-    <NameIDFormat>urn:oasis:names:tc:SAML:2.0:nameid-format:transient</NameIDFormat>
-    <AssertionConsumerService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/SAML2/Artifact" index="3" isDefault="false" />
-    <AssertionConsumerService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="https://vm-118-138-241-159.erc.monash.edu.au/Shibboleth.sso/SAML2/POST" index="1" isDefault="true" />
-    <AttributeConsumingService index="1" isDefault="false">
-      <ServiceName xml:lang="en">vm-118-138-241-159.erc.monash.edu.au</ServiceName>
-      <RequestedAttribute NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" Name="urn:oid:2.5.4.3" FriendlyName="commonName" isRequired="true" />
-      <RequestedAttribute NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" Name="urn:oid:0.9.2342.19200300.100.1.3" FriendlyName="email" isRequired="true" />
-      <RequestedAttribute NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" Name="urn:oid:2.5.4.42" FriendlyName="givenName" isRequired="false" />
-      <RequestedAttribute NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" Name="urn:oid:2.5.4.4" FriendlyName="surname" isRequired="true" />
-    </AttributeConsumingService>
-  </SPSSODescriptor>
-  <Organization>
-    <OrganizationName xml:lang="en">monash.edu.au</OrganizationName>
-    <OrganizationDisplayName xml:lang="en">Monash University</OrganizationDisplayName>
-    <OrganizationURL xml:lang="en">https://manager.aaf.edu.au/support</OrganizationURL>
-  </Organization>
-</EntityDescriptor>'
-   - aaf_metadata_cert: '-----BEGIN CERTIFICATE-----
-MIIEbDCCA1SgAwIBAgIESWrmGDANBgkqhkiG9w0BAQUFADCB9zEQMA4GA1UEBhMH
-VW5rbm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4G
-A1UEChMHVW5rbm93bjFaMFgGA1UECxNRb3BlbnNzbCB4NTA5IC1vdXQgbWV0YWRh
-dGEtY2VydC5wZW0gLW91dGZvcm0gcGVtIC1pbiBtZXRhZGF0YS1kZXIuY3J0IC1p
-bmZvcm0gZGVyMVEwTwYDVQQDDEhrZXl0b29sIC1rZXlzdG9yZSBrZXlzdG9yZS5r
-cyAtZXhwb3J0IC1hbGlhcyBtZXRhZGF0YSA+IG1ldGFkYXRhLWRlci5jcnQwHhcN
-MDkwMTEyMDY0MTI4WhcNMTQwMTExMDY0MTI4WjCB9zEQMA4GA1UEBhMHVW5rbm93
-bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UEChMH
-VW5rbm93bjFaMFgGA1UECxNRb3BlbnNzbCB4NTA5IC1vdXQgbWV0YWRhdGEtY2Vy
-dC5wZW0gLW91dGZvcm0gcGVtIC1pbiBtZXRhZGF0YS1kZXIuY3J0IC1pbmZvcm0g
-ZGVyMVEwTwYDVQQDDEhrZXl0b29sIC1rZXlzdG9yZSBrZXlzdG9yZS5rcyAtZXhw
-b3J0IC1hbGlhcyBtZXRhZGF0YSA+IG1ldGFkYXRhLWRlci5jcnQwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZgh/InL2LixNtzuA+dNXSn19/W4IMbD6+
-Zzysk/jMi4Sgr4FrEfMeTi2G2/rpf32TeSG1P4MZqqyy5yuhNX7RQTFSZyl5D9cs
-98dE7FY/g7uySGv7oao1rkJfEmFmcZQIvRkLs89PQqKok2/m807DnzF1zCAt+YcY
-wqHyXyTrzxr4hMDDB2Ij8PeDZeSIB3s/CK2F6hIg13VeYEZjAWf4KPwsOteuzR4Y
-uuuGDlNFjcJGu+97N4LTnOBb6uW8qNtAAq6UWtA28A4KQejrzBZrfBGPLGbe6KHs
-WrziN2uk8kEY1TQw0cp+Am/ph8nl00KU+oVrswjS8oUklL98C5LnAgMBAAEwDQYJ
-KoZIhvcNAQEFBQADggEBAEy0xLMJBneC+DQ0cSNH3kXaW9cdqzsoD/UawJHaDqIJ
-UjIslR38p5H3pRQ7rZ1+c7z0lUaBqQO/i+MZUEMHCpbhEcZK0Ep5dlWc80DFGSxS
-ItbghQ5loS4JOgKYZZdRSzCxV3PAqlzqXoZrFeaeJL7xFIRglpphN06joOlX0zQM
-0iN8qn7oTTaR3U2Kxkh6NQ2qTH3IvP71YJnjSzljqZHFughhTpl8cA8i9ijcmeyP
-Y5TYJTbtwQ0X+435LTX8xxW/B4E8XnH7iEOykvfZMYxt5cSrtzF1eAMQ/ln2r54O
-bk0oX1BGue0XcgeMObQrs/eC+2uspENHKtUdYDU0OK4=
------END CERTIFICATE-----'
diff --git a/createNode b/createNode
deleted file mode 100644
index 779ebebe678008647f49ab17d56a89398188be10..0000000000000000000000000000000000000000
--- a/createNode
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-import sys, os, string, subprocess, socket, ansible.runner, re
-import copy, shlex,uuid, random, multiprocessing, time, shutil
-import novaclient.v1_1.client as nvclient
-import novaclient.exceptions as nvexceptions
-import glanceclient.v2.client as glclient
-import keystoneclient.v2_0.client as ksclient
-
-class Authenticate:
-	
-	def __init__(self, username, passwd):
-		self.username=username
-		self.passwd=passwd
-		self.tenantName= os.environ['OS_TENANT_NAME']
-		self.authUrl="https://keystone.rc.nectar.org.au:5000/v2.0"
-		kc = ksclient.Client(   auth_url=self.authUrl,
-					username=self.username,
-					password=self.passwd)
-		self.tenantList=kc.tenants.list()
-		self.novaSemaphore = multiprocessing.BoundedSemaphore(value=1)
-	
-	def createNovaObject(self,tenantName):
-		for tenant in self.tenantList:
-			if tenant.name == tenantName:
-				try:
-					nc = nvclient.Client(	auth_url=self.authUrl,
-						username=self.username,
-						api_key=self.passwd,
-						project_id=tenant.name,
-						tenant_id=tenant.id,
-						service_type="compute"
-						)
-					return nc
-				except nvexceptions.ClientException:
-					raise
-	
-	def gatherInfo(self):
-
-		for tenant in self.tenantList: print tenant.name
-		tenantName = raw_input("Please select a project: (Default MCC-On-R@CMON):")
-		if not tenantName or tenantName not in [tenant.name for tenant in self.tenantList]: 
-			tenantName = "MCC_On_R@CMON"
-		print tenantName,"selected\n"
-		
-		## Fetch the Nova Object
-
-		nc = self.createNovaObject(tenantName)
-		
-		## Get the Flavor
-		flavorList = nc.flavors.list()
-		for flavor in flavorList: print flavor.name
-		flavorName = raw_input("Please select a Flavor Name: (Default m1.xxlarge):")
-		if not flavorName or flavorName not in [flavor.name for flavor in flavorList]:
-			flavorName = "m1.xxlarge"
-		print flavorName,"selected\n"
-
-		
-		## Get the Availability Zones
-		az_p1 = subprocess.Popen(shlex.split\
-		("nova availability-zone-list"),stdout=subprocess.PIPE)
-		az_p2 = subprocess.Popen(shlex.split\
-		("""awk '{if ($2 && $2 != "Name")print $2}'"""),\
-		stdin=az_p1.stdout,stdout=subprocess.PIPE)
-		availabilityZonesList =  subprocess.Popen(shlex.split\
-		("sort"),stdin=az_p2.stdout,stdout=subprocess.PIPE).communicate()[0]
-		print  availabilityZonesList
-		availabilityZone = raw_input("Please select an availability zone: (Default monash-01):")
-		if not availabilityZone or \
-		availabilityZone not in [ zone for zone in availabilityZonesList.split()]:
-			availabilityZone = "monash-01"
-		print availabilityZone,"selected\n"
-		
-		## Get the number of instances to spawn
-		numberOfInstances = raw_input\
-		("Please specify the number of instances to launch: (Default 1):")
-		if not numberOfInstances or \
-		not isinstance(int(numberOfInstances), int):
-			numberOfInstances = 1
-		subprocess.call(['clear'])
-		flavorObj = nc.flavors.find(name=flavorName)
-		print "Creating",numberOfInstances,\
-		"instance(s) in",availabilityZone,"zone..."
-		instanceList = []
-		for counter in range(0,int(numberOfInstances)):
-			nodeName = "MCC-Node"+str(random.randrange(1,1000))
-			try:
-				novaInstance =  nc.servers.create\
-				(name=nodeName,image="ddc13ccd-483c-4f5d-a5fb-4b968aaf385b",\
-				flavor=flavorObj,key_name="shahaan",\
-				availability_zone=availabilityZone)
-				instanceList.append(novaInstance)
-			except nvexceptions.ClientException:
-				raise
-				continue
-				
-		while 'BUILD' in [novaInstance.status \
-		for novaInstance in instanceList]:
-			for count in range(0,len(instanceList)):
-				time.sleep(5)
-				if instanceList[count].status != 'BUILD': 
-					continue
-				else:
-					try:
-						instanceList[count] = nc.servers.get(instanceList[count].id)
-					except nvexceptions.ClientException or \
-					nvexceptions.ConnectionRefused or \
-					nvexceptions.InstanceInErrorState:
-						raise
-						del instanceList[count]
-						continue
-		activeHostsList = []
-		SSHports = []
-		for novaInstance in instanceList:
-			if novaInstance.status == 'ACTIVE':
-				hostname = socket.gethostbyaddr(novaInstance.networks.values()[0][0])[0]
-				activeHostsList.append(hostname)
-				SSHDict = {}
-				SSHDict['IP'] = novaInstance.networks.values()[0][0]
-				SSHDict['status'] = 'CLOSED'
-				SSHports.append(SSHDict) 
-		print "Scanning if port 22 is open..."
-		sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-		while 'CLOSED' in [host['status'] for host in SSHports]:
-			for instance in range(0,len(SSHports)):
-				sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-				if SSHports[instance]['status'] == 'CLOSED' and not sock.connect_ex((SSHports[instance]['IP'], 22)):
-					SSHports[instance]['status'] = 'OPEN'
-					print "Port 22, opened for IP:",SSHports[instance]['IP']
-				else:
-					time.sleep(5)
-				sock.close()
-				
-		fr = open('/etc/ansible/hosts.rpmsave','r+')
-		fw = open('hosts.temp','w+')
-		lines = fr.readlines()
-		for line in lines:
-			fw.write(line)
-			if re.search('\[new-servers\]',line):
-				for host in activeHostsList: fw.write(host+'\n')
-		fr.close()
-		fw.close()
-		shutil.move('hosts.temp','/etc/ansible/hosts')
-		print "Building the Nodes now..."
-		subprocess.call(shlex.split("/mnt/nectar-nfs/root/swStack/ansible/bin/ansible-playbook /mnt/nectar-nfs/root/ansible-config-root/mcc-nectar-dev/buildNew.yml -v"))	
-
-if __name__ == "__main__":
-	username = os.environ['OS_USERNAME']
-	passwd = os.environ['OS_PASSWORD']
-	choice = raw_input(username + " ? (y/n):")
-	while choice and choice not in ("n","y"):
-		print "y or n please"
-		choice = raw_input()
-	if choice == "n":
-		username = raw_input("username :")
-		passwd = raw_input("password :")
-	auth = Authenticate(username, passwd)
-	auth.gatherInfo()
diff --git a/dynamicInventory-mcc2 b/dynamicInventory-mcc2
deleted file mode 100755
index dd761641e840f69e8c20ecf3d19965069f4a3e61..0000000000000000000000000000000000000000
--- a/dynamicInventory-mcc2
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env python
-import sys, os, string, socket, re
-import shlex, multiprocessing, time, shutil, json
-from novaclient import client as nvclient
-import novaclient.exceptions as nvexceptions
-import keystoneclient.v2_0.client as ksclient
-from joblib import Parallel, delayed
-from multiprocessing import Process, Manager, Pool
-from libnmap.process import NmapProcess
-from libnmap.parser import NmapParser, NmapParserException
-
-def gatherInfo(tenantName, tenantID, userName, passwd, authUrl, inventory):
-	## Fetch the Nova Object
-	projectName = os.path.basename(sys.argv[0])
-	nc = nvclient.Client(	auth_url=authUrl,
-		username=userName,
-		api_key=passwd,
-		project_id=tenantName,
-		tenant_id=tenantID,
-		version="2"
-		)
-	for server in nc.servers.list():
-		if server.metadata and \
-		'ansible_host_groups' in server.metadata and \
-		'project_name' in  server.metadata:
-			if server.metadata['project_name'].strip() != projectName.strip(): continue
-			unwantedChars = """][")("""
-			rgx = re.compile('[%s]' % unwantedChars)
-			ansible_groups = rgx.sub('', server.metadata['ansible_host_groups']).split(',')
-			hostname = socket.gethostbyaddr(server.networks.values()[0][0])[0]
-			novaVolumes = nc.volumes.get_server_volumes(server.id)
-			# Let's do some port scanning using nmap
-			nmproc = NmapProcess(hostname, "-p 22 -sV -Pn")
-			rc = nmproc.run()
-			if rc != 0: continue
-			parsed = NmapParser.parse(nmproc.stdout)
-			# Set Ansible Host Group
-			for group in ansible_groups:
-				groupName = group.strip()
-				if groupName not in inventory: inventory[groupName] = []
-				inventory[groupName].append(hostname)
-			# Add other metadata
-			for key, value in server.metadata.iteritems():
-				if key not in ('project_name','ansible_host_groups'):
-					inventory['_meta']['hostvars'][hostname] = { key:value }
-			if novaVolumes:
-				inventory['_meta']['hostvars'][hostname]['volumeList'] = [ volume.id for volume in novaVolumes ]
-			inventory['_meta']['hostvars'][hostname]['status']  = parsed.hosts[0].status
-		else:
-			continue
-	#print inventory
-
-if __name__ == "__main__":
-	inventory = {}
-	inventory['_meta'] = { 'hostvars': {} }
-	try:
-		authUrl = os.environ['OS_AUTH_URL']
-		userName = os.environ['OS_USERNAME']
-		passwd = os.environ['OS_PASSWORD']
-	except KeyError:
-		print "Env Variables not set, Please run: source <openstack rc file>"
-		sys.exit()
-	kc = ksclient.Client(auth_url=authUrl, username=userName, password=passwd)
-	tenancies = kc.tenants.list()
-	Parallel(n_jobs=len(tenancies), backend="threading")(delayed(gatherInfo)
-	(tenant.name, tenant.id, userName, passwd, authUrl, inventory)
-	for tenant in tenancies)
-	if not inventory['_meta']['hostvars']:
-		print "I could not find any project called ", os.path.basename(sys.argv[0]), "in any of "
-		for tenancy in tenancies: print tenancy.name
-		print "\n1. You can select a project by symlinking to it, for example if you have a project called myProject do ln -s dynamicInventory-mcc2 myProject\n and then run ./myProject"
-		print "2. It is also possible that none of your VMs are allocated to myProject, please add them to the project: e.g. by running"
-		print 'nova --os-tenant-name TF_NNF --os-tenant-id 033asdda60d7046b6affdf31d14asdasb meta nodex set project_name="myProject"'
-		sys.exit()
-	else:
-		print json.dumps(inventory)
diff --git a/extra_packages/tasks/main.yml b/extra_packages/tasks/main.yml
deleted file mode 100644
index 5a8c87642139c65c2e5f8ae7aad81ec894964306..0000000000000000000000000000000000000000
--- a/extra_packages/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: "Install extra packages"
-  yum: "name={{ item }} state=present"
-  with_items: "{{ pkgs }}"
-  become: true
-  ignore_errors: true
-  when: ansible_os_family == 'RedHat'
-
-- name: "Check fusermount user access permission"
-  shell: fusermount --version 
-  ignore_errors: true
-  register: fusermount_user_access_error
-  when: ansible_os_family == 'RedHat'
-
-- name: "Fix fusermount user access permission"
-  file: path=/bin/fusermount mode="o=rx"
-  become: true
-  when: ansible_os_family == 'RedHat' and fusermount_user_access_error.failed
-
-
diff --git a/extra_packages/vars/main.yml b/extra_packages/vars/main.yml
deleted file mode 100644
index b70d611e95a35d73562b2d50a066f34d98c685d8..0000000000000000000000000000000000000000
--- a/extra_packages/vars/main.yml
+++ /dev/null
@@ -1,254 +0,0 @@
-pkgs:
- - atlas
- - babel
- - bc
- - bitmap-console-fonts
- - bitmap-fangsongti-fonts
- - bitmap-fixed-fonts
- - bitmap-fonts-compat
- - bitmap-lucida-typewriter-fonts
- - bitmap-miscfixed-fonts
- - blas
- - compiz-gnome
- - db4-cxx
- - db4-devel
- - dejavu-sans-fonts
- - enchant
- - eog
- - evince
- - finger
- - fftw
- - file-roller
- - firefox
- - firstboot
- - fltk
- - fontconfig
- - fontpackages-filesystem
- - freeglut
- - ftgl
- - fuse-sshfs
- - gd
- - gdbm-devel
- - gdm
- - gedit
- - gettext
- - gettext-libs
- - ghostscript-fonts
- - giflib
- - giflib
- - glibc
- - glibc
- - glx-utils
- - gnome-applets
- - gnome-backgrounds
- - gnome-bluetooth-libs
- - gnome-desktop
- - gnome-disk-utility
- - gnome-disk-utility-libs
- - gnome-disk-utility-ui-libs
- - gnome-doc-utils-stylesheets
- - gnome-icon-theme
- - gnome-keyring
- - gnome-keyring-pam
- - gnome-mag
- - gnome-media
- - gnome-media-libs
- - gnome-menus
- - gnome-panel
- - gnome-panel-libs
- - gnome-python2
- - gnome-python2-applet
- - gnome-python2-bonobo
- - gnome-python2-canvas
- - gnome-python2-desktop
- - gnome-python2-extras
- - gnome-python2-gconf
- - gnome-python2-gnome
- - gnome-python2-gnomevfs
- - gnome-python2-libegg
- - gnome-python2-libwnck
- - gnome-screensaver
- - gnome-session
- - gnome-session-xsession
- - gnome-settings-daemon
- - gnome-speech
- - gnome-system-monitor
- - gnome-terminal
- - gnome-themes
- - gnome-user-docs
- - gnome-user-share
- - gnome-utils
- - gnome-utils-libs
- - gnome-vfs2
- - gnome-vfs2-smb
- - graphviz
- - gsl
- - gtkglext-libs
- - gtksourceview2
- - gvfs-fuse
- - hal
- - hdf
- - hdf
- - hdf5
- - ImageMagick
- - ImageMagick-c++
- - inotify-tools
- - java-1.7.0-openjdk
- - jline
- - lapack
- - leafpad
- - libblkid
- - libdrm
- - libfontenc
- - libgail-gnome
- - libgnome
- - libgnomecanvas
- - libgnomekbd
- - libgnomeui
- - libICE
- - libjpeg
- - libopenraw-gnome
- - libSM
- - libuuid
- - libX11
- - libXau
- - libXaw
- - libxcb
- - libXext
- - libXext-devel
- - libXfont
- - libXi
- - libXinerama
- - libxml2
- - libxml2-python
- - libXp
- - libXpm
- - libXt
- - libXtst
- - mailx
- - man
- - mod_ssl
- - mysql-server
- - nagios-plugins
- - nagios-plugins-disk
- - nagios-plugins-load
- - nagios-plugins-nrpe
- - nagios-plugins-perl
- - nagios-plugins-users
- - nautilus
- - nautilus-open-terminal
- - neon
- - nrpe
- - nss-softokn-freebl
- - numactl
- - numpy
- - numpy-f2py
- - openmotif
- - openssh-askpass
- - openssl098e
- - oxygen-icon-theme
- - perl-devel
- - perl-ExtUtils-MakeMaker
- - perl-ExtUtils-ParseXS
- - perl-HTML-Parser
- - perl-HTML-Tagset 
- - perl-Test-Harness
- - perl-Time-HiRes
- - pexpect
- - php
- - php-cli
- - php-common
- - php-ldap
- - php-mysql
- - php-pdo
- - php-pear
- - pinentry-gtk
- - plymouth-system-theme
- - polkit-gnome
- - postgresql
- - postgresql-contrib
- - postgresql-devel
- - postgresql-libs
- - postgresql-server
- - PyGreSQL
- - pygtksourceview
- - python-babel
- - python-dateutil
- - python-devel
- - python-ldap
- - python-matplotlib
- - python-nose
- - python-paramiko
- - python-pmw
- - python-setuptools
- - python-psycopg2
- - pytz
- - qhull
- - qt
- - qt3
- - qt-sqlite
- - qt-x11
- - rhino
- - rsync 
- - samba-client
- - scipy
- - spice-vdagent
- - suitesparse
- - system-gnome-theme
- - tcl
- - tcsh
- - Terminal
- - texlive-texmf-errata-fonts
- - texlive-texmf-fonts
- - tk
- - tkinter
- - tumbler
- - tzdata-java
- - unixODBC
- - unzip
- - util-linux-ng
- - uuid
- - vim-X11
- - vim-common 
- - vim-enhanced
- - vim-minimal
- - wacomexpresskeys
- - wdaemon
- - wxBase
- - wxGTK
- - wxGTK-gl
- - wxGTK-media
- - wxpropgrid
- - wxPython
- - xml-common
- - xml-commons-apis
- - xml-commons-resolver
- - xmlrpc-c
- - xmlrpc-c-client
- - xorg-x11-drivers
- - xorg-x11-fonts-100dpi
- - xorg-x11-fonts-75dpi
- - xorg-x11-fonts-cyrillic
- - xorg-x11-fonts-ethiopic
- - xorg-x11-fonts-ISO8859-1-100dpi
- - xorg-x11-fonts-ISO8859-14-100dpi
- - xorg-x11-fonts-ISO8859-14-75dpi
- - xorg-x11-fonts-ISO8859-15-100dpi
- - xorg-x11-fonts-ISO8859-15-75dpi
- - xorg-x11-fonts-ISO8859-1-75dpi
- - xorg-x11-fonts-ISO8859-2-100dpi
- - xorg-x11-fonts-ISO8859-2-75dpi
- - xorg-x11-fonts-ISO8859-9-100dpi
- - xorg-x11-fonts-ISO8859-9-75dpi
- - xorg-x11-fonts-misc
- - xorg-x11-fonts-Type1
- - xorg-x11-font-utils
- - xorg-x11-server-utils
- - xorg-x11-server-Xorg
- - xorg-x11-util-macros
- - xorg-x11-utils
- - xorg-x11-xauth
- - xorg-x11-xinit
- - xvattr
- - yum-utils
- - zip
diff --git a/headNode.yaml b/headNode.yaml
deleted file mode 100644
index d8fc004d334cfd70bc0ffeb6f8f6468b3b109a1b..0000000000000000000000000000000000000000
--- a/headNode.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
---- 
-description: " A simple template to boot a 3 node cluster"
-heat_template_version: 2013-05-23
-parameters:
- image_id:
-  type: string
-  label: Image ID
-  description: Image to be used for compute instance
-  default: a5e74703-f343-415a-aa23-bd0f0aacfc9e
- key_name:
-  type: string
-  label: Key Name
-  description: Name of key-pair to be used for compute instance
-  default: shahaan
- availability_z:
-  type: string
-  label: Availability Zone
-  description: Availability Zone to be used for launching compute instance
-  default: monash-01
-resources:
-  headNode:
-   type: "OS::Nova::Server"
-   properties:
-    availability_zone: { get_param: availability_z }
-    flavor: m1.small
-    image: { get_param: image_id }
-    key_name: { get_param: key_name }
-    security_groups: [OpenVPN, NSF, default]
-    metadata:
-     ansible_host_group: headNode
-     ansible_ssh_user: ec2-user
-     ansible_ssh_private_key_file: /home/sgeadmin/.ssh/shahaan.pem
-  headVolume:
-   type: OS::Cinder::Volume
-   properties:
-    availability_zone: { get_param: availability_z }
-    description: Volume that will attach the headNode
-    name: headNodeVolume
-    size: 50
-  volumeAttachment:
-   type: OS::Cinder::VolumeAttachment
-   properties:
-    instance_uuid: { get_resource: headNode }
-    volume_id: { get_resource: headVolume }
diff --git a/installNFS.yml b/installNFS.yml
deleted file mode 100644
index 6568c45077cdba9a1f26dae797dc20cb059632eb..0000000000000000000000000000000000000000
--- a/installNFS.yml
+++ /dev/null
@@ -1,24 +0,0 @@
---- 
-- 
-  hosts: openvpn-servers
-  remote_user: ec2-user
-  roles:
-    #- OpenVPN-Server 
-    - nfs-server
-  become: true
-  vars: 
-    x509_ca_server: vm-118-138-240-224.erc.monash.edu.au
-- 
-  hosts: openvpn-clients
-  remote_user: ec2-user
-  roles:
-    #- easy-rsa-common
-    #- easy-rsa-certificate 
-    #- OpenVPN-Client
-    - syncExports
-    - nfs-client
-  become: true
-  vars: 
-    x509_ca_server: vm-118-138-240-224.erc.monash.edu.au
-    openvpn_servers: ['vm-118-138-240-224.erc.monash.edu.au']
-    nfs_server: "vm-118-138-240-224.erc.monash.edu.au"
diff --git a/playbook/cvl2.yml b/playbook/cvl2.yml
deleted file mode 100644
index 908e3af3416db7b0808a0b0b1535ecbc2fdc4d06..0000000000000000000000000000000000000000
--- a/playbook/cvl2.yml
+++ /dev/null
@@ -1,192 +0,0 @@
----
-- hosts: all 
-  vars_files:
-    - massive_var/main.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    slurmctrl: "{{ groups['ManagementNodes'][0] }}"
-    slurmqueues:
-      - {name: batch, group: ComputeNodes, default: true}
-  roles:
-    - { role: etcHosts,  domain: "{{ ldapDomain }}" }
-
-- hosts: 'ManagementNodes'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/package.yml
-    - massive_var/passwords.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    slurmctrl: "{{ groups['ManagementNodes'][0] }}"
-    slurmqueues:
-      - {name: batch, group: ComputeNodes, default: true}
-      - {name: dev, group: ComputeNodesDev, default: false}
-      - {name: multicore, group: ComputeNodesLarge, default: false}
-    mkFileSystems:
-        - {fstype : 'ext4', dev: '/dev/vdc1', opts: ''}
-        - {fstype : 'ext4', dev: '/dev/vdc2', opts: ''}
-        - {fstype : 'ext4', dev: '/dev/vdc3', opts: ''}
-    mountFileSystems:
-        - {fstype : 'ext4', dev: '/dev/vdc1', opts: 'defaults,nofail', name: '/cvl/scratch'}
-        - {fstype : 'ext4', dev: '/dev/vdc2', opts: 'defaults,nofail', name: '/cvl/home'}
-        - {fstype : 'ext4', dev: '/dev/vdc3', opts: 'defaults,nofail', name: '/cvl/local'}
-  roles:
-    - { role: easy-rsa-CA }
-    - { role: OpenVPN-Server }
-    - { role: ntp }
-    - { role: openLdapClient }
-    - { role: slurm-build }
-    - { role: nfs-server, configDiskDevice: true }
-    - { role: slurm, slurm_use_vpn: true}
-    - { role: installPackage, yumGroupPackageList: ['CVL Pre-installation', 'CVL Base Packages'], cliCopy: {'run': 'cp -r /usr/local/Modules/modulefiles/cvl /usr/local/Modules/modulefiles/massive', 'check': '/usr/local/Modules/modulefiles/massive'} }
-
-- hosts: all 
-  vars_files:
-    - massive_var/main.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-  roles:
-    - { role: etcHosts, domain: "{{ ldapDomain }}" }
-
-- hosts: 'ComputeNodes*'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-    - massive_var/package.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-  roles:
-    - { role: OpenVPN-Client }
-
-- hosts: 'LoginNodes'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-    - massive_var/package.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-  roles:
-    - { role: OpenVPN-Client }
-
-- hosts: all 
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-    - massive_var/package.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    nfs_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    groupList:
-      - { name : 'ComputeNodes', interface : 'tun0' }
-      - { name : 'ComputeNodesDev', interface : 'tun0' }
-      - { name : 'ComputeNodesLarge', interface : 'tun0' }
-      - { name : 'LoginNodes', interface : 'tun0' }
-    exportList:
-      - { name: '/usr/local', src: '/cvl/local', fstype: 'nfs4', opts: 'defaults,ro,nofail', interface : 'tun0', srvopts: 'ro,sync' }
-      - { name: '/home', src: '/cvl/home', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' }
-      - { name: '/scratch', src: '/cvl/scratch', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' }
-  roles:
-    - { role: etcHosts, domain: "{{ ldapDomain }}" }
-    - { role: syncExports }
-
-- hosts: 'ComputeNodes'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-    - massive_var/package.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    slurmctrl: "{{ groups['ManagementNodes'][0] }}"
-    slurmqueues:
-      - {name: batch, group: ComputeNodes, default: true}
-    nfs_server: "{{ groups['ManagementNodes'][0] }}"
-    groupList:
-      - { name : 'ComputeNodes', interface : 'tun0' }
-    exportList: 
-      - { name: '/usr/local', src: '/cvl/local', fstype: 'nfs4', opts: 'defaults,ro,nofail', interface : 'tun0', srvopts: 'ro,sync' } 
-      - { name: '/home', src: '/cvl/home', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' } 
-      - { name: '/scratch', src: '/cvl/scratch', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' }
-  roles:
-    - { role: ntp }
-    - { role: openLdapClient }
-    - { role: nfs-client }
-    - { role: slurm, slurm_use_vpn: true}
-    - { role: installPackage, preInstallation: "umount /usr/local", postInstallation: "mount /usr/local", yumGroupPackageList: ["CVL Pre-installation", "CVL Base Packages"], cliFileCopy: {'src': '/tmp/gconf_path', 'dest': '/etc/gconf/2/path'} }
-
-- hosts: 'ComputeNodesDev'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-    - massive_var/package.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    slurmctrl: "{{ groups['ManagementNodes'][0] }}"
-    slurmqueues:
-      - {name: dev, group: ComputeNodesDev, default: false}
-    nfs_server: "{{ groups['ManagementNodes'][0] }}"
-    groupList:
-      - { name : 'ComputeNodes', interface : 'tun0' }
-    exportList: 
-      - { name: '/home', src: '/cvl/home', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' } 
-      - { name: '/scratch', src: '/cvl/scratch', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' }
-  roles:
-    - { role: ntp }
-    - { role: openLdapClient }
-    - { role: nfs-client }
-    - { role: slurm, slurm_use_vpn: true}
-    - { role: installPackage, preInstallation: "umount /usr/local", postInstallation: "mount /usr/local", yumGroupPackageList: ["CVL Pre-installation", "CVL Base Packages"], cliFileCopy: {'src': '/tmp/gconf_path', 'dest': '/etc/gconf/2/path'} }
-
-- hosts: 'ComputeNodesLarge'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-    - massive_var/package.yml
-  vars:
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    slurmctrl: "{{ groups['ManagementNodes'][0] }}"
-    slurmqueues:
-      - {name: multicore, group: ComputeNodesLarge, default: false}
-    nfs_server: "{{ groups['ManagementNodes'][0] }}"
-    groupList:
-      - { name : 'ComputeNodes', interface : 'tun0' }
-    exportList: 
-      - { name: '/usr/local', src: '/cvl/local', fstype: 'nfs4', opts: 'defaults,ro,nofail', interface : 'tun0', srvopts: 'ro,sync' }
-      - { name: '/home', src: '/cvl/home', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' } 
-      - { name: '/scratch', src: '/cvl/scratch', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' }
-  roles:
-    - { role: ntp }
-    - { role: openLdapClient }
-    - { role: nfs-client }
-    - { role: slurm, slurm_use_vpn: true}
-    - { role: installPackage, preInstallation: "umount /usr/local", postInstallation: "mount /usr/local", yumGroupPackageList: ["CVL Pre-installation", "CVL Base Packages"], cliFileCopy: {'src': '/tmp/gconf_path', 'dest': '/etc/gconf/2/path'} }
-
-- hosts: 'LoginNodes'
-  vars_files:
-    - massive_var/main.yml
-    - massive_var/passwords.yml
-  vars:
-    groupList:
-      - { name : 'ComputeNodes', interface : 'tun0' }
-    x509_ca_server: "{{ groups['ManagementNodes'][0] }}"
-    openvpn_servers: "{{ groups['ManagementNodes'] }}"
-    slurmctrl: "{{ groups['ManagementNodes'][0] }}"
-    slurmqueues:
-      - {name: batch, group: ComputeNodes, default: true}
-    exportList: 
-      - { name: '/home', src: '/cvl/home', fstype: 'nfs4', opts: 'defaults,nofail', interface : 'tun0', srvopts: 'rw,root_squash,sync' } 
-  roles:
-    - { role: ntp }
-    - { role: openLdapClient }
-    - { role: nfs-client }
-    - { role: slurm, slurm_use_vpn: true}
-    - { role: installPackage, importRepo: { command: "wget http://cvlrepo.massive.org.au/repo/cvl.repo -O", destination: "/etc/yum.repos.d/cvl.repo" }, yumGroupPackageList: ['CVL Pre-installation', 'CVL Base Packages'], cliCopy: {'run': 'cp -r /usr/local/Modules/modulefiles/cvl /usr/local/Modules/modulefiles/massive', 'check': '/usr/local/Modules/modulefiles/massive'} }
-
diff --git a/playbook/massive_var/main.yml b/playbook/massive_var/main.yml
deleted file mode 100644
index 22b784529a43f4a50abc243f365b9d0328b4f288..0000000000000000000000000000000000000000
--- a/playbook/massive_var/main.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-ldapServerHostIpLine: "130.220.209.234 m2-w.massive.org.au"
-ldapCaCertSrc: "/tmp/m1-w-ca.pem" 
-countryName: "AU"
-reginalName: "Victoria"
-cityName: "Melbourne"
-organizationName: "Monash University"
-emailAddress: "help@massive.org.au"
-organizationUnit: "MASSIVE"
-nfsServerIpAddress: m2-login3.massive.org.au
-
-x509_cert_file: "/etc/openvpn/certs/{{ x509_ca_server }}.crt"
-x509_key_file: "/etc/openvpn/private/{{ x509_ca_server }}.key"
-x509_cacert_file: "/etc/ssl/certs/ca_{{ x509_ca_server }}.crt"
-###x509_common_name: "{{ x509_ca_server }}CommonName"
-x509_common_name: "{{ inventory_hostname }}"
-x509_csr_args: "--server"
-x509_sign_args: "{{ x509_csr_args }}"
-dhparms_file: "/etc/openvpn/private/dh.pem"
-server_network: "10.8.0.0"
-server_netmask: "255.255.255.0"
-
-slurm_version: 14.11.2
-munge_version: 0.5.11
-userRelocationName: "ec2-user"
-userNewHome: "/local_home"
-#nfs_type: "nfs4"
-#nfs_options: "defaults"
-#nfs_server: "m2-login3.massive.org.au"
-ldapServerHost: "130.220.209.234 m2-w.massive.org.au"
-ldapDomain: "massive.org.au"
-ldapURI: "ldaps://m2-w.massive.org.au:1637/"
-ldapBindDN: "cn=ldapbind,cn=users,dc=massive,dc=org,dc=au" 
-ldapBase: "cn=users,dc=massive,dc=org,dc=au"
-ldapUserClass: "user"
-ldapUserHomeDirectory: "unixHomeDirectory"
-ldapUserPricipal: "userPrincipalName"
-ldapGroupBase: "ou=groups,dc=massive,dc=org,dc=au"
-tlsCaCertDirectory: "/etc/openldap/certs"
-ldapCaCertFile: "/etc/openldap/certs/m1-w-ca.pem"
-ldapCaCertFileSource: "/tmp/cvl2server/m1-w-ca.pem"
-cacertFile: "cacert.pem"
-#domain: "cvl.massive.org.au"
-domain: "massive.org.au"
-ldapRfc2307: |
-  ldap_schema = rfc2307
-  ldap_search_base = cn=users,dc=massive,dc=org,dc=au
-  ldap_user_search_base = cn=users,dc=massive,dc=org,dc=au
-  ldap_user_object_class = user
-  ldap_user_home_directory = unixHomeDirectory
-  ldap_user_principal = userPrincipalName
-  ldap_user_name = uid
-  ldap_group_search_base = ou=groups,dc=massive,dc=org,dc=au
-  ldap_group_object_class = group
-  ldap_access_order = expire
-  ldap_account_expire_policy = ad
-
-ldapRfc2307Pam: |
-  scope sub
-  nss_base_passwd cn=users,dc=massive,dc=org,dc=au?sub
-  nss_base_shadow cn=users,dc=massive,dc=org,dc=au?sub
-  nss_base_group cn=users,dc=massive,dc=org,dc=au?sub
-  nss_map_objectclass posixAccount user
-  nss_map_objectclass shadowAccount user
-  nss_map_objectclass posixGroup group
-  nss_map_attribute homeDirectory unixHomeDirectory
-  nss_map_attribute uniqueMember member
-  nss_map_attribute shadowLastChange pwdLastSet
-  pam_login_attribute sAMAccountName
-  pam_filter objectClass=User
-  pam_password ad
diff --git a/playbook/massive_var/package.yml b/playbook/massive_var/package.yml
deleted file mode 100644
index 26d13db3708730b0ab3500e97c4b3346b3bd6641..0000000000000000000000000000000000000000
--- a/playbook/massive_var/package.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-importRepo: { command: "wget http://cvlrepo.massive.org.au/repo/cvl.repo -O", destination: "/etc/yum.repos.d/cvl.repo" }
-#yumGroupPackageList:
-#  - CVL Pre-installation
-#  - CVL Base Packages
-#  - CVL System
-#  - CVL System Extension
-#  - CVL General Imaging Tools
diff --git a/playbook/readme.txt b/playbook/readme.txt
deleted file mode 100644
index 59ab5815af15b6ffa3932a2ed065761fc1fb52e2..0000000000000000000000000000000000000000
--- a/playbook/readme.txt
+++ /dev/null
@@ -1 +0,0 @@
-Files in the playbook directory should be used as examples for the reference only.
diff --git a/roles/calculateEtcHosts/files/makehosts.py b/roles/calculateEtcHosts/files/makehosts.py
index 5f9cd42fe29b90a9b151db3ee4562f21439b3163..a96f049a9cf460aaf456497855709471dbff4cf1 100755
--- a/roles/calculateEtcHosts/files/makehosts.py
+++ b/roles/calculateEtcHosts/files/makehosts.py
@@ -23,7 +23,7 @@ for group in d['groups'].keys():
       else:
         hosts[h] = ['%s.%s %s'%(name,domain,name)]
 
-for h in hosts.keys():
+for h in sorted(hosts.keys()):
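+# sorted() makes the emitted /etc/hosts ordering deterministic, so repeated runs do not reshuffle the file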
     if d['hostvars'].has_key(h):
 	for addr in d['hostvars'][h]['ansible_all_ipv4_addresses']:
 	    if "172.16.200" in addr:
@@ -32,14 +32,14 @@ for h in hosts.keys():
 		    string=string+" %s"%(name)
 		print string
 
-for h in hosts.keys():
+for h in sorted(hosts.keys()):
     if d['hostvars'].has_key(h):
         string="%s"%(d['hostvars'][h]['ansible_default_ipv4']['address'])
         for name in hosts[h]:
             string=string+" %s"%(name)
         print string
 
-for h in hosts.keys():
+for h in sorted(hosts.keys()):
     if d['hostvars'].has_key(h):
         if d['hostvars'][h].has_key('ansible_tun0'):
             string="%s"%(d['hostvars'][h]['ansible_tun0']['ipv4']['address'])
diff --git a/roles/calculateSlurmConf/templates/slurm.conf.j2 b/roles/calculateSlurmConf/templates/slurm.conf.j2
index dc833e3e78c8fa191c434c0795c0f936cfbb1e7c..d460ef811ddb8f3474b26a5e3ff72bf7434a718b 100644
--- a/roles/calculateSlurmConf/templates/slurm.conf.j2
+++ b/roles/calculateSlurmConf/templates/slurm.conf.j2
@@ -119,8 +119,8 @@ JobCompType=jobcomp/none
 Prolog={{ slurmjob.prolog }}
 Epilog={{ slurmjob.epilog }}
 {% else %}
-Prolog={{ slurm_dir }}/bin/slurm.prolog
-Epilog={{ slurm_dir }}/bin/slurm.epilog
+Prolog=/opt/slurm/etc/slurm.prolog
+Epilog=/opt/slurm/etc/slurm.epilog
 {% endif %}
 #
 # ACCOUNTING
diff --git a/roles/commonVars/vars/readme.txt b/roles/commonVars/vars/readme.txt
deleted file mode 100644
index 8faa3c3c117df7693ebf8d4aff2cd6283e5766d1..0000000000000000000000000000000000000000
--- a/roles/commonVars/vars/readme.txt
+++ /dev/null
@@ -1,2 +0,0 @@
----
-domain: testdomain.massive.org.au
diff --git a/roles/config_repos/tasks/main.yml b/roles/config_repos/tasks/main.yml
index 684c327aa9be6fb9c90f588bf44857f94794bfe7..8bd44d7e636c997a15d78632a9b5504a10eecc70 100644
--- a/roles/config_repos/tasks/main.yml
+++ b/roles/config_repos/tasks/main.yml
@@ -1,13 +1,12 @@
 ---
 
-- name: make sure out repo server is resolvable
+- name: make sure our repo server is resolvable
   lineinfile:
-    dest: /etc/hosts
-    line: "{{ reposerverip }} {{ reposervername }}"  #this is duplicated in the role calculateEtcHosts
+    path: /etc/hosts
+    line: "{{ reposerverip }} {{ reposervername }}"
     owner: root
     group: root
-  become: true
-
+  become: true
 
 #- name: remove default repos
 #  file:
@@ -44,6 +43,7 @@
 - name: get enabled repos
 #shell: yum repolist | grep -v "repo id" | grep -v "Loaded plugins" | head -n -1 | cut -f 1 -d '/' | sed -s 's/\!//'
   shell: yum repolist all | grep enabled | cut -f 1 -d '/' | sed -s 's/\!//'
+  when: ansible_os_family == 'RedHat'
   register: repolist
   check_mode: no
   changed_when: False
@@ -55,7 +55,8 @@
   with_items: "{{ repolist.stdout_lines|difference(yumenablerepo) }}"
   become: true
   become_user: root
-  ignore_errors: false
+  ignore_errors: true
+  when: ansible_os_family == 'RedHat'
 
 
 #- name: Enable epel
@@ -75,11 +76,6 @@
   become: true
   when: ansible_distribution_release == 'trusty'
 
-- name: add repos apt
-  shell: "add-apt-repository -y ppa:gluster/glusterfs-3.7"
-  become: true
-  when: ansible_distribution == 'Ubuntu'
-
 - name: apt-get update
   apt: update_cache=True
   become: true
diff --git a/roles/deploy-xorg/files/scripts/nvidia-xconf-gen.py b/roles/deploy-xorg/files/scripts/nvidia-xconf-gen.py
index f26446971f04c022e19897edf5d9d05cefe61da7..337414bd31c6c1e745b007b483565e127a3afb67 100755
--- a/roles/deploy-xorg/files/scripts/nvidia-xconf-gen.py
+++ b/roles/deploy-xorg/files/scripts/nvidia-xconf-gen.py
@@ -11,14 +11,18 @@ from subprocess import call
 import re
 import json
 
-def grab_card_ids():
-	# This method runs nvidia-smi to grab the card ids, then returns a list
-    
-    if not os.path.isfile("/bin/nvidia-smi"):
+def getNvidia_smi_path():
+    if os.path.isfile("/bin/nvidia-smi"):
+    	return "/bin/nvidia-smi"
+    elif os.path.isfile("/usr/bin/nvidia-smi"):
+    	return "/usr/bin/nvidia-smi"
+    else:
     	print("nvidia-smi binary not found!")
-    	exit(1)
+    	exit(1)	
 
-    cmd = ["/bin/nvidia-smi", "--query-gpu=pci.bus_id","--format=csv,noheader"]
+def grab_card_ids():
+    # This method runs nvidia-smi to grab the card ids, then returns a list
+    cmd = [getNvidia_smi_path(), "--query-gpu=pci.bus_id","--format=csv,noheader"]
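+    # --format=csv,noheader prints one PCI bus id per line, one line per GPU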
     p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     
     cards = []
@@ -27,15 +31,11 @@ def grab_card_ids():
         line = line.rstrip().split(":")[2]
         pcibus_num = int(re.sub('[.:]', '', line).rstrip("0"),16)
         card = "PCI:0:{}:0".format(str(pcibus_num))
-    	cards.append(card)
+        cards.append(card)
     return cards
 
 def grab_card_boardname():
-	if not os.path.isfile("/bin/nvidia-smi"):
-	 	print("nvidia-smi binary not found!")
-	 	exit(1)
-
-	cmd = ["/bin/nvidia-smi", "--query-gpu=name","--format=csv,noheader"]
+	cmd = [getNvidia_smi_path(), "--query-gpu=name","--format=csv,noheader"]
 	cards = []
 	p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
 	for line in p.stdout.readlines():
diff --git a/roles/enable_modules/tasks/main.yml b/roles/enable_modules/tasks/main.yml
index f9c99893d772987a4c24b584ddc593eb0e183cbf..fb585996a0a0da83b5b05550c76330d2d503b6ae 100644
--- a/roles/enable_modules/tasks/main.yml
+++ b/roles/enable_modules/tasks/main.yml
@@ -3,8 +3,9 @@
 - name: make sure environment modules are installed
   package:
     name: environment-modules
-    state: installed
+    state: present
   become: true
+  when: default_modules == "modulecmd"
 
 - name: template lmod bash
   template: src=lmod.sh.j2 dest=/etc/profile.d/lmod.sh
@@ -30,6 +31,9 @@
   become_user: root
   when: default_modules == "lmod"
 
+#  vars: 
+#    MODULESHOMEvar: '/usr/share/modules'
+
 - name: template modulecmd bash
   template: src=modulecmd.sh.j2 dest=/etc/profile.d/modulecmd.sh
   become: true
@@ -59,3 +63,14 @@
   become: true
   become_user: root
   when: default_modules == "modulecmd"
+  
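+# Debian ships environment-modules in /usr/share/modules; the symlink below
+# exposes it at the RedHat-style /usr/share/Modules path that other roles reference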
+- name: Symlink /usr/share/modules to /usr/share/Modules
+  file:
+    src: /usr/share/modules
+    dest: /usr/share/Modules
+    owner: root
+    group: root
+    state: link
+    mode: u=rwx,g=rx,o=rx
+  become: true
+  when: ansible_os_family == 'Debian' and default_modules == 'modulecmd'
\ No newline at end of file
diff --git a/roles/enable_root/tasks/main.yml b/roles/enable_root/tasks/main.yml
deleted file mode 100644
index 660c74f29556f6253a425d6fcb2822ddf9ae520b..0000000000000000000000000000000000000000
--- a/roles/enable_root/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-- name: add key to root ssh
-  template: dest=/root/.ssh/authorized_keys mode=600 owner=root group=root src=authorized_keys.j2
-  become: true
diff --git a/roles/enable_root/templates/authorized_keys.j2 b/roles/enable_root/templates/authorized_keys.j2
deleted file mode 100644
index f7eff2cc56bea11fdd047d2e1741798a1da2c71b..0000000000000000000000000000000000000000
--- a/roles/enable_root/templates/authorized_keys.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvjn5cQuMkqTo04ZnkuDXfUBeAt7oZ6xrT4phfMemqx12dDqLyFrMgUWOoVMFj+TNyR5M8WOCI6CRT6EXOMtqaxhPtWB1QlDNo0Ml8xTzSKckUO0EhdqNKh+nlQfVeaVIx0DZZeWWNpPCrKPCM4TSAXXiwtZuImd6/Zo4RI1x+oTcFR9zQulUGUuX8rf7+4c/oKr58B+La8bXP8QujtfLm29pl1kawSouCfdxt93wRfbISM7mGs/WqzttRXL9m5AeOMuo5S4Ia0GPMcIEUfsQhEyEU7tiTpEq5lDdf6H7a9SlHXzhd9f2Dn3mlv3mmQHaGBJvUuWmVwydxkdtCRQhOQ== root@m2-m
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2xrAkFRdYBpYs14AYSzdPFcIOt2zKXIgjPpyj/6eg/yl3y8N84T9VNw9ATRzb3+PJEw1lOfah6xLkFl7FueT6359y14c7wkNByGHgcL022SludkhM2zBe/3ebhcBs11L4Z725rqVnGDSKdKuwZjbCmUtu/nHwGYU/BnLKbQXMVyq53L5cbIyWGfvItPnwCF2ZMy1v0lmnFs1O3qDK9U/qcwc/77MTB0Z/ey0zsoXvmxjkdYr+zgQLRNm2+fkCXn+ZorbeDwWjhHE21arhMym5x3VG0XU2Ob9nL1Z2xEGQVSnBVWeadTMNzkfM8U07Md2tSOIC5B3ePETxk97puxbEQ== root@m2-m
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPijQ597uLqEPAvVZXQlSjrUfFl2h7SRBTCRhH4hQJMVu55dhFYiojJZ0tjjV3jTcgWs1AsyRp3wDtNp8iQxbwEY2JPxCOjNuH0et4I/y3y6VUjcVWanSaIkdPf5AFNb9KIXo3Hvdyvav8SfFpioRQ0FKp8SZs1JYXpuQ0mZY26oKCKcNsWXv9ZN7knUN0xvYNMycpCnI2Nl666Zrs0gGyJ6e+Xq5bpk1lm8nuK9q52bTRjxqtdEBuSGwkZea+NBJzpYw5rEucteQI66y6tzFuYJk2WC4bUifffIxnkQXKYVynJg1MJ2CGI69r9hXt9eUtH3WrDxrJGmCau8jD3lib hines@sparge
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAnakq6Lgq2n6yjcMaC7xQXMDMRdN33T6mPCqRy+TPdu0aPvVty0UFeAWsCyTxHeVfst9Vr0HwRRBvNihp1CJuOWGbk0H5a8yALDhLqoHazv2jlMQcLDgTktw0Jgo38+tcBShJyey1iHh8X5WgsS5/hgxR3OzoNBEzqzHUidMO/EI0ahNlM60l8EYL8Ww799NmPgqdPbwxK9nHsoFmx/NKhnUdronSg33L0CJZT3t2fccXAq+4Pbm7uYEkL3T/NgMdgpG5mKS3mKDtKyyKm2gOf3fVzExFew2etBxB3ANPEWvSuJ2XwXQv8sFE1722XQVR4RFgilCWUqXSN7EmqoHkNQ== jupiter@cvlproject
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAsBtPRJtDJzyW+Utu0v03wklUpvzS5c1E34ysGDMepGU8VT1phJQ2EwRPWVLdRjVHnuhrEeeUHMyQwOtLEdvTPFnw5u/4bHQ+37iwtAeTV6oyPARJVzJLRGuDUuFdkQbXN7xxi/0KUljWgswLN34UV+p5PL79kQlErh1QCN06z5k=
-
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2xrAkFRdYBpYs14AYSzdPFcIOt2zKXIgjPpyj/6eg/yl3y8N84T9VNw9ATRzb3+PJEw1lOfah6xLkFl7FueT6359y14c7wkNByGHgcL022SludkhM2zBe/3ebhcBs11L4Z725rqVnGDSKdKuwZjbCmUtu/nHwGYU/BnLKbQXMVyq53L5cbIyWGfvItPnwCF2ZMy1v0lmnFs1O3qDK9U/qcwc/77MTB0Z/ey0zsoXvmxjkdYr+zgQLRNm2+fkCXn+ZorbeDwWjhHE21arhMym5x3VG0XU2Ob9nL1Z2xEGQVSnBVWeadTMNzkfM8U07Md2tSOIC5B3ePETxk97puxbEQ== root@m2-m
-
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEApJTDPfappcYbNE3Z0+5Vsm4Sw2xD3PdcW+V1w6X6tpebG/bpUhnn9XsALkZYyKttql2vV3bqL6Fx5ZAFhHRhH0exdQEgc8hSvpX5gCCCUNqrL+mP8f4S59E0ha5+nBmMaf4WABHiZYfeoGhn7HHNQY0Up/qfzDPSvWo+ZaVQAqXcYLGTxaP70yywHOYABakJtBVKKkI1YPu83HFDVfw1PoYVaS5GAmEscq6nwoyC0Jm/pDirUtMoRibG2iiV6uYKQDvWrO9fBrGmavpmUT/ECtmcnrWj7V9zXzSi17HJhkq6gYc68iu6h8TBNJrIUE9Kgi07aWFRM9fbIM1ZVD/aEQ== ec2-user@cvl23server
-
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpuXUhr1Vzl1WljuFYSFIArU8wtlKWpkVLF4hCUb4dVWNEPU/FM1gkg4hPH+rTNPManRAJ8vxiCtMgLtvae2j1elO0edkM6BZReVDFMYo0fZVBbVR8fzvXRWD5ArXJeNI2uZ4fYTil3SoC3N0n+ySjqFriIkcBpxthKVcoOlK+yccOvCPGNWgqcSGFfMEKTR8P18ED83i7sOF2nzpH0RBo2/N7ry5Gzvfw859W7KScw/3uI7fzog6hW/P4niOQIZfG56enHHos0l7oymxeQRiYITqvf9Es1VatEfybk+tJhTVf1LcIqoM9v9bc0yd6QqST0+6ZiTJXCQCthmS0JVX1 hines@tun
diff --git a/roles/etcHosts/tasks/main.yml b/roles/etcHosts/tasks/main.yml
index 677f6e571e728ac91fe951158295d2d71cfcf2a7..180a871738df379816c388a72ca8ef7968084ed3 100644
--- a/roles/etcHosts/tasks/main.yml
+++ b/roles/etcHosts/tasks/main.yml
@@ -7,8 +7,6 @@
   register: sysctl_hostname
   check_mode: no
   changed_when: False
-  become: true
-  become_user: root
 
 - name: set hostname by sysctl
   shell: sysctl kernel.hostname="{{ inventory_hostname }}"
diff --git a/roles/extra_packages/tasks/main.yml b/roles/extra_packages/tasks/main.yml
index 309dd0b884aa47ee3975e897a3fe2b423f9bb703..39d8e165d58fc2c119e32cdb6423296b737f0d2f 100644
--- a/roles/extra_packages/tasks/main.yml
+++ b/roles/extra_packages/tasks/main.yml
@@ -8,12 +8,16 @@
   become: true
   become_user: root
   when: ansible_os_family == 'RedHat'
+  changed_when: false
+  
 
 - name: "Clear yum pending transactions"
   command: yum-complete-transaction --cleanup-only
   become: true
   become_user: root 
+  register: yumCompleteTransactioncall
   when: ansible_os_family == 'RedHat'
+  changed_when: '"No unfinished transactions left." not in yumCompleteTransactioncall.stdout'
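+  # only report "changed" when yum-complete-transaction actually completed a pending transaction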
 
 - name: "Install extra packages"
   yum: "name={{ item }} exclude={{ excludes|join(',') }} update_cache=yes state=present"
diff --git a/roles/gpu/tasks/main.yml b/roles/gpu/tasks/main.yml
index f76796f1881d3a0efb9b3eb3974e261e2b9dab58..24d20be7d7998faad4cec55d4b9915aa688b9e80 100644
--- a/roles/gpu/tasks/main.yml
+++ b/roles/gpu/tasks/main.yml
@@ -1,36 +1,62 @@
 ---
-- name: install deps 
-  yum: name={{ item }} state=installed
-  become: true
-  with_items:
-    - gcc
-    - perl
-    - wget
-    - pciutils
-    - kernel-headers
-    - kernel-devel 
-    - xterm
-    - libX11-common
-    - libX11-devel
-    - libX11
-    - libglvnd-devel
-    - xorg-x11-server-common
-    - xorg-x11-util-macros
-    - xorg-x11-server-utils
-    - xorg-x11-font-utils
-    - xorg-x11-server-Xorg
-    - xorg-x11-glamor
-    - xorg-x11-xinit
-    - xorg-x11-utils
-    - xorg-x11-xauth
-    - xorg-x11-proto-devel
-    - xorg-x11-xkb-utils
+- name: install deps
+  package:
+    state: present
+    name:
+      - gcc
+      - perl
+      - wget
+      - pciutils
+      - kernel-headers
+      - kernel-devel
+      - xterm
+      - libX11-common
+      - libX11-devel
+      - libX11
+      - libglvnd-devel
+      - xorg-x11-server-common
+      - xorg-x11-util-macros
+      - xorg-x11-server-utils
+      - xorg-x11-font-utils
+      - xorg-x11-server-Xorg
+      - xorg-x11-glamor
+      - xorg-x11-xinit
+      - xorg-x11-utils
+      - xorg-x11-xauth
+      - xorg-x11-proto-devel
+      - xorg-x11-xkb-utils
+      - python-jinja2
+  become: true
+  when: ansible_os_family == 'RedHat'
+
+- name: install deps
+  apt:
+    name:
+     - 'gcc'
+     - 'perl'
+     - 'wget'
+     - 'pciutils'
+     - 'linux-headers-generic'
+     - 'xterm'
+     - 'libx11-dev'
+     - 'libx11-6'
+     - 'libglvnd-dev'
+     - 'xserver-xorg'
+     - 'vim'
+     - 'python-jinja2'
+     - 'python3-jinja2'
+    state: present
+    update_cache: yes
+  become: true
+  become_user: root
+  when: ansible_distribution == 'Ubuntu'
 
 - name: install development tools
   yum: name="@Development Tools" state=installed
   become: true
   become_user: root
   ignore_errors: yes
+  when: ansible_os_family == 'RedHat'
 
 - name: disable nouveau
   template: src=blacklist-nouveau.conf.j2 dest=/etc/modprobe.d/blacklist-nouveau.conf
@@ -50,7 +76,7 @@
 
 - name: remove nouveau
   modprobe: name=nouveau state=absent
-  become: true 
+  become: true
   become_user: root
 
 - name: get kernel version
@@ -66,7 +92,7 @@
   ignore_errors: true
 
 - name: set default driver version
-  set_fact: 
+  set_fact:
     installed_driver_version: '0.0'
 
 - name: check nvidia driver version
@@ -77,20 +103,20 @@
   changed_when: False
 
 - name: set install default
-  set_fact: 
+  set_fact:
     install_driver: false
 
 - name: set uninstall default
-  set_fact: 
+  set_fact:
     uninstall_driver: false
 
 - name: set install
-  set_fact: 
+  set_fact:
     install_driver: true
   when: not nvidia_driver.stat.exists or not installed_driver_version.stdout == nvidia_version
 
 - name: set uninstall
-  set_fact: 
+  set_fact:
     uninstall_driver: true
   when: nvidia_driver.stat.exists and not installed_driver_version.stdout == nvidia_version
 
@@ -99,7 +125,6 @@
   become: true
   when: install_driver
 
-
 - name: stop the persistence daemon
   service: name=nvidia-persistenced state=stopped
   become: true
@@ -112,18 +137,18 @@
   become_user: root
   when: uninstall_driver
 
-- name: get nvidia driver 
+- name: get nvidia driver
   get_url: url=http://consistency0/src/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run dest=/tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
   become: true
   become_user: root
   when: install_driver
 
 #- name: Copy boot file
-#  template: src=grub.conf.j2 dest=/boot/grub/grub.conf 
+#  template: src=grub.conf.j2 dest=/boot/grub/grub.conf
 #  become: true
 #
 #- name: Copy X config file
-#  template: src=xorg.conf.j2 dest=/etc/X11/xorg.conf 
+#  template: src=xorg.conf.j2 dest=/etc/X11/xorg.conf
 #  become: true
 
 - name: Copy xserver file
@@ -138,7 +163,7 @@
   when: install_driver
 
 - name: build nvidia driver
-  shell: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
+  shell: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run -q -a -n -X -s
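+  # flag meanings (assumed from the .run installer's usage text): -q no questions, -a accept license, -n no precompiled interface, -X run nvidia-xconfig, -s silent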
   become: true
   when: install_driver
 
@@ -146,6 +171,8 @@
   shell: nvidia-smi --gom=0
   become: true
   become_user: root
+  register: nvidiagomcall
+  changed_when: '"cannot be changed" not in nvidiagomcall.stdout' # only tested on a k80
 
 - name: enable persistenced on boot
   service: name=nvidia-persistenced state=started enabled=yes
@@ -156,7 +183,7 @@
   shell: /usr/bin/nvidia-xconfig -a --use-display-device=none --preserve-busid
   become: true
   become_user: root
-  args: 
+  args:
     creates: /etc/X11/xorg.conf
 
 #- name: Template xorg.conf for nodes with one GPU
@@ -165,6 +192,7 @@
 #  become_user: root
 #  when: template_xorgconf is defined and template_xorgcon
 
+
 - name: run nvidia-xconf-gen
   script: scripts/nvidia-xconf-gen.py
   register: nvidiacards
@@ -172,7 +200,7 @@
   changed_when: False
 
 - name: set env for nvidia_card_lists
-  set_fact: 
+  set_fact:
     nvidiacardslist: "{{ nvidiacards.stdout | from_json }}"
 
 - name: generate nvidia-xorg-conf
diff --git a/roles/gpu_update/files/NVIDIA-Linux-x86_64-352.93.run.REMOVED.git-id b/roles/gpu_update/files/NVIDIA-Linux-x86_64-352.93.run.REMOVED.git-id
deleted file mode 100644
index 38a7bca176fcdd29040fe72acc76d2c44c093cab..0000000000000000000000000000000000000000
--- a/roles/gpu_update/files/NVIDIA-Linux-x86_64-352.93.run.REMOVED.git-id
+++ /dev/null
@@ -1 +0,0 @@
-48758c1a73f2a27c14f351a99923c3aa6e4c0cdf
\ No newline at end of file
diff --git a/roles/lmod/tasks/main.yml b/roles/lmod/tasks/main.yml
index 9e2ac4af909db08388e570ca586bec83e0889118..1b348eda5459d56ba3318eaba8a2a5a0d9016317 100644
--- a/roles/lmod/tasks/main.yml
+++ b/roles/lmod/tasks/main.yml
@@ -2,21 +2,21 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install lua centos
-  yum: name={{ item }} state=installed update_cache=yes
-  with_items:
-    - lua
-    - lua-filesystem
-    - lua-posix
-    - tcl
-    - rsync
-    - gcc
-    - lua-devel
+  package:
+    state: present
+    name:
+      - lua
+      - lua-filesystem
+      - lua-posix
+      - tcl
+      - rsync
+      - gcc
+      - lua-devel
   become: true
-  when:
-   - '"CentOS" in ansible_distribution'
+  when: ansible_os_family == 'RedHat'
 
 - name: install lua RHEL7
-  yum: name={{ item }} state=installed update_cache=yes enablerepo="Monash_University_EPEL7_EPEL_7_-_x86_64"
+  yum: name={{ item }} state=present update_cache=yes enablerepo="Monash_University_EPEL7_EPEL_7_-_x86_64"
   with_items:
     - lua
     - lua-filesystem
@@ -30,18 +30,10 @@
    - '"RedHat" in ansible_distribution'
   become: true
 
-
 - name: install lua debian
-  apt: name={{ item }} state=installed
-  with_items:
-    - lua5.2
-    - lua5.2
-    - lua-filesystem
-    - lua-bitop
-    - lua-posix
-    - liblua5.2-0
-    - liblua5.2-dev
-    - tcl
+  package: 
+    name: lmod 
+    state: present
   become: true
   when: ansible_os_family == 'Debian'
 
@@ -49,13 +41,12 @@
   stat: path="{{ soft_dir }}/lmod/{{ lmod_version }}"
   register: lmodstat
 
-
 - name: Download LMOD
   get_url:
     url=http://consistency0/src/Lmod-{{ lmod_version }}.tar.bz2
     dest={{ source_dir }}/Lmod-{{ lmod_version }}.tar.bz2
     mode=0444
-  when: not lmodstat.stat.exists
+  when: ansible_os_family == 'RedHat' and not lmodstat.stat.exists
 
 - name: Uncompress LMOD
   unarchive:
@@ -63,10 +54,11 @@
     dest={{ source_dir }}
     copy=no
     creates={{ source_dir }}/Lmod-{{ lmod_version }}/README
-  when: not lmodstat.stat.exists
+  when: ansible_os_family == 'RedHat' and not lmodstat.stat.exists
 
 - name: Compile and install Lmod
   shell: cd {{ source_dir }}/Lmod-{{ lmod_version }}; ./configure --prefix={{ soft_dir }} --with-mpathSearch=YES --with-caseIndependentSorting=YES && make install LUA_INCLUDE={{ lua_include }}
   args:
     creates: "{{ soft_dir }}/lmod/{{ lmod_version }}"
   become: true
+  when: ansible_os_family == 'RedHat'
\ No newline at end of file
diff --git a/roles/mailchimpLastlogin/templates/ldapconfig.yml.j2 b/roles/mailchimpLastlogin/templates/ldapconfig.yml.j2
index 99d5f3e96f552fbb48cbf64d77f97fa0e8708545..90419b2c003ab9afbe5ee608f32c6ad15c1633e6 100644
--- a/roles/mailchimpLastlogin/templates/ldapconfig.yml.j2
+++ b/roles/mailchimpLastlogin/templates/ldapconfig.yml.j2
@@ -2,5 +2,5 @@
 ldapDomain: {{ ldapBase }}
 user: {{ ldapBindDN }} 
 passwd: {{ ldapBindDNPassword }}
-ldapURI: {{ ldap URI }}
+ldapURI: {{ ldapURI }}
 cafile: {{ ldapCaCertFile }}
diff --git a/roles/mellanox_drivers/vars/mellanoxVars.yml b/roles/mellanox_drivers/defaults/main.yml
similarity index 100%
rename from roles/mellanox_drivers/vars/mellanoxVars.yml
rename to roles/mellanox_drivers/defaults/main.yml
diff --git a/roles/mellanox_drivers/tasks/main.yml b/roles/mellanox_drivers/tasks/main.yml
index eb45d2894fd1db0eca8e4c18e34b473cb80c4999..036e93ade3be72a99bed1848a6fef38145c8389b 100644
--- a/roles/mellanox_drivers/tasks/main.yml
+++ b/roles/mellanox_drivers/tasks/main.yml
@@ -5,7 +5,7 @@
   shell: "lspci | grep Mellanox"
   check_mode: yes
 
-- include_vars: mellanoxVars.yml
+#- include_vars: mellanoxVars.yml
 
 - name: yum install dependencies
   yum:  name=perl,pciutils,gtk2,atk,cairo,gcc-gfortran,libxml2-python,tcsh,libnl,lsof,tcl,tk,kernel-devel,python-devel,createrepo,rpm-build
diff --git a/roles/modulefiles/tasks/main.yml b/roles/modulefiles/tasks/main.yml
index 05e8ca7af86ded06c96965e7eb9bdfa43ceb04d9..b24355622587aa83f20ca4029c934cd933622c93 100644
--- a/roles/modulefiles/tasks/main.yml
+++ b/roles/modulefiles/tasks/main.yml
@@ -14,7 +14,7 @@
   args:
     dest: /usr/share/Modules/init/.modulespath
     line: /usr/local/Modules/modulefiles
-  ignore_errors: true
+  ignore_errors: false
   become: true
   when: ansible_os_family == 'RedHat'
 
@@ -24,6 +24,6 @@
   args:
     dest: /usr/share/modules/init/.modulespath
     line: /usr/local/Modules/modulefiles
-  ignore_errors: true
+  ignore_errors: false
   become: true
   when: ansible_os_family == 'Debian'
diff --git a/roles/mysql/tasks/CentOS_7_mysql_server.yml b/roles/mysql/tasks/CentOS_7_mysql_server.yml
new file mode 100644
index 0000000000000000000000000000000000000000..33f65d3d5eecdc877103b3ba9fa656588b1e7b37
--- /dev/null
+++ b/roles/mysql/tasks/CentOS_7_mysql_server.yml
@@ -0,0 +1,57 @@
+---
+- name: Update the apt package cache since apt install might otherwise fail
+  apt:
+    update_cache: yes
+  become: true
+  when: ansible_os_family == "Debian"
+
+- name: "Installing MySQL Debian"
+  apt: name="{{ server_packages }}" update_cache=yes state=present
+  become: true
+  when: ansible_os_family == "Debian"
+
+- name: Installing MySQL RedHat
+  yum: name={{ item }}
+  with_items: "{{ server_packages }}"
+  become: true
+  when: ansible_os_family == "RedHat"
+
+- name: make sure mysql conf directory exists
+  file: dest=/etc/mysql/conf.d state=directory
+  become: true
+  register: mysqldb_confdir_create
+
+- name: "Starting MySQL"
+  service: name={{ sqlServiceName }} state=started enabled=true
+  become: true
+
+#- name: "Adding root"
+#  become: true
+#  mysql_user: name=root host="{{ item }}" password="{{ mysql_root_password }}" login_user=root login_password="{{ mysql_root_password }}" check_implicit_admin=yes
+#  with_items:
+#    - "{{ ansible_hostname }}"
+#    - 127.0.0.1
+#    - ::1
+#    - localhost
+
+- name: Check that the slurm_acct_db_directory exists
+  stat:
+    path: /var/lib/mysql/slurm_acct_db/   #defined in /vars/filesystems.yaml
+  register: slurm_acct_db_directory_result
+
+# this only works on a completely fresh install, because the db ships with a blank root password
+- name: update mysql root password for the localhost root account
+  mysql_user: name=root host=localhost password={{ mysql_root_password }} login_user=root
+  when: not slurm_acct_db_directory_result.stat.exists and mysqldb_confdir_create.changed
+
+- name: "Adding user database"
+  mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }}
+
+- name: "Giving privileges to user"
+  mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
+  when: mysql_user_host is defined
+
+- name: "Giving privileges to user"
+  mysql_user: name={{ mysql_user_name }} host={{ hostvars[item].ansible_fqdn }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
+  with_items: "{{ mysql_user_hosts_group }}"
+  when: mysql_user_hosts_group is defined
diff --git a/roles/mysql/tasks/Ubuntu_18_mysql_server.yml b/roles/mysql/tasks/Ubuntu_18_mysql_server.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e573a9187a9ffc76168341b23c37e04675d7c54b
--- /dev/null
+++ b/roles/mysql/tasks/Ubuntu_18_mysql_server.yml
@@ -0,0 +1,54 @@
+---
+- name: Update the apt package cache since apt install might otherwise fail
+  apt:
+    update_cache: yes
+  become: true
+
+- name: "Installing MySQL for Ubuntu"
+  apt: name="{{ server_packages }}" update_cache=yes state=present
+  become: true
+
+- name: Comment out bind address so it doesn't bind to 127.0.0.1
+  replace:
+    path: /etc/mysql/mariadb.conf.d/50-server.cnf
+    regexp: '(.*bind.*)'
+    replace: '#\1'
+  become: true
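+  # note: the regexp comments out every line containing "bind" (e.g. bind-address) so mariadb listens on all interfaces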
+
+- name: make sure mysql conf directory exists
+  file: dest=/etc/mysql/conf.d state=directory
+  become: true
+  register: mysqldb_confdir_create
+
+- name: "Starting MySQL"
+  service: name={{ sqlServiceName }} state=started enabled=true
+  become: true
+
+- name: Check that the slurm_acct_db_directory exists
+  stat:
+    path: /var/lib/mysql/slurm_acct_db/   #defined in /vars/filesystems.yaml
+  register: slurm_acct_db_directory_result
+
+# this only works on a completely fresh install, because the db ships with a blank root password
+- name: update mysql root password for the localhost root account
+  mysql_user: name=root host=localhost password={{ mysql_root_password }} login_user=root check_implicit_admin=yes
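+  # check_implicit_admin first tries a passwordless root login, which succeeds on a fresh install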
+  become: true
+  become_user: root
+
+- name: "Adding user database"
+  mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }}
+  become: true
+  become_user: root
+
+- name: "Giving privileges to user"
+  mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
+  when: mysql_user_host is defined
+  become: true
+  become_user: root
+
+- name: "Giving privileges to user"
+  mysql_user: name={{ mysql_user_name }} host={{ hostvars[item].ansible_fqdn }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
+  with_items: "{{ mysql_user_hosts_group }}"
+  when: mysql_user_hosts_group is defined
+  become: true
+  become_user: root
\ No newline at end of file
diff --git a/roles/mysql/tasks/main.yml b/roles/mysql/tasks/main.yml
index fd7181ba5206b53ab92a9a0802a239a2f0b0fde2..29bd62272f9c7e68812d95caff8ff4105a31da0c 100644
--- a/roles/mysql/tasks/main.yml
+++ b/roles/mysql/tasks/main.yml
@@ -1,3 +1,4 @@
 ---
 - include_vars: "{{ ansible_distribution }}_{{ ansible_distribution_major_version }}.yml"
-- include: "{{ mysql_type }}.yml"
+- include: "{{ ansible_distribution }}_{{ ansible_distribution_major_version }}_{{ mysql_type }}.yml"
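+# resolves to e.g. CentOS_7_mysql_server.yml or Ubuntu_18_mysql_server.yml (both added in this change)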
+- include: mysql_client.yml
\ No newline at end of file
diff --git a/roles/mysql/tasks/mysql_server.yml b/roles/mysql/tasks/mysql_server.yml
index 5ad085830619f71689d367cf48f9d8bc230e0df0..33f65d3d5eecdc877103b3ba9fa656588b1e7b37 100644
--- a/roles/mysql/tasks/mysql_server.yml
+++ b/roles/mysql/tasks/mysql_server.yml
@@ -1,7 +1,12 @@
 ---
+- name: Update the apt package cache since apt install might otherwise fail
+  apt:
+    update_cache: yes
+  become: true
+  when: ansible_os_family == "Debian"
+
 - name: "Installing MySQL Debian"
-  apt: name="{{ item }}" update_cache=yes cache_valid_time=3600 state=present
-  with_items: "{{ server_packages }}"
+  apt: name="{{ server_packages }}" update_cache=yes state=present
   become: true
   when: ansible_os_family == "Debian"
 
@@ -10,7 +15,7 @@
   with_items: "{{ server_packages }}"
   become: true
   when: ansible_os_family == "RedHat"
-  
+
 - name: make sure mysql conf directory exists
   file: dest=/etc/mysql/conf.d state=directory
   become: true
diff --git a/roles/mysql/vars/Ubuntu_18.yml b/roles/mysql/vars/Ubuntu_18.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ecfd81694c1c68b36bf7d23905c7e2002465a1d6
--- /dev/null
+++ b/roles/mysql/vars/Ubuntu_18.yml
@@ -0,0 +1,15 @@
+server_packages:
+ - python
+ - python-dev
+ - libmariadb-dev
+ - python-pip
+ - libapache2-mod-wsgi
+ - python-mysql.connector
+ - mariadb-server
+ - python-mysqldb
+
+client_packages:
+ - python
+ - mariadb-client
+
+sqlServiceName: "mariadb"
diff --git a/roles/nat_server/tasks/main.yml b/roles/nat_server/tasks/main.yml
index 1e7fd39b588a527b2c65db2246510e87afea19c8..6c56c0ce8b10d821baee37ebf990ceb3ca82c47d 100644
--- a/roles/nat_server/tasks/main.yml
+++ b/roles/nat_server/tasks/main.yml
@@ -23,6 +23,8 @@
   # output looks like
   # 8.8.8.8 via 118.138.254.254 dev eth2  src 118.138.254.185
   shell: /usr/sbin/ip route get 8.8.8.8  | awk '{print $5;exit }'
+  check_mode: no
+  changed_when: false
   register: public_device_name
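+  # for the sample output above, public_device_name.stdout is "eth2";
+  # check_mode: no lets this run even under --check, and changed_when: false
+  # stops the fact-gathering from ever being reported as a change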
 
 #if not defined, default to M3=vlan 114  ;
diff --git a/roles/nfs-client/handlers/main.yml b/roles/nfs-client/handlers/main.yml
index b05f9fbf8929597fb98eca15a29a52f843b32e65..8440d7e651278e9872b5e881535483049554f7db 100644
--- a/roles/nfs-client/handlers/main.yml
+++ b/roles/nfs-client/handlers/main.yml
@@ -3,7 +3,7 @@
   service: name=rpcbind state=restarted
   become: true
 
-- name: restart idmap 
+- name: restart idmap
   service: name=rpcidmapd state=restarted
   become: true
   when: ansible_os_family == "RedHat" and ansible_distribution_major_version < 7
diff --git a/roles/nfs-client/tasks/main.yml b/roles/nfs-client/tasks/main.yml
index 1a3ea5fd54a102c95ec8276b8e59d6187f19ac7d..75a2d98e02b3e1482e4cafa91ddd1f3321612a97 100644
--- a/roles/nfs-client/tasks/main.yml
+++ b/roles/nfs-client/tasks/main.yml
@@ -1,18 +1,29 @@
 ---
 - name: install dependencies
-  yum: name={{ item }} state=installed
+  package:
+    state: present
+    name:
+      - libnfsidmap
+      - nfs-utils
+      - nfstest.noarch
   become: true
-  with_items:
-    - libnfsidmap 
-    - nfs-utils
-    - nfstest.noarch
   when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7"
 
 - name: install dependencies
-  yum: name={{ item }} state=installed
+  package:
+    name:
+      - nfs-utils-lib
+    state: present
   become: true
-  with_items:
-    - nfs-utils-lib
   when: ansible_os_family == "RedHat" and ansible_distribution_major_version < "7"
 
+- name: install dependencies nfs-common ubuntu
+  apt:
+    name: nfs-common
+    state: present
+    update_cache: yes
+  become: true
+  become_user: root
+  when: ansible_distribution == 'Ubuntu'
+
 - include: mountFileSystem.yml
diff --git a/roles/nfs-client/tasks/mountFileSystem.yml b/roles/nfs-client/tasks/mountFileSystem.yml
index 80dc3cb332385fb6154fdef6ded63ca748a47689..c36db919646c24bb6877b9540d8fc27723bb3cbf 100644
--- a/roles/nfs-client/tasks/mountFileSystem.yml
+++ b/roles/nfs-client/tasks/mountFileSystem.yml
@@ -1,9 +1,8 @@
---- 
-
+---
 - name: "Mounting NFS mounts"
   mount: name={{ item.name }} src="{{ item.ipv4 }}:{{ item.src }}" fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
   with_items: "{{ nfsMounts }}"
-  become: true 
-  ignore_errors: true
+  become: true
+  ignore_errors: false
   register: firstMount
   when: nfsMounts is defined
diff --git a/roles/nfs-common/handlers/main.yml b/roles/nfs-common/handlers/main.yml
index f5c928114ee92484c0bb856b936476fbddfb5324..6cdc4ec5ef9648fb49b04622b2d2a9fd57b9279b 100644
--- a/roles/nfs-common/handlers/main.yml
+++ b/roles/nfs-common/handlers/main.yml
@@ -1,4 +1,3 @@
---- 
-- 
-  name: "Run rpcbind service"
+---
+- name: "Run rpcbind service"
   service: "name=rpcbind state=started enabled=yes"
diff --git a/roles/nfs-common/tasks/aptPackages.yml b/roles/nfs-common/tasks/aptPackages.yml
index d8e07d6195e9f1012970c375cc8b6c5c236570cc..d366a7b5b4aa1ed550beadca3f872e7bdd6af996 100644
--- a/roles/nfs-common/tasks/aptPackages.yml
+++ b/roles/nfs-common/tasks/aptPackages.yml
@@ -1,9 +1,8 @@
 ---
-- 
-  name: "Install nfs-utils"
-  with_items:
-    - nfs-common
-    - nfs-kernel-server
-  apt: "name={{ item }} state=present"
+- name: "Install nfs-utils"
+  package:
+    state: present
+    name:
+      - nfs-common
+      - nfs-kernel-server
   become: true
-
diff --git a/roles/nfs-common/tasks/yumPackages.yml b/roles/nfs-common/tasks/yumPackages.yml
index 6a8fd12b59d258f9ad021653350345729ffcb2e2..9fa88e2e0733d2b4bd0ad1f84f292456a9bea875 100644
--- a/roles/nfs-common/tasks/yumPackages.yml
+++ b/roles/nfs-common/tasks/yumPackages.yml
@@ -1,8 +1,8 @@
 --- 
-- 
-  name: "Install nfs-utils"
-  with_items: 
-    - bind-utils
-    - nfs-utils
-  yum: "name={{ item }} state=present"
+- name: "Install nfs-utils"
+  package:
+    name:
+      - bind-utils
+      - nfs-utils
+    state: present
   become: true
diff --git a/roles/nfs-server/tasks/startServer.yml b/roles/nfs-server/tasks/startServer.yml
index 7ac79c0fa9ad39b43463dc2a3c4f6e8b2f1e0304..a2e0cbea827fc4dcc251b05f997dcb75581d4d9e 100644
--- a/roles/nfs-server/tasks/startServer.yml
+++ b/roles/nfs-server/tasks/startServer.yml
@@ -29,7 +29,15 @@
   become: true
   when: ansible_os_family == "RedHat"  and ansible_distribution_major_version == "7"
 
+- name: "Run exportfs"
+  command: /usr/sbin/exportfs -a
+  become: true
+  when: ansible_os_family == "Debian"
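+  # exportfs -a (re)exports every entry in /etc/exports so the shares are
+  # live before nfs-kernel-server is started below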
+
 - name: "Start the Server"
   service: "name=nfs-kernel-server state=started enabled=true"
   become: true
   when: ansible_os_family == "Debian"
diff --git a/roles/set_semaphore_count/tasks/main.yml b/roles/set_semaphore_count/tasks/main.yml
index 01203f6d9fbbfb0de5d564bb045e10430e9a0a07..1e2321d48254e1944813d6d7e56041d39ae85ac2 100644
--- a/roles/set_semaphore_count/tasks/main.yml
+++ b/roles/set_semaphore_count/tasks/main.yml
@@ -1,10 +1,12 @@
 ---
 - name: set the value of the Semaphores
   set_fact:
-       SEM_COUNT: "500 256000 64 10240"
+       SEM_COUNT: "500\t256000\t64\t10240"
   when: SEM_COUNT is not defined
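+# the four fields are the kernel's SEMMSL, SEMMNS, SEMOPM and SEMMNI limits,
+# in the same order as they appear in /proc/sys/kernel/sem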
+
 - name: test value
   debug: msg="Value of semaphores is {{ SEM_COUNT }} "  #"
+
 - name: Place comment line in file
   lineinfile: 
      path: /etc/sysctl.d/88-setSemaphore.conf
@@ -16,6 +18,7 @@
      mode: "u+rwx,o=rx,g=rx"
   become: true
   become_user: root
+  when: ansible_os_family == 'RedHat'
      
 - name: Place comment line in file
   lineinfile: 
@@ -24,6 +27,7 @@
      state: present
   become: true
   become_user: root
+  when: ansible_os_family == 'RedHat'
 
 - name: Place comment line in file
   lineinfile: 
@@ -36,9 +40,23 @@
      mode: "u+rwx,o=rx,g=rx"
   become: true
   become_user: root
+  when: ansible_os_family == 'RedHat'
+
+- name: get current value
+  command: cat /proc/sys/kernel/sem
+  register: current_sem
+  changed_when: current_sem.stdout not in SEM_COUNT
+  check_mode: no
+  when: ansible_os_family == 'RedHat'
+
+#- debug:
+#    var: current_sem
 
 - name: set semaphore count now 
   shell: "/usr/bin/echo {{ SEM_COUNT }}   > /proc/sys/kernel/sem"
   become: true
   become_user: root
+  when:
+  - current_sem.changed
+  - ansible_os_family == 'RedHat'
 
diff --git a/roles/set_timezone/tasks/main.yml b/roles/set_timezone/tasks/main.yml
index 177969103af146ee970584e774bf2d4731209e77..4d5d9f521e972cb7b69b9258e43d55da9ad70a19 100644
--- a/roles/set_timezone/tasks/main.yml
+++ b/roles/set_timezone/tasks/main.yml
@@ -3,16 +3,35 @@
   template: src=ntp.conf.j2 dest=/etc/ntp.conf mode=644 owner=root group=root
   become: true
   become_user: root
+  register: ntpinstall
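+  # the registered result drives the restarts below: ntpd is only bounced
+  # when ntp.conf was actually rewritten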
 
-- name: restart ntpd
+- name: restart ntpd redhat
   service: name=ntpd state=restarted 
   become: true
   become_user: root
+  when:
+    - ansible_os_family == "RedHat"
+    - ntpinstall.changed
 
-- name: ensure ntpd is enabled and started   
+- name: ensure ntpd is enabled and started redhat
   service: name=ntpd state=started enabled=yes   
   become: true   
   become_user: root
+  when: ansible_os_family == "RedHat"
+
+- name: restart ntpd ubuntu
+  service: name=ntp state=restarted
+  become: true
+  become_user: root
+  when:
+    - ansible_os_family == "Debian"
+    - ntpinstall.changed
+
+- name: ensure ntpd is enabled and started ubuntu
+  service: name=ntp state=started enabled=yes
+  become: true
+  become_user: root
+  when: ansible_os_family == "Debian"
 
 - name: set local timezone
   file: path=/etc/localtime state=link src={{ TIMEZONE_PATH }}
diff --git a/roles/slurm-common/files/scripts/nvidia-probe.py b/roles/slurm-common/files/scripts/nvidia-probe.py
index 7fd743ef41b91c85842973e623e1cbfd9f3c6535..7bc00899e6f3416003aa8dea5c00519f3e78bf4c 100755
--- a/roles/slurm-common/files/scripts/nvidia-probe.py
+++ b/roles/slurm-common/files/scripts/nvidia-probe.py
@@ -1,4 +1,4 @@
-#!/bin/env python
+#!/usr/bin/env python
 # prints  a list of NIDIA devices and their type in json format for 
 # parsing by ansible program; 
 # fields are 'name':'gpu' (fixed)
diff --git a/roles/slurm-common/tasks/createSlurmDirectories.yml b/roles/slurm-common/tasks/createSlurmDirectories.yml
index 738956823167ca062efe85940774a45c9a547423..ba82cd78ea1dae229ba95a63a2f03a1131e7ec29 100644
--- a/roles/slurm-common/tasks/createSlurmDirectories.yml
+++ b/roles/slurm-common/tasks/createSlurmDirectories.yml
@@ -42,7 +42,6 @@
 - name: create shared state directory
   file: path={{slurmsharedstatedir }} state=directory owner=slurm group=slurm mode=750
   become: true
-  run_once: true
   when: usesharedstatedir is defined and usesharedstatedir
 
 - name: symlink shared state dir
diff --git a/roles/slurm-common/tasks/installCgroup.yml b/roles/slurm-common/tasks/installCgroup.yml
index c7f4253d3dfcb0540421c27249d7aee0a4920118..6ba970cb140351a2f44f05eafd9638404fe2615d 100644
--- a/roles/slurm-common/tasks/installCgroup.yml
+++ b/roles/slurm-common/tasks/installCgroup.yml
@@ -3,25 +3,22 @@
   with_items:
     - libcgroup
   become: True
-  become_method: sudo
   when: ansible_os_family == "RedHat"
 
 - name: apt install cgroup 
-  apt: name={{ item }} state=installed update_cache=yes
-  with_items:
-    - cgmanager
-    - cgmanager-utils
-    - libcgmanager0 
+  package:
+    state: present
+    name:
+    - libcgroup1
+    - cgroupfs-mount
+    - cgroup-tools
   when: ansible_os_family == "Debian"    
   become: True
-  become_method: sudo
 
 - name: config cgroup.conf file
   template: dest={{ slurm_dir }}/etc/cgroup.conf src=cgroup.conf.j2 mode=644
   become: True
-  become_method: sudo
 
 - name: config cgroup_allowed_devices.conf file
   template: dest={{ slurm_dir }}/etc/cgroup_allowed_devices.conf src=cgroup_allowed_devices.conf.j2 mode=644
   become: True
-  become_method: sudo
diff --git a/roles/slurm-common/tasks/installMungeFromSource.yml b/roles/slurm-common/tasks/installMungeFromSource.yml
index 656d35c9ff04a253224e44c9031e2c37c67c777e..7a24698ec82a8c6eac5c263891e8c00536e6e85e 100644
--- a/roles/slurm-common/tasks/installMungeFromSource.yml
+++ b/roles/slurm-common/tasks/installMungeFromSource.yml
@@ -27,11 +27,16 @@
     creates: "{{ munge_dir }}/bin/munge"
   when: not munge_binary.stat.exists
 
-- name: set use_systemd
+- name: set use_systemd Redhat
   set_fact: 
     use_systemd: True
   when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and ( ansible_distribution_major_version == "7") 
 
+- name: set use_systemd Debian
+  set_fact:
+    use_systemd: True
+  when: ansible_os_family == "Debian"
+
 - name: copy init script
   template: dest=/etc/init.d/munge src=munge.initd.j2 mode=755
   become: true
diff --git a/roles/slurm-common/tasks/main.yml b/roles/slurm-common/tasks/main.yml
index d2351af627d7d6b32aa7d720d236c3a5139d84d5..1e53779ccd9d716cdaa1bce1b6d822c954659748 100644
--- a/roles/slurm-common/tasks/main.yml
+++ b/roles/slurm-common/tasks/main.yml
@@ -18,41 +18,44 @@
 - include: createSlurmDirectories.yml
 
 - name: install deps
-  yum: name={{ item }} state=present
-  with_items:
-    - perl
-    - perl-DBI
-    - openssl-devel
-    - gcc
-    - rpm-build
-    - wget
-    - openssl-devel
-    - readline-devel
-    - pam-devel
-    - perl-ExtUtils-MakeMaker
-    - bzip2-devel
-    - hwloc
-    - hwloc-devel
-    - lua
-    - lua-devel
+  package:
+    state: present
+    name:
+      - perl
+      - perl-DBI
+      - openssl-devel
+      - gcc
+      - rpm-build
+      - wget
+      - openssl-devel
+      - readline-devel
+      - pam-devel
+      - perl-ExtUtils-MakeMaker
+      - bzip2-devel
+      - hwloc
+      - hwloc-devel
+      - lua
+      - lua-devel
   become: true
   when: ansible_os_family == "RedHat"
 
 - name: install deps
-  apt: name={{ item }} state=installed update_cache=yes
-  become: true
-  with_items:
-    - gcc
-    - wget
-    - libssl-dev
-    - libpam0g-dev
-    - libbz2-dev
-    - make
-    - perl
-    - libdbi-perl
-    - lua5.2
-    - hwloc
-    - libhwloc-dev
+  package: 
+    state: present
+    name:
+      - gcc
+      - wget
+      - libssl-dev # downgrade needed for bionic, see https://github.com/dun/munge/issues/54
+      - libpam0g-dev
+      - libbz2-dev
+      - make
+      - perl
+      - libdbi-perl
+      - lua5.2
+      - liblua5.2-dev
+      - hwloc
+      - libhwloc-dev
+  become: true
   when: ansible_os_family == "Debian"
 
 - include: installMungeFromSource.yml
diff --git a/roles/slurm-mysql-config/tasks/main.yml b/roles/slurm-mysql-config/tasks/main.yml
index 52f06b184ac0f5487e09b633a97b2db40e712f2a..6be48e8ad7c042b24912166da59b7c9b5b21ab2d 100644
--- a/roles/slurm-mysql-config/tasks/main.yml
+++ b/roles/slurm-mysql-config/tasks/main.yml
@@ -2,3 +2,10 @@
   template: src=slurm.cnf.j2 dest=/etc/my.cnf.d/slurm.cnf
   become: true
   become_user: root
+  when: ansible_os_family == "RedHat"
+
+- name: "Copy slurm db tuning config"
+  template: src=slurm.cnf.j2 dest=/etc/mysql/mariadb.conf.d/slurm.cnf
+  become: true
+  become_user: root
+  when: ansible_os_family == "Debian"
\ No newline at end of file
diff --git a/roles/slurm-start/tasks/main.yml b/roles/slurm-start/tasks/main.yml
index df0ff262a08d5c63e85f3c0efb4e19082b4be8c2..33d9ca1690932920b538b1e3532f767b35e1323a 100644
--- a/roles/slurm-start/tasks/main.yml
+++ b/roles/slurm-start/tasks/main.yml
@@ -10,7 +10,7 @@
     slurmd_enabled: True
   when: slurmd_enabled is not defined
 
-- name: install slurmdbd init
+- name: install slurmdbd init
   template: src=slurmdbd.initd.j2 dest=/etc/init.d/slurmdbd mode=755
   become: true
   when: use_systemd is not defined and start_slurmdbd is defined
@@ -56,30 +56,60 @@
   become: true
   when: use_systemd is defined and start_slurmdbd is defined and slurmdbd_service_installed.changed
 
+- name: make sure munge is started
+  service: name=munge state=started enabled=yes
+  become: true
+  when: use_systemd is defined and start_slurmdbd is defined
+
 - name: start munge
   service: name=munge state=restarted enabled=yes
   become: true
+  when: use_systemd is defined and ( slurmdbd_service_installed.changed or slurmctld_service_installed.changed or slurmd_service_installed.changed)
 
 - name: start slurmdbd
-  service: name=slurmdbd state=restarted enabled=no
+  service: name=slurmdbd state=started enabled={{ start_slurmdbd }}
   become: true
   when: start_slurmdbd is defined
 
-- name: "create cluster in slurm db"
-  shell:  "{{slurm_dir}}/bin/sacctmgr -i create cluster {{ clustername }}"
+- name: restart slurmdbd
+  service: name=slurmdbd state=restarted enabled={{ start_slurmdbd }}
   become: true
-  ignore_errors: true
+  when: start_slurmdbd is defined and slurmdbd_service_installed.changed
 
 - name: start slurmctl
-  service: name=slurmctld state=restarted enabled=no
+  service: name=slurmctld state=started enabled={{ start_slurmctld }}
   become: true
   when: use_systemd is defined and start_slurmctld is defined
 
+- name: restart slurmctl
+  service: name=slurmctld state=restarted enabled={{ start_slurmctld }}
+  become: true
+  when: use_systemd is defined and start_slurmctld is defined and slurmctld_service_installed.changed
+
+- name: "count clusters in slurm db"
+  shell:  "{{slurm_dir}}/bin/sacctmgr show cluster -p | wc -l"
+  register: slurm_cluster_count
+  check_mode: no
+  changed_when: false
+
+- debug:
+    var: slurm_cluster_count
+
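+# `sacctmgr show cluster -p` always prints a header line, so a line count of
+# exactly 1 means no cluster is registered yet and it is safe to create one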
+- name: "create cluster in slurm db"  #needs munge to run
+  shell:  "{{slurm_dir}}/bin/sacctmgr -i create cluster {{ clustername }}"
+  become: true
+  when: slurm_cluster_count.stdout == '1'
+
 - name: start slurmd
-  service: name=slurmd state=restarted enabled={{ slurmd_enabled }}
+  service: name=slurmd state=started enabled={{ slurmd_enabled }}
   become: true
   when: use_systemd is defined and start_slurmd is defined
 
+- name: restart slurmd
+  service: name=slurmd state=restarted enabled={{ slurmd_enabled }}
+  become: true
+  when: use_systemd is defined and start_slurmd is defined and slurmd_service_installed.changed
+
 - name: start slurm
   service: name=slurm state=restarted enabled={{ slurmd_enabled }}
   become: true
diff --git a/roles/slurmdb-config/tasks/main.yml b/roles/slurmdb-config/tasks/main.yml
index c189183bab51ca97da66ddbae06aba5c73931bed..f9d489d2133658ab89a3c36759355dfdbfc8d8ef 100644
--- a/roles/slurmdb-config/tasks/main.yml
+++ b/roles/slurmdb-config/tasks/main.yml
@@ -1,11 +1,12 @@
 ---
 - name: install deps in control node
-  yum: name={{ item }} state=installed
+  package:
+    state: present
+    name:
+      - mysql
+      - mysql-devel
+      - MySQL-python
   become: true
-  with_items:
-    - mysql
-    - mysql-devel
-    - MySQL-python
   when: ansible_os_family == "RedHat"
 
 - name: install deps in control node
@@ -26,17 +27,17 @@
   become: true
 
 - name: create slurm user # this is duplicated from slurm-common
-  user: 
-    name: slurm 
-    group: slurm 
-    system: yes 
+  user:
+    name: slurm
+    group: slurm
+    system: yes
     createhome: no
     uid: 497
   become: true
 
 - name: install slurmdb.conf
-  copy: 
-    src: files/slurmdbd.conf 
+  copy:
+    src: files/slurmdbd.conf
     dest: "{{ slurm_dir }}/etc/slurmdbd.conf"
     owner: slurm
     group: slurm
@@ -46,8 +47,8 @@
 
 
 - name: install slurmdbd.conf
-  copy: 
-    src: slurmdbd.conf 
+  copy:
+    src: slurmdbd.conf
     dest: /etc/slurm/slurmdbd.conf
     owner: slurm
     group: slurm
diff --git a/roles/systemd-nvidia-uvm/tasks/main.yml b/roles/systemd-nvidia-uvm/tasks/main.yml
index 2caecf4dd2605391e32a792b6fda9c9ca1a64f3c..7d5e4db867d3e27a3547447dbc4f416f1dde8429 100644
--- a/roles/systemd-nvidia-uvm/tasks/main.yml
+++ b/roles/systemd-nvidia-uvm/tasks/main.yml
@@ -1,4 +1,11 @@
 ---
+- name: install nvidia-modprobe on ubuntu
+  package:
+    name: nvidia-modprobe
+    state: present
+  become: true
+  when: ansible_os_family == 'Debian'
+
 - name: Copy Files
   become: true
   become_user: root
diff --git a/roles/telegraf/tasks/main.yml b/roles/telegraf/tasks/main.yml
index 13701898cd1ae4c091aa148bc8928d091834b0b6..830e211629565344a08b9a560c40f308d01d2318 100644
--- a/roles/telegraf/tasks/main.yml
+++ b/roles/telegraf/tasks/main.yml
@@ -48,8 +48,6 @@
   become: true
   become_user: root
 
-
-
 - name: Install Telegraf config
   template:
     src: telegraf.conf.j2
diff --git a/roles/upgrade/tasks/main.yml b/roles/upgrade/tasks/main.yml
index 85255ce6e8c1be524eb118ff7846767f1304d8f8..c103b9f37f201beadcfeba92b661af61e67020d3 100644
--- a/roles/upgrade/tasks/main.yml
+++ b/roles/upgrade/tasks/main.yml
@@ -6,10 +6,10 @@
   become_user: root
   when: ansible_os_family=="Debian"
 
-- name: apt-get upgrade
-  apt: upgrade=safe
-  become: true
-  when: ansible_os_family=="Debian"
+#- name: apt-get upgrade
+#  apt: upgrade=safe
+#  become: true
+#  when: ansible_os_family=="Debian"
 
 - name: yum remove
   yum: name=ipa-client-common state=absent
@@ -28,6 +28,8 @@
   become: true
   become_user: root
   when: ansible_os_family == 'RedHat'
+  register: yumtransactioncleanup
+  changed_when: "'No unfinished transactions left.' not in yumtransactioncleanup.stdout"
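+  # reports "changed" only when the cleanup actually had unfinished yum
+  # transactions to complete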
 
 - name: yum upgrade
   yum: name=* state=latest
diff --git a/scripts/get_or_make_passwd.py b/scripts/get_or_make_passwd.py
deleted file mode 100755
index 5242f1f9c52fb93d8016f48598a9192149aef10c..0000000000000000000000000000000000000000
--- a/scripts/get_or_make_passwd.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/python
-import random
-import sys
-import string
-def get_passwd(f,passname):
-    f.seek(0)
-    for line in f.readlines():
-        (key,passwd)=line.split(':')
-        if key==passname:
-            f.close()
-            return passwd.rstrip()
-    return None
-
-def mk_passwd(f,passname):
-    passwd=''.join(random.choice(string.ascii_uppercase + string.digits+string.ascii_lowercase) for _ in range(16))
-    f.write("%s:%s\n"%(passname,passwd))
-    return passwd
-   
-try:
-    f=open('../passwd.txt','at+')
-except:
-    f=open('./passwd.txt','at+')
-passname = sys.argv[1]
-passwd = get_passwd(f,passname)
-if passwd == None:
-    passwd = mk_passwd(f,passname)
-print passwd
-f.close()
diff --git a/scripts/make_inventory.py b/scripts/make_inventory.py
index 48bd21d85e1a7314d0982d062227c33ac2b87783..09451fa855ad853fef151c1fba2fc2a3dc8c45ff 100755
--- a/scripts/make_inventory.py
+++ b/scripts/make_inventory.py
@@ -56,9 +56,14 @@ def gatherInfo(md_key,md_value,authDict,project_id,inventory):
                         else:
                             inventory['_meta']['hostvars'][hostname]['public_host'] = server.networks[nn][0]
                 if network_name == None:
-                    network_name = list(server.networks.keys())[0]
-                    
-                inventory['_meta']['hostvars'][hostname]['ansible_host'] = server.networks[network_name][0]
+                    try:
+                        network_name = list(server.networks.keys())[0]
+                    except Exception:
+                        print("An error occurred while processing ", server)
+                try:
+                    inventory['_meta']['hostvars'][hostname]['ansible_host'] = server.networks[network_name][0]
+                except Exception:
+                    print("An error occurred while processing ", server)
 
         else:
             continue
@@ -110,14 +115,22 @@ if __name__ == "__main__":
     enabled_projects = [ x for x in projects if x.enabled ]
 
     inventory_list = Parallel(n_jobs=len(projects))(delayed(gatherInfo) (md_key,md_value, authDict, proj.id, inventory) for proj in enabled_projects)
+
     inventory={}
 
     for i in inventory_list:
         merge(i,inventory)
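+    # sorting the group host lists (and dumping with sort_keys=True below)
+    # keeps the generated inventory stable between runs, so it diffs cleanly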
+    for key in inventory:
+        if key != '_meta':
+            inventory[key].sort()
     if not inventory['_meta']['hostvars']:
         print("I could not find any resouces tagged with {}: {}".format(md_key,md_value))
     else:
         if static:
-            print( "#!/bin/bash\necho '"+json.dumps(inventory,indent=4)+"'")
+            print( "#!/bin/bash\necho '"+json.dumps(inventory,indent=4, sort_keys=True)+"'")
         else:
-            print(json.dumps(inventory))
+            print(json.dumps(inventory, sort_keys=True))
diff --git a/scripts/userData.sh b/scripts/userData.sh
deleted file mode 100644
index 545e92248baa6d17f64115cc634c87b689ad0ae8..0000000000000000000000000000000000000000
--- a/scripts/userData.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-mkdir /local_home
-usermod -m -d /local_home/ec2-user ec2-user
-
diff --git a/syncNFS.yml b/syncNFS.yml
deleted file mode 100644
index 9095bfc8008c18aa940c5a63e760685b67f56fae..0000000000000000000000000000000000000000
--- a/syncNFS.yml
+++ /dev/null
@@ -1,14 +0,0 @@
---- 
-- 
-  hosts: openvpn-clients
-  remote_user: ec2-user
-  roles:
-    - syncExports
-    - nfs-client
-  become: true
-  vars: 
-    nfs_server: "vm-118-138-240-224.erc.monash.edu.au"
-    openvpn_servers: 
-      - vm-118-138-240-224.erc.monash.edu.au
-    x509_ca_server: vm-118-138-240-224.erc.monash.edu.au
-
diff --git a/templates/easy-rsa/vars.j2 b/templates/easy-rsa/vars.j2
deleted file mode 100644
index 77adaead4782e8dcc923bf902401b7ad725623f5..0000000000000000000000000000000000000000
--- a/templates/easy-rsa/vars.j2
+++ /dev/null
@@ -1,80 +0,0 @@
-# easy-rsa parameter settings
-
-# NOTE: If you installed from an RPM,
-# don't edit this file in place in
-# /usr/share/openvpn/easy-rsa --
-# instead, you should copy the whole
-# easy-rsa directory to another location
-# (such as /etc/openvpn) so that your
-# edits will not be wiped out by a future
-# OpenVPN package upgrade.
-
-# This variable should point to
-# the top level of the easy-rsa
-# tree.
-export EASY_RSA="/etc/easy-rsa/2.0"
-
-#
-# This variable should point to
-# the requested executables
-#
-export OPENSSL="openssl"
-export PKCS11TOOL="pkcs11-tool"
-export GREP="grep"
-
-
-# This variable should point to
-# the openssl.cnf file included
-# with easy-rsa.
-export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
-
-# Edit this variable to point to
-# your soon-to-be-created key
-# directory.
-#
-# WARNING: clean-all will do
-# a rm -rf on this directory
-# so make sure you define
-# it correctly!
-export KEY_DIR="$EASY_RSA/keys"
-
-# Issue rm -rf warning
-echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR
-
-# PKCS11 fixes
-export PKCS11_MODULE_PATH="dummy"
-export PKCS11_PIN="dummy"
-
-# Increase this to 2048 if you
-# are paranoid.  This will slow
-# down TLS negotiation performance
-# as well as the one-time DH parms
-# generation process.
-export KEY_SIZE={{ keySize }}
-
-# In how many days should the root CA key expire?
-export CA_EXPIRE=3650
-
-# In how many days should certificates expire?
-export KEY_EXPIRE=3650
-
-# These are the default values for fields
-# which will be placed in the certificate.
-# Don't leave any of these fields blank.
-export KEY_COUNTRY={{ countryName }}
-export KEY_PROVINCE={{ reginalName }} 
-export KEY_CITY={{ cityName }} 
-export KEY_ORG={{ organizationName }} 
-export KEY_EMAIL={{ emailAddress }} 
-export KEY_OU={{ organizationUnit }}
-
-# X509 Subject Field
-export KEY_NAME="EasyRSA"
-
-# PKCS11 Smart Card
-# export PKCS11_MODULE_PATH="/usr/lib/changeme.so"
-# export PKCS11_PIN=1234
-
-# If you'd like to sign all keys with the same Common Name, uncomment the KEY_CN export below
-# You will also need to make sure your OpenVPN server config has the duplicate-cn option set
-# export KEY_CN="CommonName"