diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 522596d1e5b084738fea4f88254fa2b214335692..ce5e50faf8916eb3477ece0c0c9a17a9423bc3dc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -24,7 +24,7 @@ trigger_pipeline_in_Clusterbuild:
   - ansible
   script:
   - echo ${CI_JOB_TOKEN}
-  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=aciab_upstream https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # ID is from clusterbuild
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # ID is from clusterbuild
 
 
 trigger_pipeline_in_monarch:
@@ -33,7 +33,7 @@ trigger_pipeline_in_monarch:
   - ansible
   script:
   - echo ${CI_JOB_TOKEN}
-  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=cicd https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # ID is from monarch
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # ID is from monarch
 
 
 yamllint:
@@ -147,6 +147,7 @@ tests:
     - bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"
     
 extended:
   stage: extended
@@ -159,7 +160,7 @@ extended:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME ${CI_PROJECT_NAME}
   only:
     variables:
       - $EXTENDED != null
@@ -180,7 +181,7 @@ manual_cluster_spawn:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME ${CI_PROJECT_NAME}
     - openstack stack list
     - export STACKNAME=$MANUAL_STACKNAME
     - sleep 25
@@ -228,11 +229,4 @@ clean:
 #   after_script:
 #     - sleep 20 # artifically wait a bit to make sure it is really dead
 
-#     - openstack stack list
-#     - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
-#     - openstack stack list
-#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
-#     - openstack stack list
-#   after_script:
-#     - sleep 20 # artifically wait a bit to make sure it is really dead
 
diff --git a/CICD/heat/gc_HOT.yaml b/CICD/heat/gc_HOT.yaml
index 861e5dc0b68da14dcfb5128bacf002b0a0bd6baa..ae74244ea11aa8f446a5fd777ed7366424cc3e95 100644
--- a/CICD/heat/gc_HOT.yaml
+++ b/CICD/heat/gc_HOT.yaml
@@ -2,6 +2,7 @@
 heat_template_version: 2013-05-23
 description: "A simple template to boot a cluster of desktops (LoginNode, ManagementNodes and Desktop Nodes)"
 # avz parameters disabled. they are working but I want just more options than monash-02. I would like to have a parameter that says "I don't care"
+# This template requires the security groups created by gc_secgroups.hot; create that stack beforehand.
 
 parameters:
   ubuntu_1804_image_id:
@@ -33,192 +34,33 @@ parameters:
   Flavour:
     type: string
     default: m3.xsmall
-
+  SlurmSecGroupID:
+    type: string
+    label: Slurm security group ID
+    default: 6e7a09b0-981c-424f-a9b7-9fd4f4d8f416
+  NFSSecGroupID:
+    type: string
+    label: NFS security group ID
+    default: b07a75a3-830c-4778-96c6-8a3732ec7d6c
+  MySQLSecGroupID:
+    type: string
+    label: MySQL security group ID
+    default: 4478f245-de5c-4177-bcbd-6fa661032cbe
+  SSHMonashSecGroupID:
+    type: string
+    label: Monash-only SSH security group ID
+    default: c15081f4-c756-4c57-b8cf-388dd7fdbcd4
+  HTTPsSecGroupID:
+    type: string
+    label: HTTP/HTTPS security group ID
+    default: 2d4510c3-ae73-44ea-9700-b6f0a00bf7aa
+  PublicSSHSecGroupID:
+    type: string
+    label: Public SSH security group ID
+    default: 8a029c04-08ce-40f1-a705-d45a2077e27d
 
 resources:
 
-  SlurmSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "heatslurmsecgroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 12000,
-               port_range_max: 12999},
-              { protocol: tcp,
-               port_range_min: 6817,
-               port_range_max: 6819},
-              { protocol: tcp,
-               port_range_min: 1019,
-               port_range_max: 1019}]
-  NFSSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "heatnfssecgroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 2049,
-               port_range_max: 2049,
-               remote_mode: "remote_group_id"},
-              { protocol: tcp,
-               port_range_min: 111,
-               port_range_max: 111,
-               remote_mode: "remote_group_id"},
-              { protocol: udp,
-               port_range_min: 2049,
-               port_range_max: 2049,
-               remote_mode: "remote_group_id"},
-              { protocol: udp,
-               port_range_min: 111,
-               port_range_max: 111,
-               remote_mode: "remote_group_id"} ]
-  MySQLSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "heatmysqlsecgroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 3306,
-               port_range_max: 3306,
-               remote_mode: "remote_group_id"} ]
-  SSHMonashSecGroup:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "SSHMonashSecGroup"
-     rules: [ { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 59.191.208.0/20
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 114.30.64.0/21
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 118.138.0.0/16
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 118.139.0.0/17
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 130.194.0.0/16
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 203.0.141.0/24
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 203.6.141.0/24
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 203.23.136.0/24
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 59.191.192.0/20
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 202.158.212.32/27
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 130.194.13.96/27
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 49.127.0.0/16
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 202.58.246.0/24
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 202.94.69.0/24
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 103.35.228.0/22
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 43.246.232.0/22
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 103.35.228.0/22
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 59.191.208.0/20
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 49.127.0.0/16
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 59.191.220.0/22
-     },
-     { protocol: tcp,
-               port_range_min: 22,
-               port_range_max: 22,
-               direction: ingress,
-               remote_ip_prefix: 43.246.232.0/22
-     } ]
-
-  webaccess:
-   type: "OS::Neutron::SecurityGroup"
-   properties:
-     name: "webaccess"
-     rules: [ { protocol: tcp,
-               port_range_min: 80,
-               port_range_max: 80},
-              { protocol: tcp,
-               port_range_min: 443,
-               port_range_max: 443} ]
 
   SQLNode0:
    type: "OS::Nova::Server"
@@ -229,7 +71,7 @@ resources:
     flavor: m3.xsmall
     image: { get_param: centos_7_image_id }
     key_name: { get_param: ssh_key }
-    security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: MySQLSecGroup }, { get_resource: NFSSecGroup } ]
+    security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: MySQLSecGroupID }, { get_param: NFSSecGroupID } ]
     metadata:
      ansible_host_groups: [ SQLNodes, NFSNodes ]
      ansible_ssh_user: ec2-user
@@ -273,7 +115,7 @@ resources:
     volume_id: { get_resource: DBVolume }
     instance_uuid: { get_resource: SQLNode0 }
 
-  MgmtNodesC:
+  MgmtNodesCentos7:
    type: "OS::Heat::ResourceGroup"
    properties:
     count: 1
@@ -286,7 +128,7 @@ resources:
         mynodename:
          list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmt%index%' ]]
         ssh_key: { get_param: ssh_key }
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
         project_name: { get_param: project_name }
 
   MgmtNodesU:
@@ -302,7 +144,7 @@ resources:
         mynodename:
          list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmtU%index%' ]]
         ssh_key: { get_param: ssh_key }
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
         project_name: { get_param: project_name }
 
   LoginNodesC:
@@ -318,7 +160,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'login%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: PublicSSHSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ LoginNodes ]
        ansible_ssh_user: ec2-user
@@ -329,7 +171,7 @@ resources:
   LoginNodesU:
    type: "OS::Heat::ResourceGroup"
    properties:
-    count: 0
+    count: 1
     resource_def:
      type: "OS::Nova::Server"
      properties:
@@ -339,7 +181,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'loginU%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ LoginNodes ]
        ansible_ssh_user: ubuntu
@@ -360,7 +202,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopc%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ DesktopNodes, VisNodes, ComputeNodes ]
        ansible_ssh_user: ec2-user
@@ -380,8 +222,8 @@ resources:
       image: { get_param: ubuntu_1804_image_id }
       key_name: { get_param: ssh_key }
       name:
-       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec7%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+       list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computeU%index%' ]]
+      security_groups: [ default, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: SSHMonashSecGroupID } ]
       metadata:
        ansible_host_groups: [ ComputeNodes ]
        ansible_ssh_user: ubuntu
@@ -389,7 +231,7 @@ resources:
       networks:
        - network: { get_param: NetID }
 
-  ComputeNodesC:
+  ComputeNodesCentos7:
    type: "OS::Heat::ResourceGroup"
    properties:
     count: 1
@@ -402,7 +244,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec7%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ ComputeNodes ]
        ansible_ssh_user: ec2-user
@@ -413,7 +255,7 @@ resources:
   UbuntuDesktopNodes:
    type: "OS::Heat::ResourceGroup"
    properties:
-    count: 0
+    count: 1
     resource_def:
      type: "OS::Nova::Server"
      properties:
@@ -423,7 +265,7 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopu%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, K1, VisNodes ]
        ansible_ssh_user: ubuntu
@@ -465,10 +307,10 @@
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computerhel%index%' ]]
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
        ansible_host_groups: [ DGXRHELNodes ]
        ansible_ssh_user: cloud-user
        project_name: { get_param: project_name }
       networks:
        - network: { get_param: NetID }
diff --git a/CICD/heat/gc_secgroups.hot b/CICD/heat/gc_secgroups.hot
new file mode 100644
index 0000000000000000000000000000000000000000..ad6e779035e99df7fd93639f2a0179848cc69a5e
--- /dev/null
+++ b/CICD/heat/gc_secgroups.hot
@@ -0,0 +1,173 @@
+---
+heat_template_version: 2013-05-23
+description: "Security groups for the cluster. Call with: openstack stack [ create | update ] --wait --template gc_secgroups.hot SecurityGroupStack"
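+# Creates the shared security groups consumed by gc_HOT.yaml. Once created,
+# pass each group's UUID to the matching *SecGroupID parameter of gc_HOT.yaml
+# (or update that template's defaults).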
+
+resources:
+
+  SlurmSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatslurmsecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 12000,
+               port_range_max: 12999},
+              { protocol: tcp,
+               port_range_min: 6817,
+               port_range_max: 6819},
+              { protocol: tcp,
+               port_range_min: 1019,
+               port_range_max: 1019}]
+  NFSSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatnfssecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 2049,
+               port_range_max: 2049,
+               remote_mode: "remote_group_id"},
+              { protocol: tcp,
+               port_range_min: 111,
+               port_range_max: 111,
+               remote_mode: "remote_group_id"},
+              { protocol: udp,
+               port_range_min: 2049,
+               port_range_max: 2049,
+               remote_mode: "remote_group_id"},
+              { protocol: udp,
+               port_range_min: 111,
+               port_range_max: 111,
+               remote_mode: "remote_group_id"} ]
+  MySQLSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "heatmysqlsecgroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 3306,
+               port_range_max: 3306,
+               remote_mode: "remote_group_id"} ]
+  PublicSSHSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "SSHSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22} ]
+  SSHMonashSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "SSHMonashSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 59.191.208.0/20
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 114.30.64.0/21
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 118.138.0.0/16
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 118.139.0.0/17
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 130.194.0.0/16
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 203.0.141.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 203.6.141.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 203.23.136.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 59.191.192.0/20
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 202.158.212.32/27
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 130.194.13.96/27
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 49.127.0.0/16
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 202.58.246.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 202.94.69.0/24
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 103.35.228.0/22
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 43.246.232.0/22
+     },
+     { protocol: tcp,
+               port_range_min: 22,
+               port_range_max: 22,
+               direction: ingress,
+               remote_ip_prefix: 59.191.220.0/22
+     } ]
+  HTTPsSecGroup:
+   type: "OS::Neutron::SecurityGroup"
+   properties:
+     name: "HTTPsSecGroup"
+     rules: [ { protocol: tcp,
+               port_range_min: 80,
+               port_range_max: 80},
+              { protocol: tcp,
+               port_range_min: 443,
+               port_range_max: 443} ]
diff --git a/CICD/heat/heatcicdwrapper.sh b/CICD/heat/heatcicdwrapper.sh
index 26afdebda88b5ba150f9e947b997f21e7f6b461d..e6554d664e78d68acbf5bc2ec9160a12541fee00 100644
--- a/CICD/heat/heatcicdwrapper.sh
+++ b/CICD/heat/heatcicdwrapper.sh
@@ -8,14 +8,9 @@
     exit 1
 }
 
-if [ "$#" -ne 2 ]; then
-    echo "Illegal number of parameters expecting 2"
-    usage
-fi
-
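+# invoked as: heatcicdwrapper.sh <function> <stackname> <project_name>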
 STACKNAME=$2
 
-
 if [[ "$STACKNAME" == "CICD"* ]]; then
   echo "CICD found in stackname. doing nothing"
 else
diff --git a/CICD/heat/resource_registry.yaml b/CICD/heat/resource_registry.yaml
index 0638b887c8c09d5d6a98f51a34d3b4eeb6e9aafb..421a309d5ce769fdaa0cfcf590fc927a4104eab1 100644
--- a/CICD/heat/resource_registry.yaml
+++ b/CICD/heat/resource_registry.yaml
@@ -1,2 +1,2 @@
 resource_registry:
-  My::Server::MgmtNode: mgmtnode_HOT.yaml
+  My::Server::MgmtNode: ./mgmtnode_HOT.yaml
diff --git a/CICD/tests/run_tests.sh b/CICD/tests/run_tests.sh
index d063e98d1d7e4617882bb14a5e1c51d9e8cda381..bfb8278ee1a8f2a8534236990d6ac11455ebb7e7 100644
--- a/CICD/tests/run_tests.sh
+++ b/CICD/tests/run_tests.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 function usage {
-    echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql}" INVENTORY_FILE KEY
+    echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql, slurm}" INVENTORY_FILE KEY
     exit 1
 }
 
@@ -23,22 +23,4 @@ function run_them ()
     done
 }
 
-# I think I am just checking the if $1 is one of the listes strings (see usage) not proud of this at all but works
-case "$1" in
-        all)
-        ;;
-        ComputeNodes)
-        ;;
-        ManagementNodes)
-        ;;
-        NFSNodes)
-        ;;
-        SQLNodes)
-        ;;
-        LoginNodes)
-        ;;
-        *)
-        usage
-esac
-
 run_them $1 $2 $3
\ No newline at end of file
diff --git a/CICD/tests/slurm/srunHostname.yml b/CICD/tests/slurm/srunHostname.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2e1c0886b847674b68cf3cd5f186cc50cfa54bdf
--- /dev/null
+++ b/CICD/tests/slurm/srunHostname.yml
@@ -0,0 +1,57 @@
+---
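+# Slurm end-to-end test: create a test user and accounts via sacctmgr on the
+# ManagementNodes, then run "srun hostname" from a LoginNode as that user.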
+- hosts: ManagementNodes,LoginNodes,ComputeNodes
+  gather_facts: false
+  tasks:
+  - name: add user hpctest
+    user:
+      name: hpctest
+      shell: /bin/bash
+    become: true
+
+- hosts: ManagementNodes
+  gather_facts: false
+  tasks:
+  - name: Create a parent account
+    command: ./sacctmgr -i add account parentAccount cluster=m3 Description="Test parent account" Organization="Monash"
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true
+    register: result
+    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
+
+  - name: Create a project associated with a given parent
+    command: ./sacctmgr -i add account testProject parent=parentAccount cluster=m3 Organization="Monash"
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true
+    register: result
+    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
+
+  - name: Create a user and associate them with a project
+    command: ./sacctmgr -i create user hpctest cluster=m3 account=testProject partition=batch
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true
+    register: result
+    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
+
+# Possible follow-ups (not automated here):
+#   sacctmgr modify user where name=hpctest set maxjobs=200
+#   sacctmgr update account testProject set qos=normal
+
+- hosts: LoginNodes
+  gather_facts: false
+  tasks:
+  - name: make sure munge is running
+    service:
+      name: munge
+      state: started
+    become: true
+  - name: simple srun test
+    command: ./srun --ntasks=1  --partition=batch  hostname
+    args:
+      chdir: '/opt/slurm-latest/bin'
+    become: true
+    become_user: hpctest
diff --git a/CICD/vars/slurm.yml b/CICD/vars/slurm.yml
index 0d665291de05f39f40aae2f57b1f5a2b11431481..09dfc0af95ef91b97ad30576a92662dd7feecc55 100644
--- a/CICD/vars/slurm.yml
+++ b/CICD/vars/slurm.yml
@@ -3,7 +3,7 @@ desktopNodeList:
   - { name : 'DesktopNodes', interface : 'eth0' }
 clustername: "m3"
 projectname: "m3"
-slurm_version: 19.05.3-2
+slurm_version: 19.05.4
 munge_version: 0.5.13
 nhc_version: 1.4.2
 munge_dir: /opt/munge-{{ munge_version }}
diff --git a/roles/mysql/tasks/Centos_7_mysql_server.yml b/roles/mysql/tasks/CentOS_7_mysql_server.yml
similarity index 100%
rename from roles/mysql/tasks/Centos_7_mysql_server.yml
rename to roles/mysql/tasks/CentOS_7_mysql_server.yml
diff --git a/roles/nhc/templates/nhc.sysconfig.j2 b/roles/nhc/templates/nhc.sysconfig.j2
index a91646c3673a75b56ed0017306dc72462ff28528..04c68b1336a4f3412f7bc636eb15a650a6a93489 100644
--- a/roles/nhc/templates/nhc.sysconfig.j2
+++ b/roles/nhc/templates/nhc.sysconfig.j2
@@ -9,4 +9,6 @@ NHC_EMAIL_TO="{{ nhc_emails }}"
 NHC_EMAIL_SUBJECT="{{ nhc_email_subject }}"
 NHC_LOOP_TIME="300"
 DF_FLAGS="-Tk"
+# Timeout value added temporarily by Trung - 20200409
+TIMEOUT=150
 
diff --git a/roles/set_timezone/tasks/main.yml b/roles/set_timezone/tasks/main.yml
index 177969103af146ee970584e774bf2d4731209e77..5a89a6482d7414dd557936954afa4617cade1da1 100644
--- a/roles/set_timezone/tasks/main.yml
+++ b/roles/set_timezone/tasks/main.yml
@@ -4,15 +4,29 @@
   become: true
   become_user: root
 
-- name: restart ntpd
+- name: restart ntpd redhat
   service: name=ntpd state=restarted 
   become: true
   become_user: root
+  when: ansible_os_family == "RedHat"
 
-- name: ensure ntpd is enabled and started   
+- name: ensure ntpd is enabled and started redhat
   service: name=ntpd state=started enabled=yes   
   become: true   
   become_user: root
+  when: ansible_os_family == "RedHat"
+
+- name: restart ntpd ubuntu
+  service: name=ntp state=restarted
+  become: true
+  become_user: root
+  when: ansible_os_family == "Debian"
+
+- name: ensure ntpd is enabled and started ubuntu
+  service: name=ntp state=started enabled=yes
+  become: true
+  become_user: root
+  when: ansible_os_family == "Debian"
 
 - name: set local timezone
   file: path=/etc/localtime state=link src={{ TIMEZONE_PATH }}
diff --git a/roles/slurm-common/tasks/installCgroup.yml b/roles/slurm-common/tasks/installCgroup.yml
index b97f820a904b3eab6bea660ac92db74d9906d0eb..6ba970cb140351a2f44f05eafd9638404fe2615d 100644
--- a/roles/slurm-common/tasks/installCgroup.yml
+++ b/roles/slurm-common/tasks/installCgroup.yml
@@ -3,25 +3,22 @@
   with_items:
     - libcgroup
   become: True
-  become_method: sudo
   when: ansible_os_family == "RedHat"
 
 - name: apt install cgroup 
-  apt: name={{ item }} state=installed update_cache=yes
-  with_items:
+  package:
+    state: present
+    name:
     - libcgroup1
     - cgroupfs-mount
     - cgroup-tools
   when: ansible_os_family == "Debian"    
   become: True
-  become_method: sudo
 
 - name: config cgroup.conf file
   template: dest={{ slurm_dir }}/etc/cgroup.conf src=cgroup.conf.j2 mode=644
   become: True
-  become_method: sudo
 
 - name: config cgroup_allowed_devices.conf file
   template: dest={{ slurm_dir }}/etc/cgroup_allowed_devices.conf src=cgroup_allowed_devices.conf.j2 mode=644
   become: True
-  become_method: sudo
diff --git a/roles/slurm-common/tasks/installMungeFromSource.yml b/roles/slurm-common/tasks/installMungeFromSource.yml
index a8f5496207dc643a1cc870c2fd6441f632e2bbd1..7a24698ec82a8c6eac5c263891e8c00536e6e85e 100644
--- a/roles/slurm-common/tasks/installMungeFromSource.yml
+++ b/roles/slurm-common/tasks/installMungeFromSource.yml
@@ -8,22 +8,22 @@
     src: "http://consistency0/src/munge-{{ munge_version }}.tar.bz2"
     copy: no
     dest: /tmp
-    creates: /tmp/munge-munge-{{ munge_version }}/configure
+    creates: /tmp/munge-{{ munge_version }}/configure
   when: not munge_binary.stat.exists
 
 
 - name: build munge
   shell: ./configure --prefix={{ munge_dir }} && make
   args:
-    chdir: /tmp/munge-munge-{{ munge_version }}
-    creates: /tmp/munge-munge-{{ munge_version }}/src/munge/munge
+    chdir: /tmp/munge-{{ munge_version }}
+    creates: /tmp/munge-{{ munge_version }}/src/munge/munge
   when: not munge_binary.stat.exists
 
 - name: install munge
   shell: make install
   become: true
   args:
-    chdir: /tmp/munge-munge-{{ munge_version }}
+    chdir: /tmp/munge-{{ munge_version }}
     creates: "{{ munge_dir }}/bin/munge"
   when: not munge_binary.stat.exists