diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 261650f45f2d4d3c291675385e97418b8a3d5e3e..1f974383314e5a0c373650570161e1a47ea3d88d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -13,9 +13,27 @@ stages:
   - ansible_create_cluster_stage
   - push_button_spawn_cluster
   - tests
-
+  - integration_test  # https://docs.gitlab.com/ee/ci/triggers/
+  - clean
   
  
+trigger_pipeline_in_Clusterbuild:
+  stage: integration_test
+  tags:
+  - ansible
+  script:
+  - echo ${CI_JOB_TOKEN}
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # project 193 is Clusterbuild
+
+
+trigger_pipeline_in_monarch:
+  stage: integration_test
+  tags:
+  - ansible
+  script:
+  - echo ${CI_JOB_TOKEN}
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # project 385 is monarch
+
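+# A minimal sketch (assumed job name; not part of this repo) of how a
+# downstream project could consume the forwarded SHA:
+#
+#   use_trigger_sha:
+#     script:
+#       - git checkout ${TRIGGER_CI_COMMIT_SHA:-master}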
 
 yamllint:
   stage: lint
@@ -27,6 +45,21 @@ yamllint:
     - cd CICD
     - yamllint -c ./.yamllintheat.yaml ./heat
 
+# delete_stack_manual:
+#   stage: delete_stack_manual
+#   tags:
+#   - heat
+#   before_script:
+#     - echo "$GC_KEY" > gc_key.pem
+#     - chmod 400 gc_key.pem
+#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+#   script:
+#     - echo "heat stage"
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+#   when: manual
+
 ansiblelint:
   allow_failure: true
   stage: lint
@@ -101,9 +134,9 @@ tests:
     - grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME   #fail if inventory file is empty
     - ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
     - ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
-    
-    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sinfo" ManagementNodes
-    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "squeue" ManagementNodes
+    # TODO: find a better check for sinfo/squeue
+    #- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name sinfo -type f" ManagementNodes
+    #- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name squeue -type f" ManagementNodes
     - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
     - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
     - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
@@ -114,6 +147,7 @@ tests:
     - bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"
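+    # ("slurm" is passed where a node-group name is passed above; this assumes
+    # run_tests.sh resolves it to a slurm-specific suite under ./tests)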
     
 extended:
   stage: extended
@@ -126,8 +160,72 @@ extended:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME ${CI_PROJECT_NAME}
   only:
     variables:
       - $EXTENDED != null
   
+
+manual_cluster_spawn:
+  stage: push_button_spawn_cluster
+  tags:
+  - heat
+  - ansible
+  before_script:
+    - echo "press button spawn cluster."
+    - echo "for this to work you have to provide a variable called manual stackname"
+    - echo I still need to handle os password
+    - echo $MANUAL_STACKNAME
+    - echo "$GC_KEY" > gc_key.pem
+    - chmod 400 gc_key.pem
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME ${CI_PROJECT_NAME}
+    - openstack stack list
+    - export STACKNAME=$MANUAL_STACKNAME
+    - sleep 25
+    - bash -x CICD/ansible_create_cluster_script.sh
+  when: manual
+  only:
+    refs:
+      - "cicd"
+
+clean:
+  stage: clean
+  tags:
+  - heat
+  before_script:
+    - echo "cleanup stack"
+    - sleep 30
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
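+    # STACKNAME is assumed to arrive as a pipeline-level variable shared with
+    # the earlier stages; unlike them, this job needs only the openrc, not gc_key.pem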
+
+
+# heat_test:
+#   stage: heat_test
+#   allow_failure: false
+#   tags:
+#   - heat
+#   before_script:
+#     - echo "$GC_KEY" > gc_key.pem
+#     - chmod 400 gc_key.pem
+#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - export HEAT_TEST_STACKNAME=_TESTING_HEAT
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
+#     - sleep 60
+#   script:
+#     - echo "heat_test stage"
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#   after_script:
+#     - sleep 20 # artificially wait a bit to make sure the stack is really gone
+
diff --git a/CICD/heat/gc_HOT.yaml b/CICD/heat/gc_HOT.yaml
index ce5d2cb9a74187020e53a0148f2d2abad5e76bf4..75f12b0ee4902e6b2c3a742914ef282e0d869814 100644
--- a/CICD/heat/gc_HOT.yaml
+++ b/CICD/heat/gc_HOT.yaml
@@ -117,7 +117,7 @@ resources:
   MgmtNodesCentos7:
    type: "OS::Heat::ResourceGroup"
    properties:
-    count: 1
+    count: 2
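+    # (presumably so the ManagementNodes checks in .gitlab-ci.yml run against
+    # more than one management host)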
     resource_def:
       type: My::Server::MgmtNode
       properties:
@@ -159,9 +159,9 @@ resources:
       key_name: { get_param: ssh_key }
       name:
        list_join: [ '-', [ { get_param: "OS::stack_name" }, 'login%index%' ]]
-      security_groups: [ default, { get_param: PublicSSHSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
+      security_groups: [ default, { get_param: PublicSSHSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
-       ansible_host_groups: [ LoginNodes, ManagementNodes ]
+       ansible_host_groups: [ LoginNodes ]
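+       # management now runs solely on the dedicated MgmtNodesCentos7 group
+       # (count: 2 above), so login nodes drop the ManagementNodes host group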
        ansible_ssh_user: ec2-user
        project_name: { get_param: project_name }
       networks: