diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 18c7d93c31bcf075157165f0bda32eddfe6074a5..24b4dcbd65298b92768dc13857f9c679750b3a9f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,57 +1,39 @@
 variables:
   GIT_SUBMODULE_STRATEGY: recursive
-  STACKNAME: CICD_reporef$CI_COMMIT_REF_NAME
+  STACKNAME: CICD_aciab_$CI_COMMIT_REF_NAME
   NECTAR_ALLOCATION: HPCCICD
   ANSIBLE_HOST_KEY_CHECKING: "False"
+  DEFAULT_PATH: "CICD"
 
 stages:
-#  - integration_test_downstream # working but unwanted here
-#  - trigger_pipeline_in_B   # working but unwanted here
   - lint
-  #- delete_stack_manual
   - extended
   #- heat_test
   - heat
   - ansible_create_cluster_stage
   - push_button_spawn_cluster
-#  - e2e
   - tests
-  - clean # manually delete stack
+  - integration_test #https://docs.gitlab.com/ee/ci/triggers/
+  - clean
+
+
+trigger_pipeline_in_Clusterbuild:
+  stage: integration_test
+  tags: 
+  - ansible
+  script:
+  - echo ${CI_JOB_TOKEN}
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=aciab_upstream https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # ID is from clusterbuild
 
 
-  
-#trigger_pipeline_in_B:
-#  stage: integration_test_downstream
-#  tags: 
-#  - ansible
-#  script:
-#  - "curl --request POST --form token=${CI_JOB_TOKEN} --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/1085/trigger/pipeline"  # ID is from pysshauthz
+trigger_pipeline_in_monarch:
+  stage: integration_test
+  tags: 
+  - ansible
+  script:
+  - echo ${CI_JOB_TOKEN}
+  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=cicd https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # ID is from monarch
 
-# heat_test:
-#   stage: heat_test
-#   allow_failure: false
-#   tags:
-#   - heat
-#   before_script:
-#     - echo "$GC_KEY" > gc_key.pem
-#     - chmod 400 gc_key.pem
-#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
-#     - source ./$NECTAR_ALLOCATION-openrc.sh
-#     - export HEAT_TEST_STACKNAME=_TESTING_HEAT
-#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
-#     - sleep 60
-#   script:
-#     - echo "heat_test stage"
-#     - source ./$NECTAR_ALLOCATION-openrc.sh
-#     - bash -x ./CICD/heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
-#     - openstack stack list
-#     - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
-#     - openstack stack list
-#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
-#     - openstack stack list
-#   after_script:
-#     - sleep 20 # artifically wait a bit to make sure it is really dead
-#   when: manual
 
 yamllint:
   stage: lint
@@ -61,7 +43,6 @@ yamllint:
   script:
     - echo "stage yamllint"
     - cd CICD
-    # - ansible-lint -c .yamllintconf.yaml -x ANSIBLE0002 master_playbook.yml
     - yamllint -c ./.yamllintheat.yaml ./heat
 
 # delete_stack_manual:
@@ -96,6 +77,7 @@ build_cluster_cicd:
   tags:
   - heat
   before_script:
+    - cd $DEFAULT_PATH
     - echo "$GC_KEY" > gc_key.pem
     - chmod 400 gc_key.pem
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
@@ -103,9 +85,10 @@ build_cluster_cicd:
     - echo "heat stage"
     - source ./$NECTAR_ALLOCATION-openrc.sh
     - openstack stack list
-    - bash -x ./CICD/heat/heatcicdwrapper.sh create_or_update $STACKNAME
+    - bash -x ./heat/heatcicdwrapper.sh create_or_update $STACKNAME
+    - bash -x ./heat/server_rebuild.sh all
   after_script:
-    - sleep 20 # artifically wait a bit to give the nodes time to boot
+    - sleep 30 # artificially wait a bit to give the nodes time to boot
 #  only:
 #    changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
 #    - "heat/*HOT*.yaml"
@@ -117,20 +100,22 @@ ansible_create_cluster_stage:
   tags: 
   - ansible
   before_script:
+    - cd $DEFAULT_PATH/..
     - echo "$GC_KEY" > gc_key.pem
     - chmod 400 gc_key.pem
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - echo "ansible_create_cluster_stage"
     - bash -x ./CICD/ansible_create_cluster_script.sh
-  #after_script:
-    #- rm ./files/inventory.$STACKNAME
-  #only:
-  #  changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
-  #  - "master_playbook.yml"
-  #  - "vars/*.{yml,yaml}"
-  #  - schedules
-  #  - CICD/.gitlab-ci.yml
+    - cd CICD
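+    # run the full playbook, restart slurmdbd (the "uglyuglyfix"), then run the playbook a second time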
+    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags monitoring master_playbook.yml
+    - sleep 15
+    - echo uglyuglyfix
+    - ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -b -a "systemctl restart slurmdbd" ManagementNodes
+    - sleep 60
+    - echo do it again
+    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags monitoring master_playbook.yml
+    
 
 tests:
   stage: tests
@@ -163,7 +148,22 @@ tests:
     - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     
-    # licensing https://gitlab.erc.monash.edu.au/hpc-team/license_server/tree/master/roles/avizo_license_monitor
+extended:
+  stage: extended
+  tags:
+  - heat
+  - ansible
+  before_script:
+    - echo "cleanup stack"
+    - sleep 30
+    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+  script:
+    - source ./$NECTAR_ALLOCATION-openrc.sh
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+  only:
+    variables:
+      - $EXTENDED != null
+  
 
 manual_cluster_spawn:
   stage: push_button_spawn_cluster
@@ -189,22 +189,6 @@ manual_cluster_spawn:
   only:
     refs:
       - "cicd"
-
-extended:
-  stage: extended
-  tags:
-  - heat
-  - ansible
-  before_script:
-    - echo "cleanup stack"
-    - sleep 30
-    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
-  script:
-    - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
-  only:
-    variables:
-      - $EXTENDED != null
   
 clean:
   stage: clean
@@ -217,4 +201,30 @@ clean:
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
-  #when: manual
\ No newline at end of file
+
+  
+# heat_test:
+#   stage: heat_test
+#   allow_failure: false
+#   tags:
+#   - heat
+#   before_script:
+#     - echo "$GC_KEY" > gc_key.pem
+#     - chmod 400 gc_key.pem
+#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - export HEAT_TEST_STACKNAME=_TESTING_HEAT
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
+#     - sleep 60
+#   script:
+#     - echo "heat_test stage"
+#     - source ./$NECTAR_ALLOCATION-openrc.sh
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
+#     - openstack stack list
+#   after_script:
+#     - sleep 20 # artificially wait a bit to make sure it is really dead
+
diff --git a/CICD/ChordDiagramm/Chord_Diagramm - Sheet1.csv b/CICD/ChordDiagramm/Chord_Diagramm - Sheet1.csv
new file mode 100644
index 0000000000000000000000000000000000000000..ffed806fb3b659db9b40714983a4a3af881520ac
--- /dev/null
+++ b/CICD/ChordDiagramm/Chord_Diagramm - Sheet1.csv	
@@ -0,0 +1,10 @@
+0,0,0,1,1,1,1,1,1,0
+0,0,0,0,0,0,1,1,0,0
+0,0,0,0,1,1,1,1,0,0
+1,0,0,0,0,0,0,1,0,0
+1,0,1,0,0,0,0,1,0,0
+1,0,1,0,0,0,0,1,0,0
+1,1,1,0,0,0,0,1,1,0
+1,1,1,1,1,1,1,0,0,0
+1,0,0,0,0,0,1,0,0,0
+0,0,0,0,0,0,0,0,0,0
\ No newline at end of file
diff --git a/CICD/ChordDiagramm/Chord_Diagramm.png b/CICD/ChordDiagramm/Chord_Diagramm.png
new file mode 100644
index 0000000000000000000000000000000000000000..c53a11844083e852d45d2b824c3bae9bd4fc8876
Binary files /dev/null and b/CICD/ChordDiagramm/Chord_Diagramm.png differ
diff --git a/CICD/ChordDiagramm/genChordDiagramm.py b/CICD/ChordDiagramm/genChordDiagramm.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc75befc62f535fea23862300d18eb6432ab700f
--- /dev/null
+++ b/CICD/ChordDiagramm/genChordDiagramm.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+# script copied from https://github.com/fengwangPhysics/matplotlib-chord-diagram/blob/master/README.md
+# source data manually edited via https://docs.google.com/spreadsheets/d/1JN9S_A5ICPQOvgyVbWJSFJiw-5gO2vF-4AeYuWl-lbs/edit#gid=0
+# chord diagram
+import matplotlib.pyplot as plt
+from matplotlib.path import Path
+import matplotlib.patches as patches
+
+import numpy as np
+
+LW = 0.3
+
+def polar2xy(r, theta):
+    return np.array([r*np.cos(theta), r*np.sin(theta)])
+
+def hex2rgb(c):
+    return tuple(int(c[i:i+2], 16)/256.0 for i in (1, 3 ,5))
+
+def IdeogramArc(start=0, end=60, radius=1.0, width=0.2, ax=None, color=(1,0,0)):
+    # start, end should be in [0, 360)
+    if start > end:
+        start, end = end, start
+    start *= np.pi/180.
+    end *= np.pi/180.
+    # optimal distance to the control points
+    # https://stackoverflow.com/questions/1734745/how-to-create-circle-with-b%C3%A9zier-curves
+    opt = 4./3. * np.tan((end-start)/ 4.) * radius
+    inner = radius*(1-width)
+    verts = [
+        polar2xy(radius, start),
+        polar2xy(radius, start) + polar2xy(opt, start+0.5*np.pi),
+        polar2xy(radius, end) + polar2xy(opt, end-0.5*np.pi),
+        polar2xy(radius, end),
+        polar2xy(inner, end),
+        polar2xy(inner, end) + polar2xy(opt*(1-width), end-0.5*np.pi),
+        polar2xy(inner, start) + polar2xy(opt*(1-width), start+0.5*np.pi),
+        polar2xy(inner, start),
+        polar2xy(radius, start),
+        ]
+
+    codes = [Path.MOVETO,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.LINETO,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CLOSEPOLY,
+             ]
+
+    if ax == None:
+        return verts, codes
+    else:
+        path = Path(verts, codes)
+        patch = patches.PathPatch(path, facecolor=color+(0.5,), edgecolor=color+(0.4,), lw=LW)
+        ax.add_patch(patch)
+
+
+def ChordArc(start1=0, end1=60, start2=180, end2=240, radius=1.0, chordwidth=0.7, ax=None, color=(1,0,0)):
+    # start, end should be in [0, 360)
+    if start1 > end1:
+        start1, end1 = end1, start1
+    if start2 > end2:
+        start2, end2 = end2, start2
+    start1 *= np.pi/180.
+    end1 *= np.pi/180.
+    start2 *= np.pi/180.
+    end2 *= np.pi/180.
+    opt1 = 4./3. * np.tan((end1-start1)/ 4.) * radius
+    opt2 = 4./3. * np.tan((end2-start2)/ 4.) * radius
+    rchord = radius * (1-chordwidth)
+    verts = [
+        polar2xy(radius, start1),
+        polar2xy(radius, start1) + polar2xy(opt1, start1+0.5*np.pi),
+        polar2xy(radius, end1) + polar2xy(opt1, end1-0.5*np.pi),
+        polar2xy(radius, end1),
+        polar2xy(rchord, end1),
+        polar2xy(rchord, start2),
+        polar2xy(radius, start2),
+        polar2xy(radius, start2) + polar2xy(opt2, start2+0.5*np.pi),
+        polar2xy(radius, end2) + polar2xy(opt2, end2-0.5*np.pi),
+        polar2xy(radius, end2),
+        polar2xy(rchord, end2),
+        polar2xy(rchord, start1),
+        polar2xy(radius, start1),
+        ]
+
+    codes = [Path.MOVETO,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             ]
+
+    if ax == None:
+        return verts, codes
+    else:
+        path = Path(verts, codes)
+        patch = patches.PathPatch(path, facecolor=color+(0.5,), edgecolor=color+(0.4,), lw=LW)
+        ax.add_patch(patch)
+
+def selfChordArc(start=0, end=60, radius=1.0, chordwidth=0.7, ax=None, color=(1,0,0)):
+    # start, end should be in [0, 360)
+    if start > end:
+        start, end = end, start
+    start *= np.pi/180.
+    end *= np.pi/180.
+    opt = 4./3. * np.tan((end-start)/ 4.) * radius
+    rchord = radius * (1-chordwidth)
+    verts = [
+        polar2xy(radius, start),
+        polar2xy(radius, start) + polar2xy(opt, start+0.5*np.pi),
+        polar2xy(radius, end) + polar2xy(opt, end-0.5*np.pi),
+        polar2xy(radius, end),
+        polar2xy(rchord, end),
+        polar2xy(rchord, start),
+        polar2xy(radius, start),
+        ]
+
+    codes = [Path.MOVETO,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             Path.CURVE4,
+             ]
+
+    if ax == None:
+        return verts, codes
+    else:
+        path = Path(verts, codes)
+        patch = patches.PathPatch(path, facecolor=color+(0.5,), edgecolor=color+(0.4,), lw=LW)
+        ax.add_patch(patch)
+
+def chordDiagram(X, ax, colors=None, width=0.1, pad=2, chordwidth=0.7):
+    """Plot a chord diagram
+    Parameters
+    ----------
+    X :
+        flux data, X[i, j] is the flux from i to j
+    ax :
+        matplotlib `axes` to show the plot
+    colors : optional
+        user defined colors in rgb format. Use function hex2rgb() to convert hex color to rgb color. Default: d3.js category10
+    width : optional
+        width/thickness of the ideogram arc
+    pad : optional
+        gap pad between two neighboring ideogram arcs, unit: degree, default: 2 degree
+    chordwidth : optional
+        position of the control points for the chords, controlling the shape of the chords
+    """
+    # X[i, j]:  i -> j
+    x = X.sum(axis = 1) # sum of each row, i.e. the total flux out of node i
+    ax.set_xlim(-1.1, 1.1)
+    ax.set_ylim(-1.1, 1.1)
+
+    if colors is None:
+    # use d3.js category10 https://github.com/d3/d3-3.x-api-reference/blob/master/Ordinal-Scales.md#category10
+        colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
+                  '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf', '#c49c94']
+        if len(x) > len(colors):
+            print('x is too large! Use x smaller than 11')
+        colors = [hex2rgb(colors[i]) for i in range(len(x))]
+
+    # find position for each start and end
+    y = x/np.sum(x).astype(float) * (360 - pad*len(x))
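+    # y[i] is the angular span (in degrees) allotted to node i, proportional to its flux, after reserving pad degrees per gap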
+
+    pos = {}
+    arc = []
+    nodePos = []
+    start = 0
+    for i in range(len(x)):
+        end = start + y[i]
+        arc.append((start, end))
+        angle = 0.5*(start+end)
+        #print(start, end, angle)
+        if -30 <= angle <= 210:
+            angle -= 90
+        else:
+            angle -= 270
+        nodePos.append(tuple(polar2xy(1.1, 0.5*(start+end)*np.pi/180.)) + (angle,))
+        z = (X[i, :]/x[i].astype(float)) * (end - start)
+        ids = np.argsort(z)
+        z0 = start
+        for j in ids:
+            pos[(i, j)] = (z0, z0+z[j])
+            z0 += z[j]
+        start = end + pad
+
+    for i in range(len(x)):
+        start, end = arc[i]
+        IdeogramArc(start=start, end=end, radius=1.0, ax=ax, color=colors[i], width=width)
+        start, end = pos[(i,i)]
+        selfChordArc(start, end, radius=1.-width, color=colors[i], chordwidth=chordwidth*0.7, ax=ax)
+        for j in range(i):
+            color = colors[i]
+            if X[i, j] > X[j, i]:
+                color = colors[j]
+            start1, end1 = pos[(i,j)]
+            start2, end2 = pos[(j,i)]
+            ChordArc(start1, end1, start2, end2,
+                     radius=1.-width, color=color, chordwidth=chordwidth, ax=ax)
+
+    #print(nodePos)
+    return nodePos
+
+##################################
+if __name__ == "__main__":
+    fig = plt.figure(figsize=(6,6))
+    flux = np.array([
+      [ 0, 1, 0, 0], #OS Sum:2 ; Centos, Ubuntu
+      [ 0, 0, 0, 0], #Plays
+      [ 0, 0, 0, 1], # Cluster: Sum5; Generic, M3, Monarch, SHPC, ACCS
+      [ 0, 0, 1, 2]  #Cloud Sum3: AWS,Nimbus,Nectar
+    ])
+    from numpy import genfromtxt
+
+    flux = genfromtxt('Chord_Diagramm - Sheet1.csv', delimiter=',')
+    ax = plt.axes([0,0,1,1])
+
+    #nodePos = chordDiagram(flux, ax, colors=[hex2rgb(x) for x in ['#666666', '#66ff66', '#ff6666', '#6666ff']])
+    nodePos = chordDiagram(flux, ax)
+    ax.axis('off')
+    prop = dict(fontsize=16*0.8, ha='center', va='center')
+    nodes = ['OS_Centos76','OS_Centos8','OS_Ubuntu1804','PLY_NFSSQL','PLY_MGMT','PLY_Login','PLY_Compute','C_Generic','C_M3','C_Monarch']
+    #nodes = ['M3_MONARCH','SHPC','Ubuntu','Centos7','Centos8','Tested','Security','Nectar','?AWS?','DGX@Baremetal','ML@M3','CVL@UWA','CVL_SW','CVL_Desktop','Strudel','/usr/local']
+    for i in range(len(nodes)):
+        ax.text(nodePos[i][0], nodePos[i][1], nodes[i], rotation=nodePos[i][2], **prop)
+
+    plt.savefig("Chord_Diagramm.png", dpi=600,transparent=False,bbox_inches='tight', pad_inches=0.02)
+    plt.show()
diff --git a/CICD/ansible_create_cluster_script.sh b/CICD/ansible_create_cluster_script.sh
index b062d4f21e71371698683164a1c81a9ef40a39b2..0f1fa8504d8b80432d819d08674842dfc09d9e1d 100755
--- a/CICD/ansible_create_cluster_script.sh
+++ b/CICD/ansible_create_cluster_script.sh
@@ -15,7 +15,3 @@ ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
 #cd roles 
     #- "egrep -lRZ 'sudo: true' . | xargs -0 -l sed -i -e 's/sudo: true/become: true/g' "
 #cd ..
-ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem master_playbook.yml
-sleep 15
-echo uglyuglyfix
-ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -b -a "systemctl restart slurmdbd" ManagementNodes
\ No newline at end of file
diff --git a/CICD/heat/gc_HOT.yaml b/CICD/heat/gc_HOT.yaml
index da11e6a1bc80ed80ba841064234b5a630fe71b47..e5be4b8d13e82e7532cefd8b0f55e8c1f383a829 100644
--- a/CICD/heat/gc_HOT.yaml
+++ b/CICD/heat/gc_HOT.yaml
@@ -27,7 +27,7 @@ parameters:
     default: Classic Provider
   Flavour:
     type: string
-    default: t3.xsmall
+    default: m3.xsmall
 
 
 resources:
@@ -67,7 +67,8 @@ resources:
      name: "heatmysqlsecgroup"
      rules: [ { protocol: tcp,
                port_range_min: 3306,
-               port_range_max: 3306} ]
+               port_range_max: 3306,
+               remote_mode: "remote_group_id"} ]
   SSHMonashSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
@@ -104,7 +105,7 @@ resources:
     name:
      list_join: [ '-', [ { get_param: "OS::stack_name" }, 'sql0' ]]
     availability_zone: { get_param: avz }
-    flavor: m3.small
+    flavor: m3.xsmall
     image: { get_param: centos_7_image_id }
     key_name: { get_param: ssh_key }
     security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: MySQLSecGroup }, { get_resource: NFSSecGroup } ]
@@ -257,7 +258,7 @@ resources:
 #    name:
 #     list_join: [ '-', [ { get_param: "OS::stack_name" }, 'pysshautz' ]]
 #    availability_zone: { get_param: avz }
-#    flavor: t3.xsmall
+#    flavor: m3.xsmall
 #    image: { get_param: ubuntu_1804_image_id }
 #    key_name: { get_param: ssh_key }
 #    security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: webaccess } ]
diff --git a/CICD/heat/heatcicdwrapper.sh b/CICD/heat/heatcicdwrapper.sh
index abbd2ee6e7734b6126cc217bad45f858f5ee1958..26afdebda88b5ba150f9e947b997f21e7f6b461d 100644
--- a/CICD/heat/heatcicdwrapper.sh
+++ b/CICD/heat/heatcicdwrapper.sh
@@ -60,7 +60,7 @@ function create_stack {
     echo "I will NOT create existing stack maybe use update"
     exit -44
  fi
- openstack stack create --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
+ openstack stack create --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
  createreturn=$?
  if [ $createreturn -ne "0" ]
  then
@@ -83,14 +83,14 @@ case "$1" in
                    echo "I cannot update a stack which does not exist"
                    exit -45
             fi
-            openstack stack update --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
+            openstack stack update --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
             ret=$?
             exit $ret
             ;;
         create_or_update)
             if  check_stack_exists
                then
-               openstack stack update --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
+               openstack stack update --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
                ret=$?
                exit $ret
             fi
diff --git a/CICD/heat/server_rebuild.sh b/CICD/heat/server_rebuild.sh
new file mode 100755
index 0000000000000000000000000000000000000000..004206023c61b779a9fef1b0968a09d40a94e72b
--- /dev/null
+++ b/CICD/heat/server_rebuild.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+REBOOT_TIME=90
+
+NODETYPES=${@}
+
+# make sure you define variable STACKNAME in current environment
+if [[ -z $STACKNAME ]]; then
+    echo "please define STACKNAME variable"  
+    exit 1
+fi
+
+# prepend CICD to stack name
+if [[ "$STACKNAME" == "CICD"* ]]; then
+  echo "CICD found in stackname. doing nothing"
+else
+  STACKNAME="CICD"$STACKNAME
+fi
+
+function check_stack_exists {
+    if openstack stack list | grep -w $STACKNAME;
+        then 
+            echo "stack found";
+        else 
+            echo "stack not found";
+            return 1
+    fi
+}
+
+if ! check_stack_exists
+then
+    exit 0
+fi
+
+# returns a dictionary (JSON) mapping each server name to its ansible_host_groups; an IndexError is raised if a server whose name starts with STACKNAME has no ansible_host_groups property
+host_group_mapping=$(openstack server list --long -f json | python3 -c "import json,sys,re;ivt=json.load(sys.stdin);json.dump({i['Name']: re.search('ansible_host_groups\=\'\[(.+)\]\'', i['Properties']).groups()[0].replace('\"', '').split() for i in ivt if re.match('$STACKNAME',i['Name'])}, fp=sys.stdout)")
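+# hypothetical example of the resulting mapping: {"CICD_mystack-mgmt0": ["ManagementNodes"], "CICD_mystack-login0": ["LoginNodes"]}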
+
+# all available ansible_host_groups
+available_groups=$(echo $host_group_mapping |  python3 -c "import json,sys;mapping=json.load(sys.stdin);output=[];[output.extend(v) for v in mapping.values()];print(output)" | tr -d "[',]")
+
+echo "available groups are $available_groups"
+# if the first input parameter is all then rebuild all groups
+if [[ "$1" == "all" ]]; then
+    NODETYPES=$available_groups
+fi
+echo "going to update group $NODETYPES"
+server_list=$(echo $host_group_mapping | python3 -c "import json,sys;mapping=json.load(sys.stdin);node_types='$NODETYPES'.split();output=[k for k in mapping.keys() if len(set(mapping[k]).intersection(set(node_types))) > 0];print(output)" | tr -d "[,]'" | xargs -n1 | sort -u)
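+# server_list now holds the unique server names (one per line) whose host groups intersect NODETYPES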
+
+echo "server list is $server_list"
+rebuild_func(){
+    echo "rebuilding server $1"
+    openstack server rebuild --wait $1
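+    # 'openstack server rebuild' reprovisions the instance from its original image; --wait blocks until the rebuild finishes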
+}
+
+# rebuild every server in the list, one background job each
+for server in $server_list
+do
+rebuild_func $server & # run parallel in background
+done
+wait # wait for all servers to be rebuilt
+# add an extra time for reboot
+echo "waiting for reboot"
+sleep $REBOOT_TIME
+echo "All done"
+
diff --git a/CICD/master_playbook.yml b/CICD/master_playbook.yml
index 04dc7e747b8894c60a049697e93c8ac89a2b8dc1..1a6541fbeb0f02f42a3fce3ebd162db0f47dae79 100644
--- a/CICD/master_playbook.yml
+++ b/CICD/master_playbook.yml
@@ -5,3 +5,4 @@
 - import_playbook: plays/nfssqlnodes.yml
 - import_playbook: plays/mgmtnodes.yml
 - import_playbook: plays/computenodes.yml
+- import_playbook: plays/loginnodes.yml
\ No newline at end of file
diff --git a/CICD/plays/allnodes.yml b/CICD/plays/allnodes.yml
index 406bdb797108aa4d19a505742cb4df02f5df6909..d6eed3ef32a32d9b702b20534e2f916652c3c3e4 100644
--- a/CICD/plays/allnodes.yml
+++ b/CICD/plays/allnodes.yml
@@ -46,3 +46,4 @@
   - { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
   - { role: SSHKnownHosts, tags: [ known_hosts ] }
   - { role: jasons_ssh_ca, tags: [ ssh_ca ] }
+  - { role: set_timezone }
diff --git a/CICD/plays/loginnodes.yml b/CICD/plays/loginnodes.yml
new file mode 120000
index 0000000000000000000000000000000000000000..75153990e471c597e4f4c9da7d1230a59487bd7e
--- /dev/null
+++ b/CICD/plays/loginnodes.yml
@@ -0,0 +1 @@
+../tests/ManagementNodes/check.yml
\ No newline at end of file
diff --git a/CICD/plays/mgmtnodes.yml b/CICD/plays/mgmtnodes.yml
index c890a5456b5306f1478070e3f329fc57adc51340..5d4241194324fe13739e074b4ee749c969935dfb 100644
--- a/CICD/plays/mgmtnodes.yml
+++ b/CICD/plays/mgmtnodes.yml
@@ -38,6 +38,7 @@
   - { role: slurm-common, tags: [ slurm, slurm-common ]  }
   - { role: slurm_config, tags: [ slurm, slurm-config ] }
   - { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ]  }
+  - { role: telegraf, tags: [ monitoring ] }
 #  - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ]  }
 #  - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
 
diff --git a/CICD/tests/Readme.md b/CICD/tests/Readme.md
index feca06268d107c2aeae9a6a8f61a2ed59e8648bc..bfea85a5948bbfed3381544346147c06ec2fa608 100644
--- a/CICD/tests/Readme.md
+++ b/CICD/tests/Readme.md
@@ -1,7 +1,10 @@
-this folder should contain tests that will be run automatically by the CICD pipeline
+This folder should contain tests that will be run automatically by the CICD pipeline.
 
-all files with fileending .sh will be executed by a shell
-all files with fileending yml will be executed by ansible-playbook
-./tmp can be used as temporary folder and will be cleaned after execution
+The trigger mechanism to execute these tests is `.gitlab-ci.yml` in the toplevel folder of this repository.
 
-because I can I am prefixing tests with 0-9 to give the execution some priority
\ No newline at end of file
+ - *.sh files will be executed by a shell, *.yml files by ansible-playbook
+ - A test should return 0 on success and non-zero on failure. See 0_Example* and the sketch below
+ - Tests should aim not to alter the system outside of /tmp
+ - Tests are prefixed with 0-9 to give the execution order some priority
+ - Tests should not need elevated privileges. A special user (TODO) is assumed to be present
+ - TODO: a way to deal with test payloads. This will be determined on test submission
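+
+A minimal sketch of such a test (the slurm check is only an illustration, not an existing test in this repository):
+
+```bash
+#!/bin/bash
+# returns 0 only if the slurm controller answers
+set -e
+sinfo > /dev/null
+```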
diff --git a/README.md b/README.md
index f06edfcda4693187e6cff8e9daaa00f40a8ba763..57aefd5e1a70dcf85e22950266922d5f87333714 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,10 @@ as our architecture document.
 
 [![pipeline status](https://gitlab.erc.monash.edu.au/hpc-team/ansible_cluster_in_a_box/badges/cicd/pipeline.svg)](https://gitlab.erc.monash.edu.au/hpc-team/ansible_cluster_in_a_box/commits/cicd)
 
+![Coverage Diagram](CICD/ChordDiagramm/Chord_Diagramm.png)
+A connecting line represents test coverage of its two endpoints, e.g. Centos8 is only tested on ComputeNodes of the generic cluster.
+
+
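+To regenerate the diagram after editing the CSV, run the script from its own folder so the relative file names resolve (requires matplotlib and numpy):
+
+```bash
+cd CICD/ChordDiagramm
+python3 genChordDiagramm.py   # reads 'Chord_Diagramm - Sheet1.csv' and writes Chord_Diagramm.png
+```
+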
 We aim to make these roles as generic as possible. You should be able to start from an inventory file, an ssh key and a git clone of this and end up with a working cluster. In the longer term we might branch to include utilities to make an inventory file using NeCTAR credentials.
 
 If you need a password use get_or_make_password.py (delegated to the passwword server/localhost) to generate a random one that can be shared between nodes
diff --git a/plays/allnodes.yml b/plays/allnodes.yml
deleted file mode 100644
index ac098f4dd1496fd05c7ce869b09ab54144cd307b..0000000000000000000000000000000000000000
--- a/plays/allnodes.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-- hosts: 'all'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  tasks:
-  - { name: set use shared state, set_fact: usesharedstatedir=False }
-  - { name: set hostgroup, set_fact: hostgroup='ComputeNodes' }
-  tags: [ always ]
-
-- hosts: 'all'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  strategy: free
-  roles:
-#  - { role: disable_selinux, tags: [ disableselinux ] }
-  - { role: upgrade }
-  - { role: set_password }
-  - { role: etcHosts, tags: [ networking ] }
-#  - { role: config_repos, tags: [ repos ] }
-
-- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,ManagementNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  strategy: free
-  roles:
-  - { role: disable_selinux, tags: [ disableselinux ] }
-  - { role: ldapclient, tags: [ authentication ] }
-  - { role: ssh-password-login, tags: [ authentication ] }
-  - { role: enable_sudo_group, tags: [ authentication, sudo ] }
-  - { role: move_homedir }
-  - { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
-  - { role: SSHKnownHosts, tags: [ known_hosts ] }
-  - { role: jasons_ssh_ca, tags: [ ssh_ca ] }
diff --git a/plays/computenodes.yml b/plays/computenodes.yml
deleted file mode 100644
index 208ad954f57c479461c4270b69abefe20384c468..0000000000000000000000000000000000000000
--- a/plays/computenodes.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-
-- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  tasks:
-  - { name: set use shared state, set_fact: usesharedstatedir=False }
-  tags: [ always ]
-
-- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  strategy: free
-  roles:
-  - { role: move_homedir, tags: [ authentication, filesystems ] }
-  - { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
-  - { role: slurm-common, tags: [ slurm, slurm-common ] }
-  - { role: lmod, tags: [ other ] }
-  - { role: enable_modules, default_modules: "lmod", tags: [ other ] }
-  - { role: postfix, tags: [ mail, other ] }
-
-- hosts: 'VisNodes'
-  vars_files:
-  - vars/passwords.yml
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml
-  - vars/slurm.yml
-  - vars/vars.yml
-  roles:
-  - { role: gpu, tags: [ gpu ] }
-
-- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
-  vars_files:
-  - vars/passwords.yml
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml
-  - vars/slurm.yml
-  - vars/vars.yml
-  roles:
-  - { role: slurm_config, tags: [slurm, slurm_config] }
-
-- hosts: 'DesktopNodes,ComputeNodes'
-  vars_files:
-  - vars/passwords.yml
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml
-  - vars/slurm.yml
-  - vars/vars.yml
-  strategy: free
-  roles:
-  - { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
-  - { role: mate-de-install, tags: [ mate-de-install ] }   # TODO this crashes for everything except cmca
diff --git a/plays/files b/plays/files
deleted file mode 120000
index feb122881ce2321d72ad6b867bd2a3d01eadaac3..0000000000000000000000000000000000000000
--- a/plays/files
+++ /dev/null
@@ -1 +0,0 @@
-../files
\ No newline at end of file
diff --git a/plays/init_slurmconf.yml b/plays/init_slurmconf.yml
deleted file mode 100644
index 30667ac53b5b6c387af0bdacb609f09cc8bfa5c3..0000000000000000000000000000000000000000
--- a/plays/init_slurmconf.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- hosts: 'all'
-  tasks:
-  - include_vars: vars/passwords.yml 
-  - include_vars: vars/names.yml
-  - include_vars: vars/ldapConfig.yml
-  - include_vars: vars/filesystems.yml 
-  - include_vars: vars/slurm.yml 
-  - include_vars: vars/vars.yml 
-- hosts: 'all'
-  tasks:
-  - { name: setup, setup: }
-- hosts: 'ManagementNodes'
-  roles:
-  - { role: calculateSlurmConf }
diff --git a/plays/make_files.yml b/plays/make_files.yml
deleted file mode 100644
index b05925ce73f9be136bb46128961990b938c07910..0000000000000000000000000000000000000000
--- a/plays/make_files.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# just calculates an etc hosts
-- hosts: 'all'
-  tasks:
-  - include_vars: vars/passwords.yml
-  - include_vars: vars/names.yml
-  - include_vars: vars/ldapConfig.yml
-  - include_vars: vars/filesystems.yml
-  - include_vars: vars/slurm.yml
-  - include_vars: vars/vars.yml
-- hosts: 'all'
-  tasks:
-  - { name: setup, setup: }
-- hosts: 'ManagementNodes'
-  roles:
-  - { role: calculateEtcHosts }
-  
-#- hosts: 'NFSNodes'
-#  roles:
-#  - { role: calculateExports }
-
-
diff --git a/plays/mgmtnodes.yml b/plays/mgmtnodes.yml
deleted file mode 100644
index 5d4241194324fe13739e074b4ee749c969935dfb..0000000000000000000000000000000000000000
--- a/plays/mgmtnodes.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-# Basic stuff to make the nodes functionl
-# i.e. upgrade operating systems, etc
-#
-
-- hosts: 'ManagementNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  tasks:
-      #  - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
-  - { name: set use shared state, set_fact: usesharedstatedir=True }
-  tags: [ always ]
-
-- hosts: 'ManagementNodes'
-  strategy: free
-  gather_facts: False
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  roles:
-#  - { role: ldapclient, tags: [ authentication ] }
-#  - { role: ssh-password-login }
-#  - { role: enable_sudo_group }
-#  - { role: make_filesystems, volumes: "{{ glustervolumes }}" }
-#  - { role: gluster_server, volname: "gv", brickmnt: '/gbrick', gluster_servers: "{{ groups['ManagementNodes'] }}", replicas: 2, tags: [ gluster_server ]  }
-#  - { role: gluster_volcreate, volname: "gv", gluster_servers: "{{ groups['ManagementNodes'] }}", brickmnt: '/gbrick', replicas: 2 }
-#  - { role: gluster_client, volname: "gv", gluster_servers: ['mgmt0','mgmt1','sql0'], volmnt: '/glusterVolume' }
-  - { role: nfs-client, nfsMounts: "{{ mgmtNfsMounts }}", tags: [ nfs ] }
-  - { role: slurmdb-config, tags: [ slurm, slurmdb-config ] }
-  - { role: slurm-common, tags: [ slurm, slurm-common ]  }
-  - { role: slurm_config, tags: [ slurm, slurm-config ] }
-  - { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ]  }
-  - { role: telegraf, tags: [ monitoring ] }
-#  - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ]  }
-#  - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
-
diff --git a/plays/nfssqlnodes.yml b/plays/nfssqlnodes.yml
deleted file mode 100644
index 30b3b1ed1d6ddab06d6b538757ef636538338082..0000000000000000000000000000000000000000
--- a/plays/nfssqlnodes.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-# Role to initialize nfs and SQL Nodes
-# 
-#
-
-- hosts: 'all'
-  tasks:
-  - { name: setup, setup: }
-  tags: [ always ]
-  
-#we need this here to gather facts and fill required variables.
-- hosts: 'ManagementNodes'
-  gather_facts: True
-  tasks:
-  - include_vars: vars/passwords.yml 
-  - include_vars: vars/names.yml
-  - include_vars: vars/ldapConfig.yml
-  - include_vars: vars/filesystems.yml 
-  - include_vars: vars/slurm.yml 
-  - include_vars: vars/vars.yml 
-  - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
-  - { name: set use shared state, set_fact: usesharedstatedir=True }
-  tags: [ always ]
-  
-- hosts: 'SQLNodes,NFSNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  pre_tasks:
-  - { name: set hostgroup, set_fact: hostgroup='SQLNodes', tags: [ always ] }
-  - { name: set use shared state, set_fact: usesharedstatedir=True, tags: [ always ] }
-
-- hosts: 'SQLNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  strategy: free
-  gather_facts: True
-  roles:
-  - { role: upgrade, tags: [ upgrade ] }
-  - { role: make_filesystems, volumes: "{{ dbvolumes }}" }
-  - { role: mysql, mysql_type: mysql_server,  mysql_root_password: "{{ sqlrootPasswd }}", mysql_user_name: slurmdb, mysql_user_db_name: slurm_acct_db, mysql_user_hosts_group: "{{ groups['ManagementNodes'] }}", mysql_user_password: "{{ slurmdb_passwd }}", tags: [ database ] }
-  - { role: slurm-mysql-config, tags: [database,slurmdb] }
-  tags: [ sql ]
- 
-- hosts: 'NFSNodes'
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  strategy: free
-  gather_facts: False
-  roles:
-  - { role: make_filesystems, volumes: "{{ nfsvolumes }}" }
-  tasks:
-  - { name: make homedir, file: { path: /nfsvol/home, state: directory }, become: true, become_user: root }
-  - { name: make usr_local, file: { path: /nfsvol/usr_local_centos7, state: directory }, become: true, become_user: root }
-  - { name: make projects, file: { path: /nfsvol/projects, state: directory }, become: true, become_user: root }
-  - { name: make projects, file: { path: /nfsvol/scratch, state: directory }, become: true, become_user: root }
-  tags: [ nfs ]
-
-- hosts: 'NFSNodes'
-  strategy: free
-  gather_facts: False
-  vars_files: 
-  - vars/passwords.yml 
-  - vars/names.yml
-  - vars/ldapConfig.yml
-  - vars/filesystems.yml 
-  - vars/slurm.yml 
-  - vars/vars.yml 
-  roles:
-  - { role: nfs-server }
-  tags: [ nfs ]
diff --git a/plays/roles b/plays/roles
deleted file mode 120000
index d8c4472ca1b65cea039252e137ff3b4ab5d3a555..0000000000000000000000000000000000000000
--- a/plays/roles
+++ /dev/null
@@ -1 +0,0 @@
-../roles
\ No newline at end of file
diff --git a/plays/vars b/plays/vars
deleted file mode 120000
index e8d9a6429b3aaab679b98557469104f0f7cc952b..0000000000000000000000000000000000000000
--- a/plays/vars
+++ /dev/null
@@ -1 +0,0 @@
-../vars
\ No newline at end of file
diff --git a/roles/extra_packages/tasks/main.yml b/roles/extra_packages/tasks/main.yml
index 46ce40a581ae7baf34981217f2313fb5d506841a..89b03b15efd4a62544bf6c8f7a58ae6ed895efdc 100644
--- a/roles/extra_packages/tasks/main.yml
+++ b/roles/extra_packages/tasks/main.yml
@@ -42,6 +42,13 @@
   when: ansible_os_family == 'RedHat' 
   register: result
 
+- name: "Install extra packages from epel only"
+  yum: "name={{ item }}  update_cache=yes state=present enablerepo=epel" # exclude={{ excludes|join(',') }}
+  with_items: "{{ extra_packages_epel }}"
+  become: true
+  become_user: root
+  when: ansible_os_family == 'RedHat'
+
 - name: "Show yum install output"
   debug: 
     msg: "{{ result.results }}"
diff --git a/roles/mellanox_drivers/tasks/main.yml b/roles/mellanox_drivers/tasks/main.yml
index e088c9cc970c26910ff588d89e1323ce790f95c1..c084b9dad15290738693e02c00253f3f739a1e48 100644
--- a/roles/mellanox_drivers/tasks/main.yml
+++ b/roles/mellanox_drivers/tasks/main.yml
@@ -1,4 +1,10 @@
 ---
+
+- name: "Force this role to fail if no Mellanox hardware is present"
+  # Exclude the role via tags: ansible-playbook --skip-tags mlx
+  shell: "lspci | grep Mellanox"
+  check_mode: yes
+
 - include_vars: mellanoxVars.yml
 
 - name: yum install dependencies
@@ -7,7 +13,7 @@
   become_user: root
   ignore_errors: true
   when: ansible_os_family == "RedHat"
-  
+
 - name: test for existing installation of drivers
   shell: '/bin/ibv_devinfo'
   become: true
@@ -21,8 +27,8 @@
   debug: var=drivers_installed
 
 - name: default dont install
-  set_fact: 
-    install_now: false 
+  set_fact:
+    install_now: false
     reboot_now: false
 
 - name: get driver version
@@ -39,8 +45,8 @@
   changed_when: False
 
 - name: set install due to drivers not installed
-  set_fact: 
-    install_now: true 
+  set_fact:
+    install_now: true
     reboot_now: true
   when: drivers_installed.failed
 
@@ -51,8 +57,8 @@
   debug: var=desired_driver_version
 
 - name: set install due to version mismatch
-  set_fact: 
-    install_now: true 
+  set_fact:
+    install_now: true
     reboot_now: true
   when: driver_version.failed or not desired_driver_version.stdout in driver_version.stdout
 
@@ -60,13 +66,13 @@
   debug: var=install_now
 
 - name: copy driver source
-  unarchive: copy=no src="http://consistency0/src/{{ MELLANOX_DRIVER_SRC }}.tgz" dest=/tmp 
+  unarchive: copy=no src="http://consistency0/src/{{ MELLANOX_DRIVER_SRC }}.tgz" dest=/tmp
   become: true
   become_user: root
   when: install_now
 
 #remove old mellanox drivers as they may interfere with an update
-- name: stop lustre 
+- name: stop lustre
   service: name=lustre-client state=stopped
   become: true
   become_user: root
@@ -103,8 +109,8 @@
   become_user: root
   ignore_errors: true
   with_items:
-  - mlnx-ofa_kernel 
-  - mlnx-ofa_kernel-devel 
+  - mlnx-ofa_kernel
+  - mlnx-ofa_kernel-devel
   - mlnx-ofa_kernel-modules
   when: install_now
 
diff --git a/roles/mysql/handlers/main.yml b/roles/mysql/handlers/main.yml
index a934d6955d71ee34415abe3d39d478e296518a3f..a40308453a8ae5304a4fbec76d990cd1b56a1ffa 100644
--- a/roles/mysql/handlers/main.yml
+++ b/roles/mysql/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
-- name: "Restart MySQL" 
+- name: "Restart MySQL"
   service: name={{ sqlServiceName }} state=restarted
   become: true
diff --git a/roles/mysql/tasks/mysql_server.yml b/roles/mysql/tasks/mysql_server.yml
index f8edd4e66ceed4323aa0ad83364ec890b93e80c1..5ad085830619f71689d367cf48f9d8bc230e0df0 100644
--- a/roles/mysql/tasks/mysql_server.yml
+++ b/roles/mysql/tasks/mysql_server.yml
@@ -29,12 +29,18 @@
 #    - ::1
 #    - localhost
 
-- name: update mysql root password for all root accounts  # this will only work if a completely fresh db gets installed because it gets shipped with a blank root pw
+- name: Check that the slurm_acct_db_directory exists
+  stat:
+    path: /var/lib/mysql/slurm_acct_db/   #defined in /vars/filesystems.yaml
+  register: slurm_acct_db_directory_result
+
+# this will only work if a completely fresh db gets installed because it gets shipped with a blank root pw
+- name: update mysql root password for all root accounts
   mysql_user: name=root host=localhost password={{ mysql_root_password }} login_user=root
-  when: mysqldb_confdir_create.changed
-  
+  when: not slurm_acct_db_directory_result.stat.exists and mysqldb_confdir_create.changed
+
 - name: "Adding user database"
-  mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }} 
+  mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }}
 
 - name: "Giving priviliges to user"
   mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
diff --git a/roles/slurm-start/tasks/main.yml b/roles/slurm-start/tasks/main.yml
index 5bd124c036f53a36c5132a2e21a835f4bcf0189e..df0ff262a08d5c63e85f3c0efb4e19082b4be8c2 100644
--- a/roles/slurm-start/tasks/main.yml
+++ b/roles/slurm-start/tasks/main.yml
@@ -1,9 +1,9 @@
 ---
 - name: set use_systemd
-  set_fact: 
+  set_fact:
     use_systemd: True
   when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and
-          ( ansible_distribution_major_version == "7") 
+          ( ansible_distribution_major_version == "7")
 
 - name: set slurmd_enabled (default enabled)
   set_fact:
@@ -38,18 +38,21 @@
   when: use_systemd is defined and start_slurmctld is defined
   register: slurmctld_service_installed
 
-- name: reload systemd
-  shell: systemctl daemon-reload
+- name: reload systemd after slurmd install
+  systemd:
+    daemon_reload: yes
   become: true
   when: use_systemd is defined and start_slurmd is defined and slurmd_service_installed.changed
 
-- name: reload systemd
-  shell: systemctl daemon-reload
+- name: reload systemd after slurmctld service installed
+  systemd:
+    daemon_reload: yes
   become: true
   when: use_systemd is defined and start_slurmctld is defined and slurmctld_service_installed.changed
 
-- name: reload systemd
-  shell: systemctl daemon-reload
+- name: reload systemd after slurmdbd service installed
+  systemd:
+    daemon_reload: yes
   become: true
   when: use_systemd is defined and start_slurmdbd is defined and slurmdbd_service_installed.changed
 
@@ -62,7 +65,6 @@
   become: true
   when: start_slurmdbd is defined
 
-
 - name: "create cluster in slurm db"
   shell:  "{{slurm_dir}}/bin/sacctmgr -i create cluster {{ clustername }}"
   become: true
diff --git a/roles/vncserver/tasks/main.yml b/roles/vncserver/tasks/main.yml
index 1a0078fb7dc9b2f4870f3fae247148bbb9abcb79..d955ea1c8eb1e4886a81507d1efcd73223bc5214 100644
--- a/roles/vncserver/tasks/main.yml
+++ b/roles/vncserver/tasks/main.yml
@@ -6,8 +6,8 @@
 
 - name: install system packages apt
   apt: name={{ item }} state=present update_cache=true  force=yes
+  with_items: "{{ system_packages }}"
   become: true
-  with_items: system_packages
   when: ansible_os_family == 'Debian'
 
 - name: force the use of KDE desktop
diff --git a/vars/CentOS.yml b/vars/CentOS.yml
deleted file mode 100644
index b75cf94e8a8f294ea8a61452bbad446e526d1d0e..0000000000000000000000000000000000000000
--- a/vars/CentOS.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-apache: httpd
-packager: yum
diff --git a/vars/Debian.yml b/vars/Debian.yml
deleted file mode 100644
index 8f507dba4373f768cbea30130d0b4919a9e1b01c..0000000000000000000000000000000000000000
--- a/vars/Debian.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-apache: apache2 
-packager: apt