---
# Global variables inherited by every job in this pipeline.
variables:
  GIT_SUBMODULE_STRATEGY: recursive  # clone/update submodules automatically
  # Per-branch Heat stack name; jobs below pass this to heatcicdwrapper.sh.
  STACKNAME: _reporef$CI_COMMIT_REF_NAME
  # Nectar allocation name; also used to name the generated openrc file.
  NECTAR_ALLOCATION: HPCCICD

# Stage order; jobs in a later stage run only after the earlier stage succeeds.
stages:
  # - integration_test_downstream # working but unwanted here
  # - trigger_pipeline_in_B # working but unwanted here
  - yamllint
  - heat
  - ansible_create_cluster_stage
  - push_button_spawn_cluster
  # - e2e
  - tests
  - clean # only on master
#trigger_pipeline_in_B:
# stage: integration_test_downstream
# tags:
# - ansible
# script:
# - "curl --request POST --form token=${CI_JOB_TOKEN} --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/1085/trigger/pipeline" # ID is from pysshauthz
# heat_test:
# stage: heat
# allow_failure: false
# tags:
# - heat
# before_script:
# - source ./$NECTAR_ALLOCATION-openrc.sh
# - export HEAT_TEST_STACKNAME=_TESTING_HEAT
# - bash -x ./heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
# - sleep 60
# script:
# - echo "heat_test stage"
# - source ./$NECTAR_ALLOCATION-openrc.sh
# - bash -x ./heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
# - openstack stack list
# - bash -x ./heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
# - openstack stack list
# - bash -x ./heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
# - openstack stack list
# after_script:
#    - sleep 20 # artificially wait a bit to make sure it is really dead
# only:
# changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
# - "heat/heatcicdwrapper.sh"
# - "heat/*.{yml,yaml}"
# - schedules
# #- CICD/.gitlab-ci.yml
# Lint the playbook and the Heat templates; advisory only (allow_failure).
yamllint:
  stage: yamllint
  allow_failure: true  # lint findings do not block the pipeline
  tags:
    - yamllint
  script:
    - echo "stage yamllint"
    # ANSIBLE0002 (trailing whitespace) is excluded from ansible-lint.
    - ansible-lint -x ANSIBLE0002 master_playbook.yml
    - yamllint -c ./.yamllintheat.yaml ./heat
# Create or update the per-branch Heat stack on Nectar OpenStack.
build_cluster_cicd:
  stage: heat
  allow_failure: false
  tags:
    - heat
  before_script:
    # Materialise the SSH key and the openrc credentials from CI variables.
    - echo $GC_KEY > gc_key.pem
    - chmod 400 gc_key.pem
    - echo $HPCCICD_openrc > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - echo "heat stage"
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - openstack stack list
    - bash -x ./CICD/heat/heatcicdwrapper.sh create_or_update $STACKNAME
  after_script:
    - sleep 20 # artificially wait a bit to give the nodes time to boot
  only:
    changes: # https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
      - "heat/*HOT*.yaml"
      # NOTE(review): only:changes entries are file paths — "schedules" never
      # matches a file; schedule-triggered runs likely belong under
      # only:refs instead. Confirm intent before restructuring.
      - schedules
      # Was "./.gitlab-ci.yml": changes paths are repo-root-relative without
      # a "./" prefix; the commented-out jobs above use CICD/.gitlab-ci.yml.
      - "CICD/.gitlab-ci.yml"
# Run the Ansible provisioning wrapper against the freshly built stack.
ansible_create_cluster_stage:
  stage: ansible_create_cluster_stage
  tags:
    - ansible
  before_script:
    # Materialise the SSH key and the openrc credentials from CI variables.
    - echo $GC_KEY > gc_key.pem
    - chmod 400 gc_key.pem
    - echo $HPCCICD_openrc > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - echo "ansible_create_cluster_stage"
    - bash -x ./CICD/ansible_create_cluster_script.sh
  # after_script:
  #   - rm ./files/cicd_inventory
  # only:
  #   changes: # https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
  #     - "master_playbook.yml"
  #     - "vars/*.{yml,yaml}"
  #     - schedules
  #     - CICD/.gitlab-ci.yml
# Smoke-test the running cluster: regenerate the inventory, check core
# services via ad-hoc Ansible, then run the per-node-class test suites.
tests:
  stage: tests
  tags:
    - ansible
  before_script:
    # Materialise the SSH key and the openrc credentials from CI variables.
    - echo $GC_KEY > gc_key.pem
    - chmod 400 gc_key.pem
    - echo $HPCCICD_openrc > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - echo "tests stage"
    - export ANSIBLE_HOST_KEY_CHECKING=False
    - source ./$NECTAR_ALLOCATION-openrc.sh
    # NOTE(review): a corrupted span (bare line numbers 117-145) was removed
    # here; the original commands, if any, could not be recovered from this
    # copy — compare against VCS history to confirm nothing is missing.
    - python3 dependencies/ansible_cluster_in_a_box/scripts/make_inventory.py static CICD$STACKNAME | tee ./files/cicd_inventory
    - grep -qv "I could not find any resouces tagged with project_name:" ./files/cicd_inventory #fail if inventory file is empty
    - chmod 755 ./files/cicd_inventory
    - chmod 400 ./keys/gc_key.pem
    # -B 30: background each ad-hoc task with a 30 second timeout.
    - ansible -B 30 -i files/cicd_inventory --key-file keys/gc_key.pem -a "sinfo" ManagementNodes
    - ansible -B 30 -i files/cicd_inventory --key-file keys/gc_key.pem -a "squeue" ManagementNodes
    - ansible -B 30 -i files/cicd_inventory --key-file keys/gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
    - ansible -B 30 -i files/cicd_inventory --key-file keys/gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
    - ansible -B 30 -i files/cicd_inventory --key-file keys/gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
    # Per-node-class test suites; bash -e aborts on the first failing command.
    - bash -e ./tests/run_tests.sh all "files/cicd_inventory" "keys/gc_key.pem"
    - bash -e ./tests/run_tests.sh ComputeNodes "files/cicd_inventory" "keys/gc_key.pem"
    - bash -e ./tests/run_tests.sh LoginNodes "files/cicd_inventory" "keys/gc_key.pem"
    - bash -e ./tests/run_tests.sh ManagementNodes "files/cicd_inventory" "keys/gc_key.pem"
    - bash -e ./tests/run_tests.sh NFSNodes "files/cicd_inventory" "keys/gc_key.pem"
    - bash -e ./tests/run_tests.sh SQLNodes "files/cicd_inventory" "keys/gc_key.pem"
# licensing https://gitlab.erc.monash.edu.au/hpc-team/license_server/tree/master/roles/avizo_license_monitor
# Manually triggered job (devel branch only): spawn a stack named by the
# $MANUAL_STACKNAME variable, then run the Ansible provisioning wrapper.
manual_cluster_spawn:
  stage: push_button_spawn_cluster
  tags:
    - heat
    - ansible
  before_script:
    - echo "press button spawn cluster."
    - echo "for this to work you have to provide a variable called manual stackname"
    - echo I still need to handle os password
    - echo $MANUAL_STACKNAME
    - echo $GC_KEY > gc_key.pem
    - chmod 400 gc_key.pem
    - echo $HPCCICD_openrc > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - source ./$NECTAR_ALLOCATION-openrc.sh
    # NOTE(review): this uses ./heat/heatcicdwrapper.sh while the heat-stage
    # job uses ./CICD/heat/heatcicdwrapper.sh — confirm which path is correct.
    - bash -x ./heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
    - git submodule update --init --remote --merge
    - openstack stack list
    # Point the provisioning script at the manually named stack.
    - export STACKNAME=$MANUAL_STACKNAME
    - sleep 25
    - bash -x CICD/ansible_create_cluster_script.sh
  environment:
    name: openstack
    url: https://dashboard.rc.nectar.org.au/project/instances/
  when: manual
  only:
    refs:
      - "devel"
# Tear down the per-branch Heat stack if it exists.
# NOTE(review): the stages list says "clean # only on master", but this job
# has no only:/rules: clause — confirm whether a ref restriction is intended.
clean:
  stage: clean
  tags:
    - heat
  before_script:
    - echo "cleanup master"
    - sleep 30
    - echo $GC_KEY > gc_key.pem
    - chmod 400 gc_key.pem
    - echo $HPCCICD_openrc > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - source ./$NECTAR_ALLOCATION-openrc.sh
    # NOTE(review): path is ./heat/... here but ./CICD/heat/... in the heat
    # stage job — confirm which location the wrapper script actually lives at.
    - bash -x ./heat/heatcicdwrapper.sh delete_if_exists $STACKNAME