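# GitLab CI pipeline for building and testing an HPC cluster on the HPCCICD
# NECTAR allocation: lint the config, build the stack with OpenStack Heat,
# configure it with Ansible, run the test suite, then tear the stack down.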
variables:
  GIT_SUBMODULE_STRATEGY: recursive
  STACKNAME: CICD_${CI_PROJECT_NAME}_${CI_COMMIT_REF_NAME}
  NECTAR_ALLOCATION: HPCCICD
  ANSIBLE_HOST_KEY_CHECKING: "False"
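
# Pipeline flow: lint the Heat templates and the Ansible playbook, build or
# update the CI stack with Heat, configure it with Ansible, run the tests,
# and finally delete the stack (via the EXTENDED variable or the manual clean job).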
stages:
#  - integration_test_downstream  # working but unwanted here
#  - trigger_pipeline_in_B        # working but unwanted here
  - lint
  #- delete_stack_manual
  - extended
  #- heat_test
  - heat
  - ansible_create_cluster_stage
  - push_button_spawn_cluster
#  - e2e
  - tests
  - clean  # manually delete stack

#trigger_pipeline_in_B:
#  stage: integration_test_downstream
#  tags:
#  - ansible
#  script:
#    - "curl --request POST --form token=${CI_JOB_TOKEN} --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/1085/trigger/pipeline"  # ID is from pysshauthz

# heat_test:
#   stage: heat_test
#   allow_failure: false
#   tags:
#   - heat
#   before_script:
#     - echo "$GC_KEY" > gc_key.pem
#     - chmod 400 gc_key.pem
#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
#     - source ./$NECTAR_ALLOCATION-openrc.sh
#     - export HEAT_TEST_STACKNAME=_TESTING_HEAT
#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
#     - sleep 60
#   script:
#     - echo "heat_test stage"
#     - source ./$NECTAR_ALLOCATION-openrc.sh
#     - bash -x ./CICD/heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
#     - openstack stack list
#     - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
#     - openstack stack list
#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
#     - openstack stack list
#   after_script:
#     - sleep 20  # artificially wait a bit to make sure it is really dead
#   when: manual
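
# Lint stage: yamllint checks the Heat templates and run_lint.py lints the master
# playbook; both jobs are allowed to fail so later stages still run (allow_failure: true).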
yamllint:
  stage: lint
  allow_failure: true
  tags:
  - yamllint
  script:
    - echo "stage yamllint"
    - cd CICD
    # - ansible-lint -c .yamllintconf.yaml -x ANSIBLE0002 master_playbook.yml
    - yamllint -c ./.yamllintheat.yaml ./heat

# delete_stack_manual:
#   stage: delete_stack_manual
#   tags:
#   - heat
#   before_script:
#     - echo "$GC_KEY" > gc_key.pem
#     - chmod 400 gc_key.pem
#     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
#   script:
#     - echo "heat stage"
#     - source ./$NECTAR_ALLOCATION-openrc.sh
#     - openstack stack list
#     - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
#   when: manual

ansiblelint:
  allow_failure: true
  stage: lint
  tags:
  - ansiblelint
  script:
    - echo "stage ansiblelint"
    - cd CICD
    - python3 ansiblelint/run_lint.py --targets master_playbook.yml
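
# Heat stage: write the deployment key and openrc file from CI variables, then
# create or update the OpenStack stack named after this project and branch.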
build_cluster_cicd:
  stage: heat
  allow_failure: false
  tags:
  - heat
  before_script:
    - echo "$GC_KEY" > gc_key.pem
    - chmod 400 gc_key.pem
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - echo "heat stage"
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - openstack stack list
    - bash -x ./CICD/heat/heatcicdwrapper.sh create_or_update $STACKNAME
  after_script:
    - sleep 20  # artificially wait a bit to give the nodes time to boot
#  only:
#    changes:  # https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
#      - "heat/*HOT*.yaml"
#      - schedules
#      - ./.gitlab-ci.yml
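
# Ansible stage: run CICD/ansible_create_cluster_script.sh against the stack built
# above (the script is expected to generate the inventory and apply the playbooks).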
ansible_create_cluster_stage:
  stage: ansible_create_cluster_stage
  tags:
  - ansible
  before_script:
    - echo "$GC_KEY" > gc_key.pem
    - chmod 400 gc_key.pem
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - echo "ansible_create_cluster_stage"
    - bash -x ./CICD/ansible_create_cluster_script.sh
  #after_script:
  #  - rm ./files/inventory.$STACKNAME
  #only:
  #  changes:  # https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
  #    - "master_playbook.yml"
  #    - "vars/*.{yml,yaml}"
  #    - schedules
  #    - CICD/.gitlab-ci.yml
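
# Tests stage: rebuild a static inventory from the stack, check connectivity and
# sudo access, verify the Slurm and MariaDB services, then run the per-node-group
# test suites via run_tests.sh.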
tests:
  stage: tests
  tags:
  - ansible
  before_script:
    - echo "$GC_KEY" > gc_key.pem
    - chmod 400 gc_key.pem
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - echo "tests stage"
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - openstack stack list
    - cd CICD
    - python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
    - grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME  # fail if inventory file is empty
    - ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
    - ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sinfo" ManagementNodes
    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "squeue" ManagementNodes
    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
    - ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
    - bash -e ./tests/run_tests.sh all "files/inventory.$STACKNAME" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh ComputeNodes "files/inventory.$STACKNAME" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh LoginNodes "files/inventory.$STACKNAME" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
# licensing https://gitlab.erc.monash.edu.au/hpc-team/license_server/tree/master/roles/avizo_license_monitor
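
# Manual "push button" job: spawn a cluster from an arbitrary stack name supplied
# via the MANUAL_STACKNAME CI variable (only available on the cicd branch).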
manual_cluster_spawn:
  stage: push_button_spawn_cluster
  tags:
  - heat
  - ansible
  before_script:
    - echo "push-button cluster spawn"
    - echo "for this to work you have to provide a CI variable called MANUAL_STACKNAME"
    - echo "TODO handle the OS password"
    - echo $MANUAL_STACKNAME
    - echo "$GC_KEY" > gc_key.pem
    - chmod 400 gc_key.pem
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
    - openstack stack list
    - export STACKNAME=$MANUAL_STACKNAME
    - sleep 25
    - bash -x CICD/ansible_create_cluster_script.sh
  when: manual
  only:
    refs:
      - "cicd"
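
# Cleanup: the extended job deletes the stack automatically when the EXTENDED
# variable is set; the clean job below does the same on demand (manual trigger).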
extended:
  stage: extended
  tags:
  - heat
  - ansible
  before_script:
    - echo "cleanup stack"
    - sleep 30
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
  only:
    variables:
      - $EXTENDED != null

clean:
  stage: clean
  tags:
  - heat
  before_script:
    - echo "cleanup stack"
    - sleep 30
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
  when: manual