Commit e8e157fb authored by Andreas Hamacher

merging master

Former-commit-id: 71c625c6
parents bb1f1896 cb30fa6c
*.swp
*.retry
*-openrc.sh
gc_key.pem
CICD/files/slurm.conf
CICD/files/slurmdbd.conf
CICD/files/ssh_known_hosts
variables:
GIT_SUBMODULE_STRATEGY: recursive
STACKNAME: CICD_reporef$CI_COMMIT_REF_NAME
NECTAR_ALLOCATION: HPCCICD
ANSIBLE_HOST_KEY_CHECKING: "False"
stages:
# - integration_test_downstream # working but unwanted here
# - trigger_pipeline_in_B # working but unwanted here
- lint
#- delete_stack_manual
- extended
#- heat_test
- heat
- ansible_create_cluster_stage
- push_button_spawn_cluster
# - e2e
- tests
- clean # manually delete stack
#trigger_pipeline_in_B:
# stage: integration_test_downstream
# tags:
# - ansible
# script:
# - "curl --request POST --form token=${CI_JOB_TOKEN} --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/1085/trigger/pipeline" # ID is from pysshauthz
# heat_test:
# stage: heat_test
# allow_failure: false
# tags:
# - heat
# before_script:
# - echo "$GC_KEY" > gc_key.pem
# - chmod 400 gc_key.pem
# - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
# - source ./$NECTAR_ALLOCATION-openrc.sh
# - export HEAT_TEST_STACKNAME=_TESTING_HEAT
# - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
# - sleep 60
# script:
# - echo "heat_test stage"
# - source ./$NECTAR_ALLOCATION-openrc.sh
# - bash -x ./CICD/heat/heatcicdwrapper.sh create $HEAT_TEST_STACKNAME
# - openstack stack list
# - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
# - openstack stack list
# - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
# - openstack stack list
# after_script:
# - sleep 20 # artificially wait a bit to make sure it is really dead
# when: manual
yamllint:
stage: lint
allow_failure: true
tags:
- yamllint
script:
- echo "stage yamllint"
- cd CICD
# - ansible-lint -c .yamllintconf.yaml -x ANSIBLE0002 master_playbook.yml
- yamllint -c ./.yamllintheat.yaml ./heat
# delete_stack_manual:
# stage: delete_stack_manual
# tags:
# - heat
# before_script:
# - echo "$GC_KEY" > gc_key.pem
# - chmod 400 gc_key.pem
# - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
# script:
# - echo "heat stage"
# - source ./$NECTAR_ALLOCATION-openrc.sh
# - openstack stack list
# - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
# when: manual
ansiblelint:
allow_failure: true
stage: lint
tags:
- ansiblelint
script:
- echo "stage ansiblelint"
- cd CICD
- python3 ansiblelint/run_lint.py --targets master_playbook.yml
build_cluster_cicd:
stage: heat
allow_failure: false
tags:
- heat
before_script:
- echo "$GC_KEY" > gc_key.pem
- chmod 400 gc_key.pem
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- echo "heat stage"
- source ./$NECTAR_ALLOCATION-openrc.sh
- openstack stack list
- bash -x ./CICD/heat/heatcicdwrapper.sh create_or_update $STACKNAME
after_script:
- sleep 20 # artificially wait a bit to give the nodes time to boot
# only:
# changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
# - "heat/*HOT*.yaml"
# - schedules
# - ./.gitlab-ci.yml
ansible_create_cluster_stage:
stage: ansible_create_cluster_stage
tags:
- ansible
before_script:
- echo "$GC_KEY" > gc_key.pem
- chmod 400 gc_key.pem
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- echo "ansible_create_cluster_stage"
- bash -x ./CICD/ansible_create_cluster_script.sh
#after_script:
#- rm ./files/inventory.$STACKNAME
#only:
# changes: #https://docs.gitlab.com/ee/ci/yaml/#onlychangesexceptchanges
# - "master_playbook.yml"
# - "vars/*.{yml,yaml}"
# - schedules
# - CICD/.gitlab-ci.yml
tests:
stage: tests
tags:
- ansible
before_script:
- echo "$GC_KEY" > gc_key.pem
- chmod 400 gc_key.pem
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- echo "tests stage"
- source ./$NECTAR_ALLOCATION-openrc.sh
- openstack stack list
- cd CICD
- python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
- grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME #fail if inventory file is empty
- ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
- ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sinfo" ManagementNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "squeue" ManagementNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
- bash -e ./tests/run_tests.sh all "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh ComputeNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh LoginNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
# licensing https://gitlab.erc.monash.edu.au/hpc-team/license_server/tree/master/roles/avizo_license_monitor
manual_cluster_spawn:
stage: push_button_spawn_cluster
tags:
- heat
- ansible
before_script:
- echo "press button spawn cluster."
- echo "for this to work you have to provide a variable called manual stackname"
- echo I still need to handle os password
- echo $MANUAL_STACKNAME
- echo "$GC_KEY" > gc_key.pem
- chmod 400 gc_key.pem
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- source ./$NECTAR_ALLOCATION-openrc.sh
- bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
- openstack stack list
- export STACKNAME=$MANUAL_STACKNAME
- sleep 25
- bash -x CICD/ansible_create_cluster_script.sh
when: manual
only:
refs:
- "cicd"
extended:
stage: extended
tags:
- heat
- ansible
before_script:
- echo "cleanup stack"
- sleep 30
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- source ./$NECTAR_ALLOCATION-openrc.sh
- bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
only:
variables:
- $EXTENDED != null
clean:
stage: clean
tags:
- heat
before_script:
- echo "cleanup stack"
- sleep 30
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- source ./$NECTAR_ALLOCATION-openrc.sh
- bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
#when: manual
# copied from luhan
extends: default
rules:
braces:
level: warning
max-spaces-inside: 1
brackets:
level: warning
max-spaces-inside: 1
colons:
level: warning
commas:
level: warning
comments: disable
comments-indentation: disable
document-start: disable
empty-lines:
level: warning
hyphens:
level: warning
indentation:
level: warning
indent-sequences: consistent
line-length:
level: warning
allow-non-breakable-inline-mappings: true
truthy: disable
trailing-spaces:
level: warning
---
extends: default
rules:
braces: {min-spaces-inside: 0, max-spaces-inside: 1}
brackets: {min-spaces-inside: 0, max-spaces-inside: 1}
comments: disable
comments-indentation: disable
document-start: disable
indentation: disable
line-length: disable
#!/bin/bash
set -e
export ANSIBLE_HOST_KEY_CHECKING=False
source ./$NECTAR_ALLOCATION-openrc.sh
openstack stack list
cd CICD
python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME #fail if inventory file is empty
ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
#cd roles
#- "egrep -lRZ 'sudo: true' . | xargs -0 -l sed -i -e 's/sudo: true/become: true/g' "
#cd ..
ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem master_playbook.yml
sleep 15
echo uglyuglyfix
ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -b -a "systemctl restart slurmdbd" ManagementNodes
logdir/*
import yaml
from argparse import ArgumentParser
import subprocess
from pathlib import Path
import re
import sys
import os
from collections import defaultdict
def parse_argument():
parser = ArgumentParser("ansible lint runner with customized spec")
parser.add_argument('--targets', type=str, nargs='*',
help="path to roles or playbook targets")
parser.add_argument('--logdir', type=Path, default=Path(__file__ + '/../logdir').resolve(), nargs='?', help='log directory, defaults to ./ansiblelint/logdir')
args = parser.parse_args()
args.logdir.mkdir(exist_ok=True)
return args
def parse_rule_output(line):
# (filepath, line, rule, severity, rule_desc)
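# An illustrative example of a line this regex is expected to match (exact rule
# IDs, severities and messages depend on the installed ansible-lint version):
#   roles/foo/tasks/main.yml:12: [E301] [MEDIUM] Commands should not change things if nothing needs doing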
expression = r'(.*\.yml):([0-9]+): \[(.*)\] \[(.*)\] (.*$)'
matched = re.match(expression, line)
# print(line)
matched_groups = matched.groups()
return matched_groups
def group_by(output, idx):
res = defaultdict(list)
for i in output:
# print(i)
res[i[idx]].append(i)
return res
cmd_template = "ansible-lint --parseable-severity --nocolor "
outputs = defaultdict()
def main():
exit_code = 0
args = parse_argument()
for item in args.logdir.iterdir():
item.unlink()
cmd = cmd_template
if args.targets is not None:
cmd += ' ' + ' '.join(args.targets)
else:
rolenames = [str(i.resolve())
for i in Path(__file__ + '/../../plays/roles').resolve().iterdir() if i.is_dir()]
cmd += ' ' + ' '.join(rolenames)
# print(cmd)
logfile = args.logdir.joinpath('logfile')
cmd += ' 2>&1 | tee {}'.format(str(logfile.resolve()))
# print(cmd)
output = subprocess.check_output(cmd, shell=True)
print(output.decode())
output = output.decode().splitlines()
# print(output)
output = [parse_rule_output(line) for line in output]
# group by severity
output = group_by(output, 3)
# print(output.keys())
# print(output.keys())
for k,v in output.items():
# print(k, v)
if k == 'VERY_HIGH' and len(v) != 0:
exit_code = 1
current_log = args.logdir.joinpath(k).resolve()
with current_log.open(mode='w') as f:
f.writelines(['filepath\tline\trule\tseverity\trule description\n'])
f.writelines(['\t'.join(list(i)) + '\n' for i in v])
sys.exit(exit_code)
# return
if __name__ == "__main__":
main()
---
# https://docs.ansible.com/ansible-lint/rules/default_rules.html
error:
- 101
- 102
- 103
- 104
- 202
- 304
- 306
- 401
- 402
- 403
- 404
- 501
- 502
- 701
warning:
- 105
- 201
- 203
- 204
- 205
- 206
- 301
- 302
- 303
- 305
- 503
- 504
- 601
- 602
- 702
- 703
- 704
ssh_known_hosts
*.conf
etcHosts
inventory.*
/nfsvol/home *(fsid=1,rw,no_root_squash)
/slurmstate *(fsid=2,rw,no_root_squash)
/nfsvol/projects *(fsid=4,rw,no_root_squash)
/nfsvol/scratch *(fsid=5,rw,no_root_squash)
---
heat_template_version: 2013-05-23
description: "A simple template to boot a cluster of desktops (LoginNode, ManagementNodes and Desktop Nodes)"
# avz parameters disabled. They work, but I want more options than just monash-02; ideally a parameter that means "I don't care which availability zone".
parameters:
ubuntu_1804_image_id:
type: string
label: Image ID
description: Ubuntu Image
default: 99d9449a-084f-4901-8bd8-c04aebd589ca
centos_7_image_id:
type: string
label: Image ID
description: CentOS Image
default: c47c3acb-9657-4243-9e14-e6c676157e3b #with NetworkManager
ssh_key:
type: string
default: gc_key
avz:
type: string
default: monash-02
project_name:
type: string
NetID:
type: string
default: Classic Provider
Flavour:
type: string
default: t3.xsmall
resources:
SlurmSecGroup:
type: "OS::Neutron::SecurityGroup"
properties:
name: "heatslurmsecgroup"
rules: [ { protocol: tcp,
port_range_min: 12000,
port_range_max: 12999},
{ protocol: tcp,
port_range_min: 6817,
port_range_max: 6819},
{ protocol: tcp,
port_range_min: 1019,
port_range_max: 1019}]
NFSSecGroup:
type: "OS::Neutron::SecurityGroup"
properties:
name: "heatnfssecgroup"
rules: [ { protocol: tcp,
port_range_min: 2049,
port_range_max: 2049},
{ protocol: tcp,
port_range_min: 111,
port_range_max: 111},
{ protocol: udp,
port_range_min: 2049,
port_range_max: 2049},
{ protocol: udp,
port_range_min: 111,
port_range_max: 111}]
MySQLSecGroup:
type: "OS::Neutron::SecurityGroup"
properties:
name: "heatmysqlsecgroup"
rules: [ { protocol: tcp,
port_range_min: 3306,
port_range_max: 3306} ]
SSHMonashSecGroup:
type: "OS::Neutron::SecurityGroup"
properties:
name: "SSHMonashSecGroup"
rules: [ { protocol: tcp,
port_range_min: 22,
port_range_max: 22,
direction: ingress,
remote_ip_prefix: 118.138.240.0/21
} ]
# SSHInternalSecGroup:
# type: "OS::Neutron::SecurityGroup"
# properties:
# name: "SSHInternalSecGroup"
# rules: [ { protocol: tcp,
# port_range_min: 22,
# port_range_max: 22,
# direction: ingress} ]
#remote_ip_prefix: { get_param: REMOTE_IP }, direction: ingress
webaccess:
type: "OS::Neutron::SecurityGroup"
properties:
name: "webaccess"
rules: [ { protocol: tcp,
port_range_min: 80,
port_range_max: 80},
{ protocol: tcp,
port_range_min: 443,
port_range_max: 443} ]
SQLNode0:
type: "OS::Nova::Server"
properties:
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'sql0' ]]
availability_zone: { get_param: avz }
flavor: m3.small
image: { get_param: centos_7_image_id }
key_name: { get_param: ssh_key }
security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: MySQLSecGroup }, { get_resource: NFSSecGroup } ]
metadata:
ansible_host_groups: [ SQLNodes, NFSNodes ]
ansible_ssh_user: ec2-user
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
NFSVolume:
type: OS::Cinder::Volume
properties:
availability_zone: { get_param: avz }
size: 1
name: nfsvol
NFSVolumeAttachment:
type: "OS::Cinder::VolumeAttachment"
properties:
volume_id: { get_resource: NFSVolume }
instance_uuid: { get_resource: SQLNode0 }
SLURMSTATEVolume:
type: OS::Cinder::Volume
properties:
availability_zone: { get_param: avz }
size: 1
name: slurmstate
SLURMSTATEVolumeAttachment:
type: "OS::Cinder::VolumeAttachment"
properties:
volume_id: { get_resource: SLURMSTATEVolume }
instance_uuid: { get_resource: SQLNode0 }
DBVolume:
type: OS::Cinder::Volume
properties:
availability_zone: { get_param: avz }
size: 10
name: dbvol
DBVolumeAttachment:
type: "OS::Cinder::VolumeAttachment"
properties:
volume_id: { get_resource: DBVolume }
instance_uuid: { get_resource: SQLNode0 }
MgmtNodes:
type: "OS::Heat::ResourceGroup"
properties:
count: 2
resource_def:
type: My::Server::MgmtNode
properties:
#avz: { get_param: avz }
image: { get_param: centos_7_image_id }
ansible_ssh_user: ec2-user
mynodename:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmt%index%' ]]
ssh_key: { get_param: ssh_key }
security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
project_name: { get_param: project_name }
LoginNodes:
type: "OS::Heat::ResourceGroup"
properties:
count: 1
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: centos_7_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'login%index%' ]]
security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
metadata:
ansible_host_groups: [ LoginNodes ]
ansible_ssh_user: ec2-user
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
DesktopNodes:
type: "OS::Heat::ResourceGroup"
properties:
count: 0
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: centos_7_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopc%index%' ]]
security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
metadata:
ansible_host_groups: [ DesktopNodes, VisNodes, ComputeNodes ]
ansible_ssh_user: ec2-user
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
ComputeNodes:
type: "OS::Heat::ResourceGroup"
properties:
count: 1
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: centos_7_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec%index%' ]]
security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
metadata:
ansible_host_groups: [ ComputeNodes ]
ansible_ssh_user: ec2-user
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
UbuntuDesktopNodes:
type: "OS::Heat::ResourceGroup"
properties:
count: 0
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: ubuntu_1804_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopu%index%' ]]
security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
metadata:
ansible_host_groups: [ DesktopNodes ]
ansible_ssh_user: ubuntu
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
# PySSHauthz:
# type: "OS::Nova::Server"
# properties:
# name:
# list_join: [ '-', [ { get_param: "OS::stack_name" }, 'pysshautz' ]]
# availability_zone: { get_param: avz }
# flavor: t3.xsmall
# image: { get_param: ubuntu_1804_image_id }
# key_name: { get_param: ssh_key }
# security_groups: [ { get_resource: SSHMonashSecGroup }, { get_resource: webaccess } ]
# metadata:
# ansible_host_groups: [ PySSHauthz ]
# ansible_ssh_user: ubuntu
# project_name: { get_param: project_name }
# networks:
# - network: { get_param: NetID }
#!/bin/bash
# This script does not check available resources on Nectar!
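# A possible pre-flight check would be to compare quota headroom against the
# template's needs before calling "openstack stack create", e.g. (illustrative
# sketch only, not wired into this script):
#   openstack limits show --absolute -f value -c Name -c Value
# For now the wrapper simply relies on Heat failing when the quota is exhausted.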
function usage {
echo $"Usage: $0 {create|update|show|create_or_update,delete_if_exists} STACKNAME"
exit 1
}
if [ "$#" -ne 2 ]; then
echo "Illegal number of parameters expecting 2"
usage
fi
STACKNAME=$2
if [[ "$STACKNAME" == "CICD"* ]]; then
echo "CICD found in stackname. doing nothing"
else
STACKNAME="CICD"$STACKNAME
fi
echo "[heatcicdwrapper] Prefixing Stackname with CICD. This is a safety feature because this script can also delete stacks" $STACKNAME
function check_stack_exists {
if openstack stack list | grep -w $STACKNAME;
then
echo "stack found";
else
echo "stack not found";
return 1
fi
}
function func_delete_if_exists {
if ! check_stack_exists
then
exit 0
fi
openstack stack delete -y --wait $STACKNAME
ret=$?
if [ $ret -ne "0" ]
then
sleep 15
openstack stack delete -y --wait $STACKNAME
ret=$?
fi
exit $ret
}
function create_stack {
if check_stack_exists
then
echo "I will NOT create existing stack maybe use update"
exit -44
fi
openstack stack create --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
createreturn=$?
if [ $createreturn -ne "0" ]
then
echo "creation failed. trying to delete"
openstack stack delete -y --wait $STACKNAME
exit -47
fi
exit $createreturn
}
case "$1" in
create)
create_stack
;;
update)
if ! check_stack_exists
then
echo "I cannot update a stack which does not exist"
exit -45
fi
openstack stack update --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
ret=$?
exit $ret
;;
create_or_update)
if check_stack_exists
then
openstack stack update --wait --template ./CICD/heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./CICD/heat/resource_registry.yaml $STACKNAME
ret=$?
exit $ret
fi
create_stack
;;
delete_if_exists)
func_delete_if_exists
;;
show)
check_stack_exists
echo $?
OUTPUT=$(openstack stack show $STACKNAME| grep -w stack_status)
echo $OUTPUT
;;
*)
usage
esac
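# Example invocations (stack names are illustrative; a missing CICD prefix is added automatically):
#   ./heatcicdwrapper.sh create_or_update mycluster   # operates on stack CICDmycluster
#   ./heatcicdwrapper.sh delete_if_exists mycluster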
heat_template_version: 2013-05-23
parameters:
mynodename:
type: string
ssh_key:
type: string
image:
type: string
#avz:
# type: string
project_name:
type: string
ansible_ssh_user:
type: string
security_groups:
type: json
NetID:
type: string
#default: 915a3d96-693d-4c9d-a2ef-04996ab085d3
default: Classic Provider
resources:
instance:
type: OS::Nova::Server
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: image }
key_name: { get_param: ssh_key }
security_groups: { get_param: security_groups }
name: { get_param: mynodename }
metadata:
ansible_host_groups: [ ManagementNodes ]
ansible_ssh_user: { get_param: ansible_ssh_user }
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
resource_registry:
My::Server::MgmtNode: mgmtnode_HOT.yaml
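# maps the custom My::Server::MgmtNode type used by the MgmtNodes ResourceGroup in gc_HOT.yaml;
# this file is passed to "openstack stack create/update" via the -e flag in heatcicdwrapper.sh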
---
- import_playbook: plays/make_files.yml
- import_playbook: plays/allnodes.yml
- import_playbook: plays/init_slurmconf.yml # this requires management nodes
- import_playbook: plays/nfssqlnodes.yml
- import_playbook: plays/mgmtnodes.yml
- import_playbook: plays/computenodes.yml
- hosts: 'all'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
tasks:
- { name: set use shared state, set_fact: usesharedstatedir=False }
- { name: set hostgroup, set_fact: hostgroup='ComputeNodes' }
tags: [ always ]
- hosts: 'all'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
strategy: free
roles:
# - { role: disable_selinux, tags: [ disableselinux ] }
- { role: etcHosts, tags: [ networking ] }
- { role: config_repos, tags: [ repos ] }
- { role: upgrade }
- { role: set_password }
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,ManagementNodes'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
strategy: free
roles:
- { role: disable_selinux, tags: [ disableselinux ] }
#- { role: ldapclient, tags: [ authentication ] }
- { role: ssh-password-login, tags: [ authentication ] }
- { role: enable_sudo_group, tags: [ authentication, sudo ] }
- { role: move_homedir }
- { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
- { role: SSHKnownHosts, tags: [ known_hosts ] }
- { role: jasons_ssh_ca, tags: [ ssh_ca ] }
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
tasks:
- { name: set use shared state, set_fact: usesharedstatedir=False }
tags: [ always ]
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
strategy: free
roles:
- { role: move_homedir, tags: [ authentication, filesystems ] }
- { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
- { role: slurm-common, tags: [ slurm, slurm-common ] }
- { role: lmod, tags: [ other ] }
- { role: enable_modules, default_modules: "lmod", tags: [ other ] }
- { role: postfix, tags: [ mail, other ] }
- hosts: 'VisNodes'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
roles:
- { role: gpu, tags: [ gpu ] }
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
roles:
- { role: slurm_config, tags: [slurm, slurm_config] }
- hosts: 'DesktopNodes,ComputeNodes'
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
strategy: free
roles:
- { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
#- { role: mate-de-install, tags: [ mate-de-install ] } # TODO this crashes for everything except cmca
../files
---
- hosts: 'all'
tasks:
- include_vars: vars/passwords.yml
- include_vars: vars/names.yml
- include_vars: vars/ldapConfig.yml
- include_vars: vars/filesystems.yml
- include_vars: vars/slurm.yml
- include_vars: vars/vars.yml
- hosts: 'all'
tasks:
- { name: setup, setup: }
- hosts: 'ManagementNodes'
roles:
- { role: calculateSlurmConf }
---
# just calculates an /etc/hosts file
- hosts: 'all'
tasks:
- include_vars: vars/passwords.yml
- include_vars: vars/names.yml
- include_vars: vars/ldapConfig.yml
- include_vars: vars/filesystems.yml
- include_vars: vars/slurm.yml
- include_vars: vars/vars.yml
- hosts: 'all'
tasks:
- { name: setup, setup: }
- hosts: 'ManagementNodes'
roles:
- { role: calculateEtcHosts }
#- hosts: 'NFSNodes'
# roles:
# - { role: calculateExports }