Commit cb54f895 authored by Andreas Hamacher

still working on gc_key file

parent 55acbda5
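The diff below replaces references to a literal GC_KEY file (previously materialised in the checkout via echo "$GC_KEY" > GC_KEY) with the variable $GC_KEY. That pattern only works if GC_KEY is defined as a file-type CI/CD variable, in which case GitLab writes the secret to a temporary file and the variable expands to that file's path. A minimal sketch of the assumed setup (the job name is hypothetical, not part of this repo):

    # Assumes GC_KEY is defined under Settings > CI/CD > Variables with type "File":
    # GitLab saves the value to a temp file and $GC_KEY expands to that file's path.
    example_job:
      script:
        - chmod 400 $GC_KEY              # tighten permissions before SSH/ansible use
        - ssh-keygen -l -f $GC_KEY       # e.g. print the key fingerprint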
@@ -55,7 +55,7 @@ yamllint:
 # - heat
 # before_script:
 # - echo "$GC_KEY" > GC_KEY
-# - chmod 400 GC_KEY
+# - chmod 400 $GC_KEY
 # - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
 # script:
 # - echo "heat stage"
@@ -88,14 +88,14 @@ testlustre:
 - sleep 60
 script:
 - echo "heat stage"
-- chmod 400 GC_KEY
+- chmod 400 $GC_KEY
 - source ./$NECTAR_ALLOCATION-openrc.sh
 - openstack stack list
 - openstack stack create --wait --template heat/lustre_HOT.yaml --parameter "project_name=$STACKNAME" $STACKNAME
 - python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
 - cd plays/testlustre
 - sleep 100
-- ansible-playbook -i files/inventory.$STACKNAME --key-file ../../GC_KEY testlustre.yml
+- ansible-playbook -i files/inventory.$STACKNAME --key-file ../$GC_KEY testlustre.yml
 - sleep 60
 - cd ../../
 - bash -x ./heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
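testlustre changes directory into plays/testlustre before running the playbook, which is why the literal form needed ../../GC_KEY while the new form is written ../$GC_KEY; the ../ prefix only resolves if $GC_KEY expands to a relative path. A hypothetical guard (not in this repo) would make a wrong path fail fast:

    # fail early if the key path handed to ansible-playbook does not exist
    test -f ../$GC_KEY || { echo "no key file at ../$GC_KEY" >&2; exit 1; }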
@@ -113,7 +113,7 @@ build_cluster_cicd:
 script:
 - echo "heat stage"
 #- source ./$NECTAR_ALLOCATION-openrc.sh
-- chmod 400 ../GC_KEY
+- chmod 400 $GC_KEY
 - openstack stack list
 - bash -x ./heat/heatcicdwrapper.sh create_or_update $STACKNAME
 - bash -x ./heat/server_rebuild.sh all
@@ -134,19 +134,19 @@ ansible_create_cluster_stage:
 - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
 script:
 - echo "ansible_create_cluster_stage"
-- chmod 400 GC_KEY
+- chmod 400 $GC_KEY
 - bash -x ./CICD/ansible_create_cluster_script.sh
 - cd CICD
-- ansible-playbook -i files/inventory.$STACKNAME --key-file ../GC_KEY --skip-tags SiteSpecific master_playbook.yml
+- ansible-playbook -i files/inventory.$STACKNAME --key-file $GC_KEY --skip-tags SiteSpecific master_playbook.yml
 - sleep 15
 - echo uglyuglyfix
-- ansible -i files/inventory.$STACKNAME --key-file ../GC_KEY -b -a "systemctl restart slurmdbd" ManagementNodes
-- ansible -i files/inventory.$STACKNAME --key-file ../GC_KEY -b -a "systemctl restart slurmctld" ManagementNodes
+- ansible -i files/inventory.$STACKNAME --key-file $GC_KEY -b -a "systemctl restart slurmdbd" ManagementNodes
+- ansible -i files/inventory.$STACKNAME --key-file $GC_KEY -b -a "systemctl restart slurmctld" ManagementNodes
 - cd plays
-- ansible-playbook -i files/inventory.$STACKNAME --key-file ../../GC_KEY --skip-tags monitoring computenodes.yml | tee nochange.log
+- ansible-playbook -i files/inventory.$STACKNAME --key-file ../$GC_KEY --skip-tags monitoring computenodes.yml | tee nochange.log
 - echo [ `grep changed= ./nochange.log -c` = `grep changed=0 ./nochange.log -c` ] > bashtest.sh # a crude way to make sure all changed lines are equal to changed=0
 - bash ./bashtest.sh
-- ansible-playbook -i files/inventory.$STACKNAME --key-file ../../GC_KEY --skip-tags monitoring --check computenodes.yml
+- ansible-playbook -i files/inventory.$STACKNAME --key-file ../$GC_KEY --skip-tags monitoring --check computenodes.yml
 tests:
   stage: tests
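The nochange.log check above is, as its inline comment admits, crude: it writes a bracket test into bashtest.sh and executes it, failing the job unless every changed= summary reports changed=0. The same logic inline, as a sketch:

    total=$(grep -c 'changed='  nochange.log)   # all host summary lines
    clean=$(grep -c 'changed=0' nochange.log)   # hosts reporting zero changes
    [ "$total" -eq "$clean" ]                   # non-zero exit fails the job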
@@ -156,30 +156,30 @@ tests:
 - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
 script:
 - echo "tests stage"
-- chmod 400 GC_KEY
+- chmod 400 $GC_KEY
 - source ./$NECTAR_ALLOCATION-openrc.sh
 - openstack stack list
 - cd CICD
 - python3 ../scripts/make_inventory.py static $STACKNAME | tee ./files/inventory.$STACKNAME && chmod 755 ./files/inventory.$STACKNAME
 - grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME # fail if inventory file is empty
-- ansible -m ping -i files/inventory.$STACKNAME --key-file ../GC_KEY all
-- ansible -i files/inventory.$STACKNAME --key-file ../GC_KEY -a "sudo ls" all
+- ansible -m ping -i files/inventory.$STACKNAME --key-file $GC_KEY all
+- ansible -i files/inventory.$STACKNAME --key-file $GC_KEY -a "sudo ls" all
 - echo -e '[defaults]\r\nallow_world_readable_tmpfiles = True' > ansible.cfg
-- ansible-playbook -i files/inventory.$STACKNAME --key-file ../GC_KEY ./tests/mockSlurmData.yml
+- ansible-playbook -i files/inventory.$STACKNAME --key-file $GC_KEY ./tests/mockSlurmData.yml
 # Need to find a better check for sinfo
-#- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../GC_KEY -a "find /opt/ -name sinfo -type f" ManagementNodes
-#- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../GC_KEY -a "find /opt/ -name squeue -type f" ManagementNodes
-- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../GC_KEY -a "systemctl is-active --quiet mariadb" SQLNodes
-- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../GC_KEY -a "systemctl is-active --quiet slurmctld" ManagementNodes
-- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../GC_KEY -a "systemctl is-active --quiet slurmdbd" ManagementNodes
-- bash -e ./tests/run_tests.sh all "files/inventory.$STACKNAME" "../GC_KEY"
-- bash -e ./tests/run_tests.sh ComputeNodes "files/inventory.$STACKNAME" "../GC_KEY"
-- bash -e ./tests/run_tests.sh LoginNodes "files/inventory.$STACKNAME" "../GC_KEY"
-- bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../GC_KEY"
-- bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../GC_KEY"
-- bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../GC_KEY"
-- bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../GC_KEY"
-- ansible -i files/inventory.$STACKNAME --key-file ../GC_KEY -a 'sudo su - user1 -c whoami' LoginNodes,ComputeNodes # to test ldap
+#- ansible -B 30 -i files/inventory.$STACKNAME --key-file $GC_KEY -a "find /opt/ -name sinfo -type f" ManagementNodes
+#- ansible -B 30 -i files/inventory.$STACKNAME --key-file $GC_KEY -a "find /opt/ -name squeue -type f" ManagementNodes
+- ansible -B 30 -i files/inventory.$STACKNAME --key-file $GC_KEY -a "systemctl is-active --quiet mariadb" SQLNodes
+- ansible -B 30 -i files/inventory.$STACKNAME --key-file $GC_KEY -a "systemctl is-active --quiet slurmctld" ManagementNodes
+- ansible -B 30 -i files/inventory.$STACKNAME --key-file $GC_KEY -a "systemctl is-active --quiet slurmdbd" ManagementNodes
+- bash -e ./tests/run_tests.sh all "files/inventory.$STACKNAME" "$GC_KEY"
+- bash -e ./tests/run_tests.sh ComputeNodes "files/inventory.$STACKNAME" "$GC_KEY"
+- bash -e ./tests/run_tests.sh LoginNodes "files/inventory.$STACKNAME" "$GC_KEY"
+- bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "$GC_KEY"
+- bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "$GC_KEY"
+- bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "$GC_KEY"
+- bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "$GC_KEY"
+- ansible -i files/inventory.$STACKNAME --key-file $GC_KEY -a 'sudo su - user1 -c whoami' LoginNodes,ComputeNodes # to test ldap
 #- sshpass -p 'redhat' ssh -o StrictHostKeyChecking=no user1@server.example.com
 extended:
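From the calls in the tests job, run_tests.sh evidently takes three positional arguments; this signature is inferred from usage above, not from the script itself:

    # run_tests.sh <ansible-group|all|slurm> <inventory-file> <ssh-key-file>
    bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "$GC_KEY"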
@@ -212,7 +212,7 @@ manual_cluster_spawn:
 - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
 script:
 - source ./$NECTAR_ALLOCATION-openrc.sh
-- chmod 400 GC_KEY
+- chmod 400 $GC_KEY
 - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME ${CI_PROJECT_NAME}
 - openstack stack list
 - export STACKNAME=$MANUAL_STACKNAME
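manual_cluster_spawn re-exports the manually chosen stack name so subsequent steps keep using $STACKNAME. One way to confirm the stack came up before relying on it (a sketch; the heatcicdwrapper.sh output is authoritative):

    export STACKNAME=$MANUAL_STACKNAME
    openstack stack show "$STACKNAME" -f value -c stack_status   # expect CREATE_COMPLETE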
@@ -243,7 +243,7 @@ clean:
 # - heat
 # before_script:
 # - echo "$GC_KEY" > GC_KEY
-# - chmod 400 GC_KEY
+# - chmod 400 $GC_KEY
 # - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
 # - source ./$NECTAR_ALLOCATION-openrc.sh
 # - export HEAT_TEST_STACKNAME=_TESTING_HEAT