variables:
  GIT_SUBMODULE_STRATEGY: recursive
  STACKNAME: CICD-${CI_PROJECT_NAME}-gitlabci #-$CI_COMMIT_REF_NAME
  NECTAR_ALLOCATION: HPCCICD
  ANSIBLE_HOST_KEY_CHECKING: "False"
  DEFAULT_PATH: "CICD"
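  # Uncommenting the STACKNAME suffix above would make stack names branch-specific;
  # ANSIBLE_HOST_KEY_CHECKING is also re-exported in the jobs that need it.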

stages:
  - lint
  - openstack_create
  - configure_cluster
  - tests
  - integration_test #https://docs.gitlab.com/ee/ci/triggers/
  - openstack_destroy
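# Pipeline flow: lint the YAML/Ansible, stand up a throwaway cluster on
# OpenStack, configure it, run the test suites, then trigger the downstream
# integration pipelines. Note: no job in this file uses the openstack_destroy
# stage; teardown is presumably handled elsewhere or manually.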


trigger_pipeline_in_Clusterbuild:
  stage: integration_test
  tags:
  - ansible
  script:
  - echo ${CI_JOB_TOKEN}
  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # ID is from clusterbuild
  only:
    variables:
      - $CI_PROJECT_NAME == "HPCasCode"
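  # On newer GitLab (12.3+) the same guard can be written with `rules:` instead
  # of `only: variables:`; a minimal sketch, not used here:
  #   rules:
  #     - if: '$CI_PROJECT_NAME == "HPCasCode"'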


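# Same trigger as above, pointed at the monarch project (ID 385).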
trigger_pipeline_in_monarch:
  stage: integration_test
  tags:
  - ansible
  script:
  - echo ${CI_JOB_TOKEN}
  - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline  # ID is from monarch
  only:
    variables:
      - $CI_PROJECT_NAME == "HPCasCode"

yamllint:
  stage: lint
  allow_failure: true
  tags:
  - yamllint
  script:
    - echo "stage yamllint"
    - cd CICD
    - yamllint -c ./.yamllintheat.yaml ./heat


ansiblelint:
  allow_failure: true
  stage: lint
  tags:
  - ansiblelint
  script:
    - echo "stage ansiblelint"
    - cd CICD
    - python3 ansiblelint/run_lint.py --targets master_playbook.yml
    - python3 ansiblelint/run_lint.py --targets ../qa.yml
    - python3 ansiblelint/run_lint.py --targets ../maintenance.yml


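# build_infra expects $HPCCICD_openrc (the contents of an OpenStack RC file) as
# a CI/CD variable, generates a throwaway ed25519 key for the cluster, and
# (assumption: template.py renders the Heat templates) creates the stack via
# the os_create.yml playbook before writing an Ansible inventory for it.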
build_infra:
  stage: openstack_create
  image: ubuntu
  allow_failure: false
  tags:
  - heat
  before_script:
    - cd $DEFAULT_PATH
    - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
  script:
    - whoami
    - apt update
    - apt -y upgrade
    - apt -y install openssh-client python3-pip
    - pip3 install joblib python-cinderclient python-keystoneclient python-novaclient python-openstackclient PyYAML ansible
    - source ./$NECTAR_ALLOCATION-openrc.sh
    - echo "Generating a random SSH key for our cluster"
    - ssh-keygen -f tmp_key -N "" -t ed25519
    - export SSH_CA=$(cat tmp_key.pub)
    - cd infra
    - python3 ./template.py
    - ansible-playbook os_create.yml
    - python3 ./make_inventory.py $CI_PROJECT_NAME-ci-$CI_COMMIT_REF_NAME > ../inventory.yml
    - cd ..
  artifacts:
    paths:
      - CICD/inventory.yml
      - CICD/tmp_key
      - CICD/tmp_key.pub
      - CICD/infra/os_vars.yml
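    # These artifacts (inventory, ephemeral key pair, OpenStack vars) are what
    # the configure_cluster and tests jobs consume.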

configure_cluster:
  stage: configure_cluster
  image: ubuntu
  tags:
  - ansible
  artifacts:
    paths:
      - CICD/files
      - CICD/vars
  script:
    - whoami
    - apt update
    - apt -y upgrade
    - apt -y install openssh-client python3-pip
    - pip3 install joblib python-cinderclient python-keystoneclient python-novaclient python-openstackclient PyYAML ansible
    - cd CICD
    - python3 ./make_files.py ./inventory.yml ./infra/os_vars.yml ./vars/versions.yml
    - mkdir -p ~/.ssh
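    # Trust the bastion host parsed out of ssh.cfg (assumption: the Proxy line
    # holds a user@host value) before SSHing through it.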
    - ssh-keyscan -H $(grep Proxy ssh.cfg | cut -f 2 -d "@") >> ~/.ssh/known_hosts
    - eval $(ssh-agent)
    - ssh-add ./tmp_key
    - ssh -vvv $(grep Proxy ssh.cfg | cut -f 2 -d "=") exit 0
    - export ANSIBLE_HOST_KEY_CHECKING=False
    - export ANSIBLE_CONFIG=$(pwd)/ansible.cfg
    - ansible -i inventory.yml -m ping 'all'
    - ansible-playbook -i inventory.yml upgrade_and_reboot.yml || true
    - ansible-playbook -i inventory.yml master_playbook.yml
    # I don't think this is necessary any more:
    #- ansible -i inventory.yml -b -a "systemctl restart slurmdbd" ManagementNodes
    #- ansible -i inventory.yml -b -a "systemctl restart slurmctld" ManagementNodes

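# Smoke-test the configured cluster: seed mock Slurm data, check that the key
# services are up, then run the per-node-class test suites.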
tests:
  stage: tests
  tags:
  - ansible
  before_script:
    - whoami
    - apt update
    - apt -y upgrade
    - apt -y install openssh-client python3-pip
    - pip3 install joblib python-cinderclient python-keystoneclient python-novaclient python-openstackclient PyYAML ansible
    - cd CICD
    - mkdir -p ~/.ssh
    - ssh-keyscan -H $(grep Proxy ssh.cfg | cut -f 2 -d "@") >> ~/.ssh/known_hosts
    - eval $(ssh-agent)
    - ssh-add ./tmp_key
    - export ANSIBLE_HOST_KEY_CHECKING=False
    - export ANSIBLE_CONFIG=$(pwd)/ansible.cfg
  script:
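    # ansible -B 30 runs each ad-hoc check asynchronously, failing it after 30s.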
    - ansible-playbook -i inventory.yml ./tests/mockSlurmData.yml
    - ansible -B 30 -i inventory.yml -a "/opt/slurm-latest/bin/sinfo" ManagementNodes
    - ansible -B 30 -i inventory.yml -a "/opt/slurm-latest/bin/squeue" ManagementNodes
    - ansible -B 30 -i inventory.yml -a "/opt/slurm-latest/bin/scontrol ping" LoginNodes
    - ansible -B 30 -i inventory.yml -a "systemctl is-active --quiet ntpd" CentosNodes
    - ansible -B 30 -i inventory.yml -a "systemctl is-active --quiet ntp" UbuntuNodes
    - ansible -B 30 -i inventory.yml -a "systemctl is-active --quiet mariadb" SQLNodes
    - ansible -B 30 -i inventory.yml -a "systemctl is-active --quiet slurmctld" ManagementNodes
    - ansible -B 30 -i inventory.yml -a "systemctl is-active --quiet slurmdbd" ManagementNodes

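    # Per-node-class functional suites; gc_key.pem is referenced but not
    # created in this file (assumption: provisioned by an earlier step or the runner).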
    - bash -e ./tests/run_tests.sh all "inventory.yml" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh ComputeNodes "inventory.yml" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh LoginNodes "inventory.yml" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh ManagementNodes "inventory.yml" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh NFSNodes "inventory.yml" "../gc_key.pem"
    - bash -e ./tests/run_tests.sh SQLNodes "inventory.yml" "../gc_key.pem"
    # Note to self: deactivated because it is broken; fix and re-enable:
    #- bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"
    - ansible -i inventory.yml -a 'sudo su - user1 -c whoami' LoginNodes,ComputeNodes  # to test ldap
    #- sshpass -p 'redhat' ssh -o StrictHostKeyChecking=no user1@server.example.com