Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • hpc-team/HPCasCode
  • chines/ansible_cluster_in_a_box
2 results
Show changes
Showing
with 368 additions and 78 deletions
---
# Vars fragment: defines the CVL yum repository import.
# `command` fetches the repo file; `destination` is where it is written
# (the consuming task appends the destination after wget's -O flag).
importRepo: { command: "wget http://cvlrepo.massive.org.au/repo/cvl.repo -O", destination: "/etc/yum.repos.d/cvl.repo" }
# Package groups below are currently disabled; uncomment to install them.
#yumGroupPackageList:
# - CVL Pre-installation
# - CVL Base Packages
# - CVL System
# - CVL System Extension
# - CVL General Imaging Tools
Files in the playbook directory should be used as reference examples only.
---
# Heat template: boots a small cluster (2 compute nodes + 1 head node).
heat_template_version: 2013-05-23
description: " A simple template to boot a 3 node cluster"

parameters:
  image_id:
    type: string
    label: Image ID
    description: Image to be used for compute instance
    default: a5e74703-f343-415a-aa23-bd0f0aacfc9e
  key_name:
    type: string
    label: Key Name
    description: Name of key-pair to be used for compute instance
    default: shahaan
  availability_z:
    type: string
    label: Availability Zone
    description: Availability Zone to be used for launching compute instance
    default: monash-01

resources:
  # Two identical compute instances; metadata drives Ansible inventory grouping.
  computeNodes:
    type: "OS::Heat::ResourceGroup"
    properties:
      count: 2
      resource_def:
        type: "OS::Nova::Server"
        properties:
          availability_zone: { get_param: availability_z }
          flavor: m1.small
          image: { get_param: image_id }
          key_name: { get_param: key_name }
          metadata:
            ansible_host_group: computeNodes
            ansible_ssh_user: ec2-user
            ansible_ssh_private_key_file: /home/sgeadmin/.ssh/shahaan.pem
  # Single head node, defined in a nested template.
  headNodes:
    type: "OS::Heat::ResourceGroup"
    properties:
      count: 1
      resource_def:
        type: headNode.yaml
#- hosts: 'all'
#gather_facts: false # not sure if false is clever here
#tasks:
#- include_vars: vars/ldapConfig.yml
#- include_vars: vars/filesystems.yml
#- include_vars: vars/slurm.yml
#- include_vars: vars/vars.yml
#- { name: set use shared state, set_fact: usesharedstatedir=False }
#tags: [ always ]
# this playbook is roughly sorted by
# - hostgroupstopics like ComputeNodes or ComputeNodes,LoginNodes, last VisNodes
# - "tag_groups" each starting after a #comment see #misc or misc tag
# QA checks for compute nodes. Everything is tagged `never`, so tasks run
# only when requested explicitly, e.g. ansible-playbook --tags=tag_group
- hosts: 'ComputeNodes'
  gather_facts: false
  tasks:
    # these are just templates.
    # Note the tag never! Everything with never is only executed if called explicitly aka ansible-playbook --tags=foo,bar OR -tags=tag_group
    - { name: template_shell, shell: ls, tags: [never, tag_group, uniquetag_foo] }
    - { name: template_command, command: uname chdir=/bin, tags: [never, tag_group, uniquetag_bar] }
    - { name: template_scipt, script: ./scripts/qa/test.sh, tags: [never, tag_group, uniquetag_script] }
    # mpi stuff
    - { name: run mpi on one computenode, command: ls, args: { chdir: "/tmp" }, failed_when: "TODO is TRUE", tags: [never, mpi, mpi_local, TODO] }
    - { name: run mpi on two computenode, command: ls, args: { chdir: "/tmp" }, failed_when: "TODO is TRUE", tags: [never, mpi, mpi_local_two, TODO] }
    #- { name: run mpi via sbatch, command: cmd=ls chdir="/tmp" , failed_when: "TODO is TRUE", tags: [never,mpi,slurm_mpi,TODO] }
    #- { name: mpi_pinging, command: cmd=ls chdir="/tmp" , failed_when: "TODO is TRUE", tags: [never,mpi,mpi_ping,TODO] }
    #module load openmpi/3.1.6-ucx;mpirun --mca btl self --mca pml ucx -x UCX_TLS=mm -n 24 /projects/pMOSP/mpi/parallel_mandelbrot/parallel/mandelbrot
    #module load openmpi/3.1.6-ucx;srun mpirun --mca btl self --mca pml ucx -x UCX_TLS=mm -n 24 /projects/pMOSP/mpi/parallel_mandelbrot/parallel/mandelbrot
    # slurm
    - { name: slurmd should be running, service: name=slurmd state=started, tags: [never, slurm, slurmd] }
    - { name: munged should be running, service: name=munged state=started, tags: [never, slurm, munged] }
    - { name: ensure connectivity to the controller, shell: scontrol ping, tags: [never, slurm, scontrol_ping] }
    - { name: the most simple srun test, shell: srun --reservation=AWX hostname, tags: [never, slurm, srun_hostname] }
    # nhc, manually run nhc because it contains many tests
    - { name: run nhc explicitly, command: /opt/nhc-1.4.2/sbin/nhc -c /opt/nhc-1.4.2/etc/nhc/nhc.conf, become: true, tags: [never, slurm, nhc] }
    # networking
    - { name: ping license server, shell: ls, tags: [never, network, ping_license] }
    - { name: ping something outside monash, command: ping -c 1 8.8.8.8, tags: [never, network, ping_external] }
# mountpoint and miscellaneous checks, shared by compute and login nodes
- hosts: 'ComputeNodes,LoginNodes'
  gather_facts: false
  tasks:
    - { name: check mount for usr_local, shell: "mount | grep -q local", tags: [never, mountpoints, mountpoints_local] }
    - { name: check mount for projects, shell: "lfs df -h", tags: [never, mountpoints_projects] }
    - { name: check mount for home, shell: "mount | grep -q home", tags: [never, mountpoints, mountpoints_home] }
    - { name: check mount for scratch, shell: "mount | grep -q scratch", tags: [never, mountpoints_scratch] }
    # misc
    - { name: check singularity, shell: module load octave && octave --version, tags: [never, misc, singularity3] }
    - { name: module test, shell: cmd="module load gcc" executable="/bin/bash", tags: [never, misc, modulecmd] }
    - { name: contact ldap, shell: maybe test ldapsearch, failed_when: "TODO is TRUE", tags: [never, misc, ldap, TODO] }
# GPU checks, visualization nodes only
- hosts: 'VisNodes'
  gather_facts: false
  tasks:
    - { name: run nvida-smi to see if a gpu driver is present, command: "/bin/nvidia-smi", tags: [never, gpu, smi] }
    - { name: run gpu burn defaults to 30 seconds, command: "/usr/local/gpu_burn/1.0/run_silent.sh", tags: [never, gpu, long, gpuburn] }
# extended time-consuming tests
# relion see https://docs.massive.org.au/communities/cryo-em/tuning/tuning.html
# linpack
#module load openmpi/1.10.7-mlx;ldd /usr/local/openmpi/1.10.7-mlx/bin/* | grep -ic found
#!/bin/bash
# Attach to a tmux session on the node returned by get_node.py for job/host "$1".
# BUG FIX: the variable was assigned as `mpbctrl` but expanded as `$mbpctrl`,
# so the lookup command was always empty and $node was never set.
mbpctrl='/home/hines/mbp_script/get_node.py'
# Quote expansions so paths/args with spaces or globs are passed intact.
node=$( "$mbpctrl" "$1" )
if [[ -n $node ]]; then
    ssh -t "$node" tmux attach-session
fi
---
# Install tmux and the mbp helper scripts on the management host.
# FIX: `sudo: true` was deprecated in Ansible 1.9 and removed in 2.9 — use
# `become: true` consistently. Octal modes quoted so YAML does not retype them.
- name: Install tmux
  apt:
    name: tmux
    state: latest
  become: true

- name: make sure /usr/local/bin exists
  file:
    path: /usr/local/bin
    state: directory
    mode: "0755"
    owner: root
  become: true

- name: install get_node.py
  copy:
    src: get_node.py
    dest: /usr/local/bin/get_node.py
    mode: "0755"
    owner: root
  become: true

- name: install mbp_node
  copy:
    src: mbp_node
    dest: /usr/local/bin/mbp_node
    mode: "0755"
    owner: root
  become: true
---
# This role is to fix a misconfiguration of some OpenStack Base images at Monash University.
# the misconfiguration is dev/vdb mounted in fstab of the Image and the Openstack Flavour not providing a second disk.
- name: unmount vdb if absent
  mount:
    path: "/mnt"
    src: "/dev/vdb"
    state: absent
  become: true
  when: 'hostvars[inventory_hostname]["ansible_devices"]["vdb"] is not defined'

# /mnt itself should remain as an (empty) directory even after the fstab entry is removed.
- name: keep mnt present
  file:
    path: "/mnt"
    owner: root
    group: root
    mode: "u=rwx,g=rx,o=rx"
    state: directory
  become: true
  when: 'hostvars[inventory_hostname]["ansible_devices"]["vdb"] is not defined'
---
# Handler: restart the OpenVPN service after config/package changes.
# FIX: dropped `sudo: true` — deprecated since Ansible 1.9, removed in 2.9,
# and redundant next to `become: true`.
- name: restart openvpn
  service:
    name: openvpn
    state: restarted
  become: true
---
# Copy the per-host OpenVPN credentials only when they are not already present
# (the *.stat.exists facts are registered by an earlier stat task).
# FIXES: removed stray empty `-` list items (they parse as null tasks and abort
# the play on load); `== false` comparisons rewritten as `not ...` per Ansible
# lint guidance; octal modes quoted so they are not retyped by YAML.
- name: "Copying CA certificate"
  copy:
    src: "/tmp/{{ inventory_hostname }}/ca.crt"
    dest: /etc/openvpn/ca.crt
    mode: "0644"
    owner: root
    group: root
  when: not client_ca_cert.stat.exists

- name: "Copying Client certificate"
  copy:
    src: "/tmp/{{ inventory_hostname }}/{{ inventory_hostname }}.crt"
    dest: "/etc/openvpn/{{ inventory_hostname }}.crt"
    mode: "0644"
    owner: root
    group: root
  when: not client_sign_cert.stat.exists

- name: "Copying Client key"
  copy:
    src: "/tmp/{{ inventory_hostname }}/{{ inventory_hostname }}.key"
    dest: "/etc/openvpn/{{ inventory_hostname }}.key"
    mode: "0600"
    owner: root
    group: root
  when: not client_key.stat.exists
......
---
---
- name: "Install OpenVPN"
yum: "name=openvpn state=present"
sudo: true
become: true
notify: restart openvpn
- name: "Copying client.conf to the OpenVPN client"
template: "src=client.conf.j2 dest=/etc/openvpn/client.conf"
sudo: true
become: true
notify: restart openvpn
# Resolved "after" state of the diff hunk (@@ -3,6 +3,6 @@): the service is
# started AND enabled at boot, and deprecated `sudo: true` is replaced by
# `become: true`.
- include: installOpenVPN.yml
- name: "Start OpenVPN"
  service:
    name: openvpn
    state: started
    enabled: true
  become: true
---
# Handler: restart the OpenVPN service after config/package changes.
# FIX: dropped `sudo: true` — deprecated since Ansible 1.9, removed in 2.9,
# and redundant next to `become: true`.
- name: restart openvpn
  service:
    name: openvpn
    state: restarted
  become: true
---
# Install and configure the OpenVPN server, generating DH parameters on first run.
# FIXES: collapsed duplicated `---`; dropped deprecated `sudo: true`;
# replaced `shell: mkdir -p` with the idempotent `file` module (it creates
# parent directories and reports changed-state correctly); `shell` -> `command`
# for the openssl call since no shell features are used.
- name: "Install OpenVPN"
  yum:
    name: openvpn
    state: present
  notify: "restart openvpn"
  become: true

- name: Create path
  file:
    path: "{{ dhparms_file | dirname }}"
    state: directory
  become: true

- name: "Generate DH parameters"
  command: openssl dhparam -out {{ dhparms_file }} 512
  args:
    creates: "{{ dhparms_file }}"
  become: true

- name: "Configure OpenVPN Server"
  template:
    src: server.conf.j2
    dest: /etc/openvpn/server.conf
  notify: "restart openvpn"
  become: true
# Resolved "after" state of the diff hunk (@@ -3,5 +3,5 @@): the service is
# started AND enabled at boot, and deprecated `sudo: true` is replaced by
# `become: true`.
- include: installOpenVPN.yml
- name: "Start OpenVPN"
  service:
    name: openvpn
    state: started
    enabled: true
  become: true
# Distribute the cluster-wide ssh_known_hosts file and optional extra PATHs.
# FIXES: octal mode quoted as "0644" (unquoted 644 is retyped/ambiguous);
# typo "additiona" -> "additional" in the task name; native block YAML
# instead of key=value strings.
- name: install known hosts file
  copy:
    src: files/ssh_known_hosts
    dest: /etc/ssh/ssh_known_hosts
    owner: root
    mode: "0644"
  become: true
  become_user: root

- name: setup additional PATHs in /etc/profile.d
  template:
    src: additional_paths.sh.j2
    dest: /etc/profile.d/additional_paths.sh
  become: true
  when: additional_paths is defined
export PATH=$PATH:{{ additional_paths|join(":") }}
---
# Reorder /etc/profile so /usr/local/bin comes last in PATH, and drop the
# legacy pathmunge entry plus the abrt-cli login notification.
# FIX: the legacy `lineinfile:` + separate `args:` split is replaced by
# direct module arguments — the supported form for non-free-form modules.
- name: place /usr/local/ last in the PATH in /etc/profile
  lineinfile:
    dest: "/etc/profile"
    insertbefore: BOF
    line: "PATH=/bin:/usr/bin:/usr/local/bin"
  become: true
  become_user: root

- name: remove old line
  lineinfile:
    dest: "/etc/profile"
    regexp: "^PATH=/usr/local/bin:/bin:/usr/bin$"
    state: absent
  become: true
  become_user: root

- name: remove /usr/local/ from the PATH in /etc/profile
  lineinfile:
    dest: "/etc/profile"
    regexp: ".*pathmunge /usr/local.*"
    state: absent
  become: true
  become_user: root

- name: dont execute abrt-cli on login
  file:
    path: /etc/profile.d/abrt-console-notification.sh
    state: absent
  become: true
  become_user: root