Compare revisions

Showing with 358 additions and 60 deletions
Files in the playbook directory should be used as reference examples only.
---
description: "A simple template to boot a 3 node cluster"
heat_template_version: 2013-05-23

parameters:
  image_id:
    type: string
    label: Image ID
    description: Image to be used for compute instance
    default: a5e74703-f343-415a-aa23-bd0f0aacfc9e
  key_name:
    type: string
    label: Key Name
    description: Name of key-pair to be used for compute instance
    default: shahaan
  availability_z:
    type: string
    label: Availability Zone
    description: Availability Zone to be used for launching compute instance
    default: monash-01

resources:
  computeNodes:
    type: "OS::Heat::ResourceGroup"
    properties:
      count: 2
      resource_def:
        type: "OS::Nova::Server"
        properties:
          availability_zone: { get_param: availability_z }
          flavor: m1.small
          image: { get_param: image_id }
          key_name: { get_param: key_name }
          metadata:
            ansible_host_group: computeNodes
            ansible_ssh_user: ec2-user
            ansible_ssh_private_key_file: /home/sgeadmin/.ssh/shahaan.pem
  headNodes:
    type: "OS::Heat::ResourceGroup"
    properties:
      count: 1
      resource_def:
        type: headNode.yaml
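A stack built from this template can be launched with the standard OpenStack client. A minimal sketch, assuming the template above is saved as cluster.yaml, that the nested headNode.yaml sits next to it, and that the listed image, key pair, and availability zone exist in the target project (the stack name is illustrative):

# launch the 3 node cluster (2 compute nodes + 1 head node)
openstack stack create -t cluster.yaml \
  --parameter key_name=shahaan \
  --parameter availability_z=monash-01 \
  test-cluster

# watch the stack and its servers come up
openstack stack list
openstack server list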
#- hosts: 'all'
#  gather_facts: false # not sure if false is clever here
#  tasks:
#  - include_vars: vars/ldapConfig.yml
#  - include_vars: vars/filesystems.yml
#  - include_vars: vars/slurm.yml
#  - include_vars: vars/vars.yml
#  - { name: set use shared state, set_fact: usesharedstatedir=False }
#  tags: [ always ]

# This playbook is roughly sorted by
# - host group topics like ComputeNodes or ComputeNodes,LoginNodes, with VisNodes last
# - "tag groups", each starting after a comment, e.g. #misc or the misc tag
- hosts: 'ComputeNodes'
  gather_facts: false
  tasks:
    # These are just templates.
    # Note the never tag! Everything tagged never is only executed when called explicitly,
    # i.e. ansible-playbook --tags=foo,bar or --tags=tag_group
    - { name: template_shell, shell: ls, tags: [never,tag_group,uniquetag_foo] }
    - { name: template_command, command: uname chdir=/bin, tags: [never,tag_group,uniquetag_bar] }
    - { name: template_script, script: ./scripts/qa/test.sh, tags: [never,tag_group,uniquetag_script] }
    # mpi stuff
    - { name: run mpi on one computenode, command: ls, args: {chdir: "/tmp"}, failed_when: "TODO is TRUE", tags: [never,mpi,mpi_local,TODO] }
    - { name: run mpi on two computenodes, command: ls, args: {chdir: "/tmp"}, failed_when: "TODO is TRUE", tags: [never,mpi,mpi_local_two,TODO] }
    #- { name: run mpi via sbatch, command: cmd=ls chdir="/tmp", failed_when: "TODO is TRUE", tags: [never,mpi,slurm_mpi,TODO] }
    #- { name: mpi_pinging, command: cmd=ls chdir="/tmp", failed_when: "TODO is TRUE", tags: [never,mpi,mpi_ping,TODO] }
    #module load openmpi/3.1.6-ucx;mpirun --mca btl self --mca pml ucx -x UCX_TLS=mm -n 24 /projects/pMOSP/mpi/parallel_mandelbrot/parallel/mandelbrot
    #module load openmpi/3.1.6-ucx;srun mpirun --mca btl self --mca pml ucx -x UCX_TLS=mm -n 24 /projects/pMOSP/mpi/parallel_mandelbrot/parallel/mandelbrot
    # slurm
    - { name: slurmd should be running, service: name=slurmd state=started, tags: [never,slurm,slurmd] }
    - { name: munged should be running, service: name=munged state=started, tags: [never,slurm,munged] }
    - { name: ensure connectivity to the controller, shell: scontrol ping, tags: [never,slurm,scontrol_ping] }
    - { name: the most simple srun test, shell: srun --reservation=AWX hostname, tags: [never,slurm,srun_hostname] }
    # nhc: run nhc manually because it contains many tests
    - { name: run nhc explicitly, command: /opt/nhc-1.4.2/sbin/nhc -c /opt/nhc-1.4.2/etc/nhc/nhc.conf, become: true, tags: [never,slurm,nhc] }
    # networking
    - { name: ping license server, shell: ls, tags: [never,network,ping_license] }
    - { name: ping something outside monash, command: ping -c 1 8.8.8.8, tags: [never,network,ping_external] }

# mounts
- hosts: 'ComputeNodes,LoginNodes'
  gather_facts: false
  tasks:
    - { name: check mount for usr_local, shell: "mount | grep -q local", tags: [never,mountpoints,mountpoints_local] }
    - { name: check mount for projects, shell: "lfs df -h", tags: [never,mountpoints_projects] }
    - { name: check mount for home, shell: "mount | grep -q home", tags: [never,mountpoints,mountpoints_home] }
    - { name: check mount for scratch, shell: "mount | grep -q scratch", tags: [never,mountpoints_scratch] }
    # misc
    - { name: check singularity, shell: module load octave && octave --version, tags: [never,misc,singularity3] }
    - { name: module test, shell: cmd="module load gcc" executable="/bin/bash", tags: [never,misc,modulecmd] }
    - { name: contact ldap, shell: maybe test ldapsearch, failed_when: "TODO is TRUE", tags: [never,misc,ldap,TODO] }

# gpu
- hosts: 'VisNodes'
  gather_facts: false
  tasks:
    - { name: run nvidia-smi to see if a gpu driver is present, command: "/bin/nvidia-smi", tags: [never,gpu,smi] }
    - { name: run gpu burn defaults to 30 seconds, command: "/usr/local/gpu_burn/1.0/run_silent.sh", tags: [never,gpu,long,gpuburn] }

# extended, time-consuming tests
# relion, see https://docs.massive.org.au/communities/cryo-em/tuning/tuning.html
# linpack
#module load openmpi/1.10.7-mlx;ldd /usr/local/openmpi/1.10.7-mlx/bin/* | grep -ic found
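Because every check above carries the never tag, a plain run of the playbook executes nothing; a tag or tag group has to be requested explicitly. A minimal sketch, assuming the playbook is saved as qa.yml and a suitable inventory file exists:

# run only the slurm checks
ansible-playbook -i inventory qa.yml --tags=slurm

# run a whole tag group, or a single uniquely tagged task
ansible-playbook -i inventory qa.yml --tags=mountpoints
ansible-playbook -i inventory qa.yml --tags=uniquetag_foo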
#!/usr/bin/python
import subprocess
import sys


def getTime():
    print "How long do you think you need this computer for?"
    print "If you need the computer for 2 days and 12 hours please enter as 2-12 or 2-12:00:00"
    time = sys.stdin.readline().strip()
    try:
        (days, hours) = time.split('-')
    except:
        days = 0
        hours = time
    try:
        # keep only the hour part if an HH:MM:SS value was entered
        (hours, minutes) = hours.split(':', 1)
    except:
        pass
    return (days, hours)


def getNCPUs():
    print "How many CPUs would you like?"
    cpus = None
    while cpus == None:
        cpustr = sys.stdin.readline().strip()
        try:
            cpus = int(cpustr)
        except:
            print "Sorry I can't interpret %s as a number" % cpustr
            print "How many CPUs would you like?"
    return cpus


def getRAM():
    print "How much RAM would you like (press enter for the default)?"
    ramstr = sys.stdin.readline().strip()
    while ramstr != None and ramstr != "":
        try:
            ram = int(ramstr)
            return ram
        except:
            print "Sorry I can't interpret %s as a number" % ramstr
            print "How much RAM would you like?"
            ramstr = sys.stdin.readline().strip()
    return None


def subjob(time, cpus, ram):
    # default to 2000 MB per CPU if no explicit RAM request was given
    if ram == None:
        ram = cpus * 2000
    scriptpath = '/home/chines'
    p = subprocess.Popen(['sbatch', '--time=%s-%s' % (time[0], time[1]), '--nodes=1',
                          '--mincpus=%s' % cpus, '--mem=%s' % ram,
                          '%s/mbpjob.sh' % scriptpath],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    import re
    m = re.match('Submitted batch job (?P<jobid>[0-9]+)', stdout)
    if m:
        return m.groupdict()['jobid']


def isState(jobid, state='RUNNING'):
    import re
    p = subprocess.Popen(['scontrol', 'show', 'job', '-d', jobid],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    jobidre = re.compile(r'JobId=(?P<jobid>[0-9]+)\s')
    statere = re.compile(r'^\s+JobState=(?P<state>\S+)\s')
    currentjobid = None
    for l in stdout.splitlines():
        m = jobidre.match(l)
        if m:
            currentjobid = m.groupdict()['jobid']
        m = statere.match(l)
        if m:
            if m.groupdict()['state'] == state:
                if jobid == currentjobid:
                    return True
            else:
                if jobid == currentjobid:
                    return False
    return False


def waitjob(jobid):
    import time
    while True:
        if isState(jobid, 'RUNNING'):
            return
        else:
            print "job %s not running" % jobid
            time.sleep(1)


def listJobs():
    import re
    r = []
    user = subprocess.check_output(['whoami']).strip()
    jobs = subprocess.check_output(['squeue', '-u', user, '-h', '-o', '%i %L %j %c'])
    jobre = re.compile(r'(?P<jobid>[0-9]+) (?P<time>\S+) (?P<jobname>\S+) (?P<cpus>[0-9]+)$')
    for l in jobs.splitlines():
        m = jobre.search(l)
        if m:
            r.append(m.groupdict())
    return r


def getNode(jobid):
    import re
    stdout = subprocess.check_output(['scontrol', 'show', 'job', '-d', jobid])
    for l in stdout.splitlines():
        m = re.search(r'^\s+Nodes=(?P<nodelist>\S+)\s', l)
        if m:
            nodes = m.groupdict()['nodelist'].split(',')
            return nodes[0]


def createJob(*args, **kwargs):
    time = getTime()
    #cpus=getNCPUs()
    cpus = 1
    #ram=getRAM()
    ram = None
    subjob(time, cpus, ram)


def stopjob(jobid):
    # not in the original script: assumed to cancel the job via scancel
    subprocess.check_call(['scancel', jobid])


def selectJob(jobidlist):
    if len(jobidlist) == 1:
        return jobidlist[0]['jobid']
    else:
        print "Please select a job (or press enter to cancel)"
        i = 1
        print "\tJob name\tNum CPUs\tRemaining Time"
        for j in jobidlist:
            print "%s\t%s\t%s\t%s" % (i, j['jobname'], j['cpus'], j['time'])
            i = i + 1
        try:
            jobnum = int(sys.stdin.readline().strip())
            if jobnum > 0 and jobnum <= len(jobidlist):
                return jobidlist[jobnum - 1]['jobid']
        except:
            pass
    return None


def connect(*args, **kwargs):
    jobidlist = listJobs()
    jobid = selectJob(jobidlist)
    if jobid != None:
        waitjob(jobid)
        node = getNode(jobid)
        print node


def stop(*args, **kwargs):
    jobidlist = listJobs()
    jobid = selectJob(jobidlist)
    if jobid != None:
        stopjob(jobid)


def main():
    import argparse
    parser = argparse.ArgumentParser()
    subparser = parser.add_subparsers()
    startparser = subparser.add_parser('start', help='allocate a node to the user')
    startparser.set_defaults(func=createJob)
    connectparser = subparser.add_parser('connect')
    connectparser.set_defaults(func=connect)
    stopparser = subparser.add_parser('stop')
    stopparser.set_defaults(func=stop)
    args = parser.parse_args()
    if hasattr(args, 'func'):
        args.func(args)
        return
    # fallback workflow when no subcommand is given:
    # make sure there is exactly one job, then print the node it runs on
    try:
        jobidlist = listJobs()
        if len(jobidlist) > 1:
            print "cancel all jobs here"
        jobidlist = listJobs()
        if len(jobidlist) == 0:
            time = getTime()
            #cpus=getNCPUs()
            cpus = 1
            #ram=getRAM()
            ram = None
            subjob(time, cpus, ram)
        jobidlist = listJobs()
        if len(jobidlist) == 1:
            jobid = jobidlist[0]['jobid']
            waitjob(jobid)
            node = getNode(jobid)
            print node
        sys.exit(0)
    except Exception as e:
        print e
        import traceback
        print traceback.format_exc()
        sys.exit(1)


main()
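The script wraps sbatch, squeue and scontrol behind three subcommands. A sketch of the expected interaction, assuming it is installed as /usr/local/bin/get_node.py (as the role further down does) and that the user has a Slurm account:

get_node.py start     # prompts for a wall time, then submits mbpjob.sh via sbatch
get_node.py connect   # lists your jobs, waits for RUNNING, prints the allocated node
get_node.py stop      # cancels the selected job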
#!/bin/bash
mbpctrl='/home/hines/mbp_script/get_node.py'
node=$( $mbpctrl $1 )
if [[ $node ]]; then
    ssh -t $node tmux attach-session
fi
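The wrapper passes its first argument straight through to get_node.py, then opens an interactive SSH session to the reported node. A usage sketch, assuming mbpjob.sh has already started a tmux session on that node:

mbp_node connect      # resolve the job's node, ssh to it and attach to its tmux session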
---
- name: make sure /usr/local/bin exists
  file: path=/usr/local/bin state=directory mode=755 owner=root
  become: true
- name: install get_node.py
  copy: src=get_node.py dest=/usr/local/bin/get_node.py mode=755 owner=root
  become: true
- name: install mbp_node
  copy: src=mbp_node dest=/usr/local/bin/mbp_node mode=755 owner=root
  become: true
---
# This role fixes a misconfiguration of some OpenStack base images at Monash University:
# /dev/vdb is mounted in the image's fstab, but the OpenStack flavour does not provide a second disk.
- name: unmount vdb if absent
  mount:
    path: "/mnt"
    src: "/dev/vdb"
    state: absent
  become: true
  when: 'hostvars[inventory_hostname]["ansible_devices"]["vdb"] is not defined'
- name: keep mnt present
  file:
    path: "/mnt"
    owner: root
    group: root
    mode: "u=rwx,g=rx,o=rx"
    state: directory
  become: true
  when: 'hostvars[inventory_hostname]["ansible_devices"]["vdb"] is not defined'
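The symptom this role works around can be checked by hand on an affected instance; a quick manual sketch using standard tools (the paths come from the role above):

grep vdb /etc/fstab   # stale entry baked into the image
lsblk                 # no vdb block device when the flavour provides only one disk
mount | grep /mnt     # after the role runs, /mnt stays an ordinary directory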
---
- name: restart openvpn
  service: name=openvpn state=restarted
  become: true
@@ -3,6 +3,6 @@
   include: installOpenVPN.yml
 - name: "Start OpenVPN"
-  service: name=openvpn state=started
-  sudo: true
+  service: name=openvpn state=started enabled=yes
+  become: true
export PATH=$PATH:{{ additional_paths|join(":") }}
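This one-line profile template only makes sense together with an additional_paths variable; a hedged sketch of how such a variable might be set and what the rendered line would then look like (the directory values are illustrative, not taken from the repository):

# group_vars (illustrative):
#   additional_paths:
#     - /usr/local/bin
#     - /opt/slurm/bin
# rendered result:
export PATH=$PATH:/usr/local/bin:/opt/slurm/bin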