Commit d8b838a5 authored by Gin Tan

Merge branch 'cicd' into 'master'

Cicd

See merge request hpc-team/ansible_cluster_in_a_box!274
parents 7d99257c 93fd520f
*.retry
# copied from luhan
extends: default

rules:
  braces:
    level: warning
    max-spaces-inside: 1
  brackets:
    level: warning
    max-spaces-inside: 1
  colons:
    level: warning
  commas:
    level: warning
  comments: disable
  comments-indentation: disable
  document-start: disable
  empty-lines:
    level: warning
  hyphens:
    level: warning
  indentation:
    level: warning
    indent-sequences: consistent
  line-length:
    level: warning
    allow-non-breakable-inline-mappings: true
  truthy: disable
  trailing-spaces:
    level: warning
---
extends: default

rules:
  braces: {min-spaces-inside: 0, max-spaces-inside: 1}
  brackets: {min-spaces-inside: 0, max-spaces-inside: 1}
  comments: disable
  comments-indentation: disable
  document-start: disable
  indentation: disable
  line-length: disable
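Given the 'cicd' branch name, these lint profiles are presumably consumed by a CI job. A minimal sketch of such a GitLab CI job is shown below; the job name, config filename and target path are assumptions for illustration, not part of this commit.

# hypothetical .gitlab-ci.yml job; job name, config filename and paths are assumed
yamllint:
  script:
    - yamllint -c .yamllint.yml .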
- hosts: 'all'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  tasks:
    - { name: set use shared state, set_fact: usesharedstatedir=False }
    - { name: set hostgroup, set_fact: hostgroup='ComputeNodes' }
  tags: [ always ]

- hosts: 'all'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  strategy: free
  roles:
    # - { role: disable_selinux, tags: [ disableselinux ] }
    - { role: upgrade }
    - { role: set_password }
    - { role: etcHosts, tags: [ networking ] }
    # - { role: config_repos, tags: [ repos ] }

- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,ManagementNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  strategy: free
  roles:
    - { role: disable_selinux, tags: [ disableselinux ] }
    - { role: ldapclient, tags: [ authentication ] }
    - { role: ssh-password-login, tags: [ authentication ] }
    - { role: enable_sudo_group, tags: [ authentication, sudo ] }
    - { role: move_homedir }
    - { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
    - { role: SSHKnownHosts, tags: [ known_hosts ] }
    - { role: jasons_ssh_ca, tags: [ ssh_ca ] }

- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  tasks:
    - { name: set use shared state, set_fact: usesharedstatedir=False }
  tags: [ always ]

- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  strategy: free
  roles:
    - { role: move_homedir, tags: [ authentication, filesystems ] }
    - { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
    - { role: slurm-common, tags: [ slurm, slurm-common ] }
    - { role: lmod, tags: [ other ] }
    - { role: enable_modules, default_modules: "lmod", tags: [ other ] }
    - { role: postfix, tags: [ mail, other ] }

- hosts: 'VisNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  roles:
    - { role: gpu, tags: [ gpu ] }

- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  roles:
    - { role: slurm_config, tags: [ slurm, slurm_config ] }

- hosts: 'DesktopNodes,ComputeNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  strategy: free
  roles:
    - { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
    - { role: mate-de-install, tags: [ mate-de-install ] }  # TODO: this crashes for everything except cmca
../files
---
- hosts: 'all'
  tasks:
    - include_vars: vars/passwords.yml
    - include_vars: vars/names.yml
    - include_vars: vars/ldapConfig.yml
    - include_vars: vars/filesystems.yml
    - include_vars: vars/slurm.yml
    - include_vars: vars/vars.yml

- hosts: 'all'
  tasks:
    - { name: setup, setup: }

- hosts: 'ManagementNodes'
  roles:
    - { role: calculateSlurmConf }
---
# just calculates an /etc/hosts file
- hosts: 'all'
  tasks:
    - include_vars: vars/passwords.yml
    - include_vars: vars/names.yml
    - include_vars: vars/ldapConfig.yml
    - include_vars: vars/filesystems.yml
    - include_vars: vars/slurm.yml
    - include_vars: vars/vars.yml

- hosts: 'all'
  tasks:
    - { name: setup, setup: }

- hosts: 'ManagementNodes'
  roles:
    - { role: calculateEtcHosts }

# - hosts: 'NFSNodes'
#   roles:
#     - { role: calculateExports }
# Basic stuff to make the nodes functional,
# i.e. upgrade operating systems, etc.
#
- hosts: 'ManagementNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  tasks:
    # - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
    - { name: set use shared state, set_fact: usesharedstatedir=True }
  tags: [ always ]

- hosts: 'ManagementNodes'
  strategy: free
  gather_facts: False
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  roles:
    # - { role: ldapclient, tags: [ authentication ] }
    # - { role: ssh-password-login }
    # - { role: enable_sudo_group }
    # - { role: make_filesystems, volumes: "{{ glustervolumes }}" }
    # - { role: gluster_server, volname: "gv", brickmnt: '/gbrick', gluster_servers: "{{ groups['ManagementNodes'] }}", replicas: 2, tags: [ gluster_server ] }
    # - { role: gluster_volcreate, volname: "gv", gluster_servers: "{{ groups['ManagementNodes'] }}", brickmnt: '/gbrick', replicas: 2 }
    # - { role: gluster_client, volname: "gv", gluster_servers: ['mgmt0','mgmt1','sql0'], volmnt: '/glusterVolume' }
    - { role: nfs-client, nfsMounts: "{{ mgmtNfsMounts }}", tags: [ nfs ] }
    - { role: slurmdb-config, tags: [ slurm, slurmdb-config ] }
    - { role: slurm-common, tags: [ slurm, slurm-common ] }
    - { role: slurm_config, tags: [ slurm, slurm-config ] }
    - { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ] }
    # - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ] }
    # - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
# Roles to initialize the NFS and SQL nodes
#
#
- hosts: 'all'
  tasks:
    - { name: setup, setup: }
  tags: [ always ]

# we need this here to gather facts and fill required variables
- hosts: 'ManagementNodes'
  gather_facts: True
  tasks:
    - include_vars: vars/passwords.yml
    - include_vars: vars/names.yml
    - include_vars: vars/ldapConfig.yml
    - include_vars: vars/filesystems.yml
    - include_vars: vars/slurm.yml
    - include_vars: vars/vars.yml
    - { name: set hostgroup, set_fact: hostgroup='ManagementNodes' }
    - { name: set use shared state, set_fact: usesharedstatedir=True }
  tags: [ always ]

- hosts: 'SQLNodes,NFSNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  pre_tasks:
    - { name: set hostgroup, set_fact: hostgroup='SQLNodes', tags: [ always ] }
    - { name: set use shared state, set_fact: usesharedstatedir=True, tags: [ always ] }

- hosts: 'SQLNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  strategy: free
  gather_facts: True
  roles:
    - { role: upgrade, tags: [ upgrade ] }
    - { role: make_filesystems, volumes: "{{ dbvolumes }}" }
    - { role: mysql, mysql_type: mysql_server, mysql_root_password: "{{ sqlrootPasswd }}", mysql_user_name: slurmdb, mysql_user_db_name: slurm_acct_db, mysql_user_hosts_group: "{{ groups['ManagementNodes'] }}", mysql_user_password: "{{ slurmdb_passwd }}", tags: [ database ] }
    - { role: slurm-mysql-config, tags: [ database, slurmdb ] }
  tags: [ sql ]

- hosts: 'NFSNodes'
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  strategy: free
  gather_facts: False
  roles:
    - { role: make_filesystems, volumes: "{{ nfsvolumes }}" }
  tasks:
    - { name: make homedir, file: { path: /nfsvol/home, state: directory }, become: true, become_user: root }
    - { name: make usr_local, file: { path: /nfsvol/usr_local_centos7, state: directory }, become: true, become_user: root }
    - { name: make projects, file: { path: /nfsvol/projects, state: directory }, become: true, become_user: root }
    - { name: make scratch, file: { path: /nfsvol/scratch, state: directory }, become: true, become_user: root }
  tags: [ nfs ]

- hosts: 'NFSNodes'
  strategy: free
  gather_facts: False
  vars_files:
    - vars/passwords.yml
    - vars/names.yml
    - vars/ldapConfig.yml
    - vars/filesystems.yml
    - vars/slurm.yml
    - vars/vars.yml
  roles:
    - { role: nfs-server }
  tags: [ nfs ]
../roles
../vars
#!/usr/bin/env python
# OpenStack dynamic inventory for Ansible: builds host groups from instance
# metadata (ansible_host_groups / project_name) across all enabled projects.
import sys, os, string, socket, re
import shlex, multiprocessing, time, shutil, json
from novaclient import client as nvclient
from cinderclient import client as cdclient
import novaclient.exceptions as nvexceptions
from keystoneclient import client as ksclient
from joblib import Parallel, delayed
from multiprocessing import Process, Manager, Pool


def gatherInfo(md_key, md_value, authDict, project_id, inventory):
    ## Fetch the Nova Object
    from keystoneclient import client as ksclient
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    auth = v3.Password(project_id=project_id, **authDict)
    sess = session.Session(auth=auth)
    nc = nvclient.Client('2.0', session=sess)
    cc = cdclient.Client('2.0', session=sess)
    for server in nc.servers.list():
        if server.metadata and \
           'ansible_host_groups' in server.metadata and \
           md_key in server.metadata:
            if server.metadata[md_key].strip() != md_value.strip():
                continue
            unwantedChars = """][")("""
            rgx = re.compile('[%s]' % unwantedChars)
            ansible_groups = rgx.sub('', server.metadata['ansible_host_groups']).split(',')
            hostname = server.name
            novaVolumes = cc.volumes.list(server.id)
            # Set Ansible host groups
            for group in ansible_groups:
                groupName = group.strip()
                if groupName not in inventory:
                    inventory[groupName] = []
                inventory[groupName].append(hostname)
            # Add other metadata as host variables
            for md in server.metadata.items():
                if md[0] not in (md_key, 'ansible_host_groups'):
                    inventory['_meta']['hostvars'][hostname] = {md[0]: md[1]}
            # Record attached Cinder volumes so roles can locate block devices by id
            if novaVolumes:
                volDict = {}
                for volume in novaVolumes:
                    try:
                        if volume.attachments[0]['server_id'] == server.id:
                            volDict[volume.name] = {'dev': '/dev/disk/by-id/virtio-' + volume.id[:20], 'uuid': volume.id}
                    except IndexError:
                        continue
                if volDict:
                    inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'] = volDict
            # Prefer an 'internal' network when the instance has more than one
            network_name = None
            if len(list(server.networks.keys())) > 1:
                for nn in server.networks.keys():
                    if 'internal' in nn:
                        network_name = nn
            if network_name is None:
                network_name = list(server.networks.keys())[0]
            inventory['_meta']['hostvars'][hostname]['ansible_host'] = server.networks[network_name][0]
        else:
            continue
    return inventory


def merge(i, j):
    # Merge inventory fragment i into j: extend list-valued keys (groups),
    # recurse into dict-valued keys (e.g. _meta.hostvars), copy new keys across.
    for k in i.keys():
        v = i[k]
        if k in j:
            if isinstance(v, list):
                j[k].extend(v)
            if isinstance(v, dict):
                merge(i[k], j[k])
        else:
            j[k] = i[k]


if __name__ == "__main__":
    inventory = {}
    inventory['_meta'] = {'hostvars': {}}
    authDict = {}
    try:
        authDict['auth_url'] = os.environ['OS_AUTH_URL']
        authDict['username'] = os.environ['OS_USERNAME']
        authDict['password'] = os.environ['OS_PASSWORD']
        authDict['user_domain_name'] = os.environ['OS_USER_DOMAIN_NAME']
    except KeyError:
        print("Environment variables not set, please run: source <openstack rc file>")
        sys.exit()

    # "static" mode prints a shell wrapper that echoes the inventory as pretty JSON
    if sys.argv[1] == "static":
        static = True
        md_key = "project_name"
        md_value = sys.argv[2]
    else:
        static = False
        md_key = "project_name"
        md_value = sys.argv[1]

    from keystoneclient import client as ksclient
    import keystoneclient
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    # auth = v3.Password(username=userName, password=passwd, auth_url=authUrl, user_domain_name=domainName)
    auth = v3.Password(unscoped=True, **authDict)
    sess = session.Session(auth=auth)
    kc = ksclient.Client(session=sess)
    kc.include_metadata = False
    authmgr = keystoneclient.v3.auth.AuthManager(kc)
    projects = authmgr.projects()
    enabled_projects = [x for x in projects if x.enabled]

    # Query every enabled project in parallel, then merge the partial inventories
    inventory_list = Parallel(n_jobs=len(projects))(delayed(gatherInfo)(md_key, md_value, authDict, proj.id, inventory) for proj in enabled_projects)
    inventory = {}
    for i in inventory_list:
        merge(i, inventory)

    if not inventory['_meta']['hostvars']:
        print("I could not find any resources tagged with {}: {}".format(md_key, md_value))
    else:
        if static:
            print("#!/bin/bash\necho '" + json.dumps(inventory, indent=4) + "'")
        else:
            print(json.dumps(inventory))
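For reference, the script emits Ansible's dynamic-inventory JSON format: one list of hostnames per group, plus per-host variables under _meta.hostvars. A hypothetical example of the output for a single compute node follows (shown indented for readability; the host name, address and volume identifiers are invented for illustration):

{
    "ComputeNodes": ["node00"],
    "_meta": {
        "hostvars": {
            "node00": {
                "ansible_host": "10.0.0.5",
                "ansible_host_volumes": {
                    "dbvol": {
                        "dev": "/dev/disk/by-id/virtio-0123456789abcdef0123",
                        "uuid": "0123456789abcdef0123-example"
                    }
                }
            }
        }
    }
}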