Commit 006076ac authored by Chris Hines

Merge pull request #148 from l1ll1/master

gluster roles and fixes to log rotate
parents 14246468 9f549a33
Showing 216 additions and 38 deletions
#!/usr/bin/env python
import sys, os, string, subprocess, socket, re
import copy, shlex,uuid, random, multiprocessing, time, shutil, json
import novaclient.v1_1.client as nvclient
import novaclient.exceptions as nvexceptions
#import novaclient.v1_1.client as nvclient
#import novaclient.exceptions as nvexceptions
from keystoneclient.auth.identity import v2 as v2_auth
from heatclient import client as heat_client
#from heatclient import client as heat_client
#from novaclient import client as nova_client
#from cinderclient import client as cinder_client
import heatclient
import novaclient
import cinderclient
import heatclient.client
import novaclient.client
import cinderclient.client
from keystoneclient import session as kssession
@@ -108,13 +116,13 @@ class OpenStackConnection:
raise Exception("You have multiple heat stacks in your OpenStack Project and I'm not sure which one to use.\n You can select a stack by symlinking to a stack, for example if you have a stack called mycluster do ln -s %s mycluster\n"%stack)
def auth(self):
self.nc = nvclient.Client( auth_url=self.authUrl,
username=self.username,
api_key=self.passwd,
project_id=self.tenantName,
tenant_id=self.tenantID,
service_type="compute"
)
# self.nc = nvclient.Client( auth_url=self.authUrl,
# username=self.username,
# api_key=self.passwd,
# project_id=self.tenantName,
# tenant_id=self.tenantID,
# service_type="compute"
# )
kwargs = {
'insecure': False,
}
@@ -131,23 +139,28 @@ class OpenStackConnection:
self.authUrl,
**kwargs)
endpoint = keystone_auth.get_endpoint(keystone_session,service_type='orchestration', region_name=None)
endpoint = keystone_auth.get_endpoint(keystone_session,service_type='compute', region_name=None)
kwargs = {
'auth': keystone_auth,
}
api_version='2'
self.nc = novaclient.client.Client(api_version, endpoint, **kwargs)
endpoint = keystone_auth.get_endpoint(keystone_session,service_type='orchestration', region_name=None)
kwargs = {
'username': self.username,
'include_pass': False,
'session': keystone_session,
'auth_url': self.authUrl,
'region_name': '',
'endpoint_type': 'publicURL',
'service_type': 'orchestration',
'password': self.passwd,
'auth': keystone_auth,
}
api_version=1
self.hc = heatclient.client.Client(api_version, endpoint, **kwargs)
self.hc = heat_client.Client(api_version, endpoint, **kwargs)
endpoint = keystone_auth.get_endpoint(keystone_session,service_type='volume', region_name=None)
kwargs = {
# 'session': keystone_session,
'auth': keystone_auth,
}
api_version=1
self.cc = cinderclient.client.Client(api_version, endpoint, **kwargs)
def recurse_resources(self,stack,resource):
@@ -170,6 +183,7 @@ class OpenStackConnection:
instance_ids.extend(self.recurse_resources(stack=i,resource=r))
nc=self.nc
# cc=self.cc
inventory = {}
inventory['_meta'] = { 'hostvars': {} }
for server in nc.servers.list():
@@ -190,6 +204,9 @@
inventory[server.metadata['ansible_host_group']].append(hostname)
else:
inventory[server.metadata['ansible_host_group']] = [hostname]
#print dir(server)
if len(server.to_dict()['os-extended-volumes:volumes_attached']) >0:
pass
# Set the other host variables
inventory['_meta']['hostvars'][hostname] = {}
inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = server.networks.values()[0][0]
......
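For context, the script above is an Ansible dynamic inventory for an OpenStack project: it lists the Nova servers in the stack, groups them by their ansible_host_group metadata, and exposes each instance's first network address as ansible_ssh_host. A minimal, hypothetical playbook run against that inventory could look like the sketch below; the group name is an example, real group names come from the instance metadata.

# Hypothetical playbook consuming the dynamic inventory script above.
# "ComputeNodes" is an example group name that would come from an
# instance's ansible_host_group metadata; it is not defined in this diff.
- hosts: ComputeNodes
  sudo: true
  tasks:
    - name: check connectivity to the OpenStack instances
      ping:

Ansible invokes executable inventories with --list and expects the JSON on stdout, so a run would typically be ansible-playbook -i <inventory script> <playbook>.yml.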
@@ -4,3 +4,10 @@
- name: fetch slurm.conf
fetch: src=/tmp/slurm.conf dest=files/slurm.conf flat=yes
- name: "Templating slurmdbd.conf"
template: src=slurmdbd.conf.j2 dest=/tmp/slurmdbd.conf owner=root group=root mode=644
sudo: true
- name: fetch slurmdbd.conf
fetch: src=/tmp/slurmdbd.conf dest=files/slurmdbd.conf flat=yes
@@ -10,6 +10,9 @@
#
ClusterName={{ clustername }}
ControlMachine={{ slurmctrl }}
{% if slurmctrlbackup is defined %}
BackupController={{ slurmctrlbackup }}
{% endif %}
#ControlAddr=
#BackupController=
#BackupAddr=
@@ -121,7 +124,10 @@ Epilog={{ slurmjob.epilog }}
#JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost={{ slurmctrl }}
AccountingStorageHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
AccountingStorageBackupHost={{ slurmdbdbackup }}
{% endif %}
#AccountingStorageEnforce=limits,safe
#AccountingStorageLoc=
#AccountingStoragePass=
......
@@ -17,7 +17,10 @@ AuthType=auth/munge
#
# slurmDBD info
#DbdAddr=
DbdHost={{ slurmctrl }}
DbdHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
DbdBackupHost={{ slurmdbdbackup }}
{% endif %}
#DbdPort=7031
SlurmUser=slurm
#MessageTimeout=300
@@ -36,7 +39,7 @@ PidFile=/var/run/slurmdbd.pid
#
# Database info
StorageType=accounting_storage/mysql
StorageHost=localhost
StorageHost={{ mysql_host }}
#StoragePort=1234
StoragePass={{ slurmdb_passwd }}
StorageUser=slurmdb
......
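The slurm.conf and slurmdbd.conf templates above assume that slurmctrl, slurmdbd, mysql_host and the optional backup variables are defined in the inventory or group_vars. A hypothetical sketch of those variables follows; only the names come from the templates, the values are made up.

# Hypothetical group_vars entries feeding the Slurm templates above.
clustername: "mycluster"
slurmctrl: "mgmt0"
slurmctrlbackup: "mgmt1"       # optional; enables BackupController
slurmdbd: "sql0"
slurmdbdbackup: "sql1"         # optional; enables DbdBackupHost and AccountingStorageBackupHost
mysql_host: "sql0"
slurmdb_passwd: "change_me"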
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
sudo: true
when: ansible_os_family == 'RedHat'
- name: install gluster
yum: name={{ item }} state='latest'
when: ansible_os_family == 'RedHat'
with_items:
- glusterfs-client
sudo: true
- name: install gluster
apt: name=glusterfs-client state='latest'
when: ansible_os_family == 'Debian'
sudo: true
- name: mount volume
#mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,_netdev,backupvolfile-server={{ gluster_servers[1] }}"
mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,backupvolfile-server={{ gluster_servers[1] }},noauto,comment=systemd.automount"
sudo: true
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
sudo: true
when: ansible_os_family == 'RedHat'
- name: install gluster
yum: name={{ item }} state='latest'
when: ansible_os_family == 'RedHat'
with_items:
- glusterfs
- glusterfs-server
sudo: true
- name: install gluster
apt: name=glusterfs-server state='latest'
when: ansible_os_family == 'Debian'
sudo: true
- name: start daemon
service: name=glusterd enabled=yes state=started
sudo: true
- name: make server list
set_fact:
server_list: "{{ gluster_servers|join(',') }}"
- name: echo server list
debug: var=server_list
- name: make brick dir
file: state=directory path="{{ brickmnt }}/brick"
sudo: true
- name: create volume
gluster_volume:
name: "{{ volname }}"
brick: "{{ brickmnt }}/brick"
cluster: "{{ server_list }}"
replicas: "{{ replicas }}"
state: present
sudo: true
run_once: true
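Taken together, the two gluster roles above expect a small set of variables: the servers make a brick directory under brickmnt and create, once (run_once), a replicated volume named volname spanning gluster_servers; the clients mount that volume on volmnt, using the second server as the backup volfile server. A hypothetical playbook wiring follows; the group and role names are assumptions, only the variable names are taken from the tasks.

# Hypothetical wiring of the gluster roles sketched above.
- hosts: GlusterServers
  vars:
    volname: "vol1"
    brickmnt: "/gbrick"
    gluster_servers: ["gluster0", "gluster1"]
    replicas: 2
  roles:
    - gluster_server

- hosts: GlusterClients
  vars:
    volname: "vol1"
    volmnt: "/glusterVolumes"
    gluster_servers: ["gluster0", "gluster1"]
  roles:
    - gluster_client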
---
- name: stat usrlocal
- name: stat
stat: path={{ dest }}
register: stat_usrlocal
register: stat_r
- name: mv
command: mv /usr/local /usr/local_old
when: stat_usrlocal.stat.isdir == True
command: mv "{{ dest }}" "{{ dest }}_old"
when: stat_r.stat.exists and stat_r.stat.isdir
sudo: true
- name: stat
stat: path={{ dest }}
register: stat_r
- name: link
file: src="{{ src }}" dest="{{ dest }}" state=link
when: not stat_r.stat.exists
sudo: true
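The tasks above generalise the old hard-coded /usr/local handling: they stat dest, move an existing directory aside to dest_old, and replace it with a symlink to src. A hypothetical invocation follows; the role name and paths are examples, src and dest are the variables the tasks use.

# Hypothetical use of the symlink role above.
- hosts: ComputeNodes
  roles:
    - { role: usrlocal_link, src: "/glusterVolumes/usr_local", dest: "/usr/local" }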
---
- name: Format File Systems
filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
with_items: mkFileSystems
- name: format volumes
filesystem: fstype={{ item.fstype }} dev={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }}
with_items: volumes
sudo: true
when: mkFileSystems is defined
- name: Mount device
mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
with_items: mountFileSystems
- name: mount volumes
mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
with_items: volumes
sudo: true
when: mountFileSystems is defined
- name: symlink volumes
file: force=yes state=link src="{{ item.mntpt }}" path="{{ item.linkto }}"
when: item.linkto is defined
with_items: volumes
sudo: true
#- name: Format File Systems
# filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
# with_items: mkFileSystems
# sudo: true
# when: mkFileSystems is defined
#
#- name: Mount device
# mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
# with_items: mountFileSystems
# sudo: true
# when: mountFileSystems is defined
#
#
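The format/mount/symlink tasks above iterate over a volumes list and look up each item's device path in the ansible_host_volumes host variable, which is presumably supplied elsewhere (for example by the OpenStack dynamic inventory). A hypothetical shape for that data follows; the field names match the tasks, the values are examples.

# Hypothetical variables consumed by the volume tasks above.
volumes:
  - name: "dbvol"
    fstype: "ext4"
    mntpt: "/mnt/db"
    linkto: "/var/lib/mysql"   # optional; only used by the symlink task

# Per-host fact the tasks expect, keyed by volume name:
ansible_host_volumes:
  dbvol:
    dev: "/dev/vdb"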
---
- name: make dir
file: path="{{ provision | dirname }}" state=directory mode=755 owner=root
sudo: true
- name: copy provision template
template: src=provision.sh.j2 dest={{ provision }} mode=755 owner=root
sudo: true
......
@@ -32,7 +32,7 @@ for user in ${user_list[*]}; do
if [ -z "${find}" ]; then
su slurm -c "$sacctmgr -i add account ${account} Description=CVL Organization=monash cluster=${cluster}" || { echo "error to create account ${account}" >> ${log_file} && exit 1; }
fi
find=$(sacctmgr list user ${user} | grep ${user})
find=$(sacctmgr list user --noheader -p ${user} | grep ${user})
if [ -z "${find}" ]; then
su slurm -c "$sacctmgr -i add user ${user} account=${account} cluster=${cluster}" || { echo "error to create user ${user}" >> ${log_file} && exit 1; }
fi
......
---
slurm_provision: "/usr/local/sbin/slurm_provision.sh"
home_dir: "/home"
provision: "/usr/local/sbin/provision.sh"
@@ -22,13 +22,13 @@
sudo: true
- name: install slurmdb.conf
template: src=slurmdbd.conf.j2 dest={{ slurm_dir }}/etc/slurmdbd.conf
copy: src=files/slurmdbd.conf dest={{ slurm_dir }}/etc/slurmdbd.conf
sudo: true
when: slurm_dir is defined
- name: install slurmdbd.conf
template: src=slurmdbd.conf.j2 dest=/etc/slurm/slurmdbd.conf
copy: src=slurmdbd.conf dest=/etc/slurm/slurmdbd.conf
sudo: true
when: slurm_dir is not defined
......