Commit 07032588 authored by Chris Hines

Merge pull request #160 from l1ll1/master

Massive pull request; somehow history has been overwritten.

parents b40e4a32 10a7c15d
#!/usr/bin/env python
import sys, os, string, subprocess, socket, re
import copy, shlex, uuid, random, multiprocessing, time, shutil, json
-import novaclient.v1_1.client as nvclient
-import novaclient.exceptions as nvexceptions
+#import novaclient.v1_1.client as nvclient
+#import novaclient.exceptions as nvexceptions
from keystoneclient.auth.identity import v2 as v2_auth
-from heatclient import client as heat_client
+#from heatclient import client as heat_client
+#from novaclient import client as nova_client
+#from cinderclient import client as cinder_client
+import heatclient
+import novaclient
+import cinderclient
+import heatclient.client
+import novaclient.client
+import cinderclient.client
+import keystoneclient.client
+from keystoneclient.auth.identity import v2
+from keystoneclient import session
+from novaclient import client
+from keystoneclient import session as kssession
+#NOVA_STANDALONE=True
+NOVA_STANDALONE=False
class OpenStackConnection:
@@ -18,82 +32,6 @@ class OpenStackConnection:
        self.tenantID = os.environ['OS_TENANT_ID']
        self.authUrl = "https://keystone.rc.nectar.org.au:5000/v2.0"
-    def _get_keystone_v2_auth(self, v2_auth_url, **kwargs):
-        auth_token = kwargs.pop('auth_token', None)
-        tenant_id = kwargs.pop('project_id', None)
-        tenant_name = kwargs.pop('project_name', None)
-        if auth_token:
-            return v2_auth.Token(v2_auth_url, auth_token,
-                                 tenant_id=tenant_id,
-                                 tenant_name=tenant_name)
-        else:
-            return v2_auth.Password(v2_auth_url,
-                                    username=kwargs.pop('username', None),
-                                    password=kwargs.pop('password', None),
-                                    tenant_id=tenant_id,
-                                    tenant_name=tenant_name)
-    def _get_keystone_session(self, **kwargs):
-        # first create a Keystone session
-        cacert = kwargs.pop('cacert', None)
-        cert = kwargs.pop('cert', None)
-        key = kwargs.pop('key', None)
-        insecure = kwargs.pop('insecure', False)
-        timeout = kwargs.pop('timeout', None)
-        verify = kwargs.pop('verify', None)
-        # FIXME(gyee): this code should come from keystoneclient
-        if verify is None:
-            if insecure:
-                verify = False
-            else:
-                # TODO(gyee): should we do
-                # heatclient.common.http.get_system_ca_file()?
-                verify = cacert or True
-        if cert and key:
-            # passing cert and key together is deprecated in favour of the
-            # requests lib form of having the cert and key as a tuple
-            cert = (cert, key)
-        return kssession.Session(verify=verify, cert=cert, timeout=timeout)
-    def _get_keystone_auth(self, session, auth_url, **kwargs):
-        # FIXME(dhu): this code should come from keystoneclient
-        # discover the supported keystone versions using the given url
-        v2_auth_url = auth_url
-        v3_auth_url = None
-        # Determine which authentication plugin to use. First inspect the
-        # auth_url to see the supported version. If both v3 and v2 are
-        # supported, then use the highest version if possible.
-        auth = None
-        if v3_auth_url and v2_auth_url:
-            user_domain_name = kwargs.get('user_domain_name', None)
-            user_domain_id = kwargs.get('user_domain_id', None)
-            project_domain_name = kwargs.get('project_domain_name', None)
-            project_domain_id = kwargs.get('project_domain_id', None)
-            # support both v2 and v3 auth. Use v3 if domain information is
-            # provided.
-            if (user_domain_name or user_domain_id or project_domain_name or
-                    project_domain_id):
-                auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
-            else:
-                auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
-        elif v3_auth_url:
-            # support only v3
-            auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
-        elif v2_auth_url:
-            # support only v2
-            auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
-        else:
-            raise exc.CommandError(_('Unable to determine the Keystone '
-                                     'version to authenticate with using the '
-                                     'given auth_url.'))
-        return auth
    def get_stack_name(self, stack):
        stacks = []
        for s in self.hc.stacks.list():
@@ -108,46 +46,30 @@ class OpenStackConnection:
            raise Exception("You have multiple heat stacks in your OpenStack project and I'm not sure which one to use.\nYou can select a stack by symlinking to a stack; for example, if you have a stack called mycluster, do ln -s %s mycluster\n" % stack)
    def auth(self):
-        self.nc = nvclient.Client(auth_url=self.authUrl,
-                                  username=self.username,
-                                  api_key=self.passwd,
-                                  project_id=self.tenantName,
-                                  tenant_id=self.tenantID,
-                                  service_type="compute"
-                                  )
-        kwargs = {
-            'insecure': False,
-        }
-        keystone_session = self._get_keystone_session(**kwargs)
        kwargs = {
            'username': self.username,
            'password': self.passwd,
-            'project_id': self.tenantID,
-            'project_name': self.tenantName
+            'tenant_id': self.tenantID,
+            'auth_url': self.authUrl,
        }
-        keystone_auth = self._get_keystone_auth(keystone_session,
-                                                self.authUrl,
-                                                **kwargs)
-        endpoint = keystone_auth.get_endpoint(keystone_session, service_type='orchestration', region_name=None)
+        auth = v2.Password(**kwargs)
+        sess = session.Session(auth=auth)
        kwargs = {
-            'username': self.username,
-            'include_pass': False,
-            'session': keystone_session,
-            'auth_url': self.authUrl,
-            'region_name': '',
-            'endpoint_type': 'publicURL',
-            'service_type': 'orchestration',
-            'password': self.passwd,
-            'auth': keystone_auth,
+            'session': sess,
        }
+        api_version = '2'
+        self.nc = novaclient.client.Client(api_version, session=sess)
        api_version = 1
        endpoint = "https://heat.rc.nectar.org.au:8004/v1/%s" % self.tenantID
-        self.hc = heat_client.Client(api_version, endpoint, **kwargs)
+        self.hc = heatclient.client.Client(api_version, endpoint, session=sess)
+        api_version = 1
+        self.cc = cinderclient.client.Client(api_version, session=sess)
    def recurse_resources(self, stack, resource):
@@ -170,6 +92,7 @@ class OpenStackConnection:
            instance_ids.extend(self.recurse_resources(stack=i, resource=r))
        nc = self.nc
+        cc = self.cc
        inventory = {}
        inventory['_meta'] = {'hostvars': {}}
        for server in nc.servers.list():
@@ -190,6 +113,7 @@ class OpenStackConnection:
                inventory[server.metadata['ansible_host_group']].append(hostname)
            else:
                inventory[server.metadata['ansible_host_group']] = [hostname]
+            #print dir(server)
            # Set the other host variables
            inventory['_meta']['hostvars'][hostname] = {}
            inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = server.networks.values()[0][0]
@@ -198,6 +122,13 @@ class OpenStackConnection:
                if 'ansible_ssh' in key:
                    inventory['_meta']['hostvars'][hostname][key] = server.metadata[key]
            inventory['_meta']['hostvars'][hostname]['ansible_ssh_user'] = 'ec2-user'
+            for vol in server.to_dict()['os-extended-volumes:volumes_attached']:
+                for cv in cc.volumes.findall():
+                    if cv.id == vol['id']:
+                        devname = '/dev/disk/by-id/virtio-' + cv.id[0:20]
+                        if not 'ansible_host_volumes' in inventory['_meta']['hostvars'][hostname]:
+                            inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'] = {}
+                        inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'][cv.display_name] = {'uuid': vol['id'], 'dev': devname}
        print json.dumps(inventory)

if __name__ == "__main__":
......
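The script above implements Ansible's dynamic-inventory contract: it prints a single JSON document with one hostname list per ansible_host_group value plus a _meta.hostvars map, so Ansible needs no per-host lookups. The /dev/disk/by-id/virtio- prefix works because a virtio disk exposes the Cinder volume UUID, truncated to the 20-character virtio serial limit, as its by-id name. A minimal sketch of the emitted shape (hypothetical hostname, group, address and volume name; Python 2, matching the script):

import json

volume_id = '0df2ee4a-324c-4c25-9d6e-d5b9f5f4ea11'  # hypothetical Cinder volume UUID
# udev names the disk after the virtio serial, i.e. the UUID truncated to 20 chars
devname = '/dev/disk/by-id/virtio-' + volume_id[0:20]

inventory = {
    '_meta': {'hostvars': {
        'node01': {
            'ansible_ssh_host': '10.0.0.5',
            'ansible_ssh_user': 'ec2-user',
            'ansible_host_volumes': {'scratch': {'uuid': volume_id, 'dev': devname}},
        },
    }},
    'ComputeNodes': ['node01'],  # one group list per ansible_host_group value
}
print json.dumps(inventory)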
@@ -3,6 +3,6 @@
include: installOpenVPN.yml
- name: "Start OpenVPN"
-  service: name=openvpn state=started
+  service: name=openvpn state=started enabled=yes
  sudo: true
@@ -3,5 +3,5 @@
include: installOpenVPN.yml
- name: "Start OpenVPN"
-  service: name=openvpn state=started
+  service: name=openvpn state=started enabled=yes
  sudo: true
@@ -29,6 +29,6 @@
-
  name: "Starting Apache2"
-  service: name=apache2 state=started
+  service: name=apache2 state=started enabled=yes
  sudo: true
@@ -4,3 +4,10 @@
- name: fetch slurm.conf
  fetch: src=/tmp/slurm.conf dest=files/slurm.conf flat=yes
+- name: "Templating slurmdbd.conf"
+  template: src=slurmdbd.conf.j2 dest=/tmp/slurmdbd.conf owner=root group=root mode=644
+  sudo: true
+- name: fetch slurmdbd.conf
+  fetch: src=/tmp/slurmdbd.conf dest=files/slurmdbd.conf flat=yes
@@ -10,6 +10,9 @@
#
ClusterName={{ clustername }}
ControlMachine={{ slurmctrl }}
+{% if slurmctrlbackup is defined %}
+BackupController={{ slurmctrlbackup }}
+{% endif %}
#ControlAddr=
#BackupController=
#BackupAddr=
@@ -27,7 +30,7 @@ SwitchType=switch/none
MpiDefault=pmi2
SlurmctldPidFile={{ slurmpiddir }}/slurmctld.pid
SlurmdPidFile={{ slurmpiddir }}/slurmd.pid
-ProctrackType=proctrack/linuxproc
+ProctrackType=proctrack/cgroup
#PluginDir=
CacheGroups=0
#FirstJobId=
@@ -121,7 +124,10 @@ Epilog={{ slurmjob.epilog }}
#JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
-AccountingStorageHost={{ slurmctrl }}
+AccountingStorageHost={{ slurmdbd }}
+{% if slurmdbdbackup is defined %}
+AccountingStorageBackupHost={{ slurmdbdbackup }}
+{% endif %}
#AccountingStorageEnforce=limits,safe
#AccountingStorageLoc=
#AccountingStoragePass=
......
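The {% if ... is defined %} guards above add the BackupController and AccountingStorageBackupHost lines only when the corresponding variables are set, so a single-controller cluster still renders a valid slurm.conf. A small sketch of that behaviour using the jinja2 library directly (hypothetical hostnames mgmt0/mgmt1; Ansible's template module layers more on top, but the "is defined" test behaves the same):

from jinja2 import Template

tmpl = Template("ControlMachine={{ slurmctrl }}\n"
                "{% if slurmctrlbackup is defined %}"
                "BackupController={{ slurmctrlbackup }}\n"
                "{% endif %}")

print tmpl.render(slurmctrl='mgmt0')                           # no BackupController line
print tmpl.render(slurmctrl='mgmt0', slurmctrlbackup='mgmt1')  # adds BackupController=mgmt1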
@@ -17,7 +17,10 @@ AuthType=auth/munge
#
# slurmDBD info
#DbdAddr=
-DbdHost={{ slurmctrl }}
+DbdHost={{ slurmdbd }}
+{% if slurmdbdbackup is defined %}
+DbdBackupHost={{ slurmdbdbackup }}
+{% endif %}
#DbdPort=7031
SlurmUser=slurm
#MessageTimeout=300
@@ -36,7 +39,7 @@ PidFile=/var/run/slurmdbd.pid
#
# Database info
StorageType=accounting_storage/mysql
-StorageHost=localhost
+StorageHost={{ mysql_host }}
#StoragePort=1234
StoragePass={{ slurmdb_passwd }}
StorageUser=slurmdb
......
---
- include_vars: "{{ ansible_os_family }}.yml"
- name: Install epel-release
  yum: name=epel-release-7-5.noarch state=present
  sudo: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: Enable epel
  command: yum-config-manager --enable epel
  sudo: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: install lua
  yum: name={{ item }} state=installed
  with_items:
......
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
  copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
  sudo: true
  when: ansible_os_family == 'RedHat'
- name: install gluster
  yum: name={{ item }} state='latest'
  when: ansible_os_family == 'RedHat'
  with_items:
    - glusterfs-client
  sudo: true
- name: install gluster
  apt: name=glusterfs-client state='latest'
  when: ansible_os_family == 'Debian'
  sudo: true
- name: mount volume
  #mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,_netdev,backupvolfile-server={{ gluster_servers[1] }}"
  mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,backupvolfile-server={{ gluster_servers[1] }},noauto,comment=systemd.automount"
  sudo: true
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
  copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
  sudo: true
  when: ansible_os_family == 'RedHat'
- name: install gluster
  yum: name={{ item }} state='latest'
  when: ansible_os_family == 'RedHat'
  with_items:
    - glusterfs
    - glusterfs-server
  sudo: true
- name: install gluster
  apt: name=glusterfs-server state='latest'
  when: ansible_os_family == 'Debian'
  sudo: true
- name: start daemon
  service: name=glusterd enabled=yes state=started
  sudo: true
  when: ansible_os_family == 'RedHat'
- name: start daemon
  service: name=glusterfs-server enabled=yes state=started
  sudo: true
  when: ansible_os_family == 'Debian'
- name: make server list
  set_fact:
    server_list: "{{ gluster_servers|join(',') }}"
- name: echo server list
  debug: var=server_list
- name: make brick dir
  file: state=directory path="{{ brickmnt }}/brick"
  sudo: true
- name: create volume
  gluster_volume:
    name: "{{ volname }}"
    brick: "{{ brickmnt }}/brick"
    cluster: "{{ server_list }}"
    replicas: "{{ replicas }}"
    state: present
  sudo: true
  run_once: true
@@ -40,7 +40,5 @@
  notify: restart sssd
- name: "start sssd"
-  service: name=sssd state=started
+  service: name=sssd state=started enabled=yes
  sudo: true
---
-- name: stat usrlocal
+- name: stat
  stat: path={{ dest }}
-  register: stat_usrlocal
+  register: stat_r
- name: mv
-  command: mv /usr/local /usr/local_old
-  when: stat_usrlocal.stat.isdir == True
+  command: mv "{{ dest }}" "{{ dest }}_old"
+  when: stat_r.stat.exists and stat_r.stat.isdir
  sudo: true
+- name: stat
+  stat: path={{ dest }}
+  register: stat_r
+- name: link
+  file: src="{{ src }}" dest="{{ dest }}" state=link
+  when: not stat_r.stat.exists
+  sudo: true
@@ -12,6 +12,17 @@
#  sudo: true
#  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
+- name: Install epel-release
+  yum: name=epel-release-7-5.noarch state=present
+  sudo: true
+  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
+- name: Enable epel
+  command: yum-config-manager --enable epel
+  sudo: true
+  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: install lua
  yum: name={{ item }} state=installed
  with_items:
......
---
- include_vars: "{{ ansible_distribution }}_{{ ansible_distribution_major_version }}.yaml"
- name: copy rpms/debs
  copy: dest=/tmp/ src=lustre-install/{{ item }}
  with_items:
    "{{ lustre_pkgs }}"
#- name: install rpms
#  yum: name="/tmp/{{ item }}"
#  sudo: true
#  with_items: "{{ lustre_pkgs }}"
- name: install rpms
  yum: name=/tmp/lustre-client-modules-2.7.0-3.10.0_229.14.1.el7.x86_64.x86_64.rpm
  sudo: true
  when: ansible_os_family == "RedHat"
- name: install rpms
  yum: name=/tmp/lustre-client-2.7.0-3.10.0_229.14.1.el7.x86_64.x86_64.rpm
  sudo: true
  when: ansible_os_family == "RedHat"
# instructions to build these debs:
#   Instantiate an Ubuntu 14.04 instance
#   git clone git://git.hpdd.intel.com/fs/lustre-release.git
#   cd lustre-release
#   optionally git checkout 0754bc8f2623bea184111af216f7567608db35b6 <- I know this commit works on Ubuntu, but I had a lot of trouble with other branches
#   sh autogen.sh
#   ./configure --enable-dist --disable-doc --disable-server --disable-dependency-tracking --with-o2ib=/var/lib/dkms/mlnx-ofed-kernel/3.1/build/
#   mkdir BUILD
#   cd BUILD
#   ln -s ../lustre-2.7.62.tar.gz lustre-2.7.62.orig.tar.gz
#   tar zxvf ../lustre-2.7.62.tar.gz
#   cd lustre-2.7.62
#   ./configure --disable-doc --disable-server --disable-dependency-tracking --with-o2ib=/var/lib/dkms/mlnx-ofed-kernel/3.1/build/
#   vi debian/changelog (the version number on the first line is incorrect)
#   make debs
#
- name: install debs
  apt: name="/tmp/{{ item }}"
  sudo: true
  with_items: "{{ lustre_pkgs }}"
  when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version == "14"
- name: "Mount lustre filesystems"
  mount: name="{{ item.mntpt }}" src="{{ item.servers }}/{{ item.src }}" state="mounted" fstype="lustre" opts="_netdev,flock"
  sudo: true
  with_items: "{{ mntlist }}"
---
lustre_pkgs:
  - lustre-client-modules-2.7.0-3.10.0_229.14.1.el7.x86_64.x86_64.rpm
  - lustre-client-2.7.0-3.10.0_229.14.1.el7.x86_64.x86_64.rpm
---
lustre_pkgs:
  - linux-patch-lustre_2.7.62-1_all.deb
  - lustre-client-modules-3.13.0-58-generic_2.7.62-1_amd64.deb
  - lustre-utils_2.7.62-1_amd64.deb
---
-- name: Format File Systems
-  filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
-  with_items: mkFileSystems
+- name: format volumes
+  filesystem: fstype={{ item.fstype }} dev={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }}
+  with_items: volumes
  sudo: true
-  when: mkFileSystems is defined
-- name: Mount device
-  mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
-  with_items: mountFileSystems
+- name: mount volumes
+  mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
+  with_items: volumes
  sudo: true
-  when: mountFileSystems is defined
+- name: symlink volumes
+  file: force=yes state=link src="{{ item.mntpt }}" path="{{ item.linkto }}"
+  when: item.linkto is defined
+  with_items: volumes
+  sudo: true
+#- name: Format File Systems
+#  filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
+#  with_items: mkFileSystems
+#  sudo: true
+#  when: mkFileSystems is defined
+#
+#- name: Mount device
+#  mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
+#  with_items: mountFileSystems
+#  sudo: true
+#  when: mountFileSystems is defined
+#
+#
#!/bin/sh
# A CRUDE Script to install Mellanox OFED drivers
# Philip.Chan@monash.edu
#
# TODO: check if MLNX_OFED is already installed!
# TODO: check kernel...
KERN=`uname -r`
if [ "$KERN" != "3.10.0-229.14.1.el7.x86_64" ]
then
echo "Oops! Did you forget to reboot?"
echo "Kernel version has to be 3.10.0-229.14.1.el7.x86_64"
exit 1
fi
sudo yum install -y pciutils gcc-gfortran libxml2-python tcsh libnl lsof tcl tk perl
sudo yum install -y gtk2 atk cairo
tar xzvf MLNX_OFED_LINUX-3.1-1.0.3-rhel7.1-x86_64-ext.tgz
cd MLNX_OFED_LINUX-3.1-1.0.3-rhel7.1-x86_64-ext
sudo ./mlnxofedinstall -q