Commit 2c572442 authored by Jupiter Hu

fixed conflict

parents 9ece1350 91d3cb3a
Showing with 253 additions and 137 deletions
#!/usr/bin/env python
import sys, os, string, subprocess, socket, re
import copy, shlex,uuid, random, multiprocessing, time, shutil, json
import novaclient.v1_1.client as nvclient
import novaclient.exceptions as nvexceptions
#import novaclient.v1_1.client as nvclient
#import novaclient.exceptions as nvexceptions
from keystoneclient.auth.identity import v2 as v2_auth
from heatclient import client as heat_client
#from heatclient import client as heat_client
#from novaclient import client as nova_client
#from cinderclient import client as cinder_client
import heatclient
import novaclient
import cinderclient
import heatclient.client
import novaclient.client
import cinderclient.client
import keystoneclient.client
from keystoneclient.auth.identity import v2
from keystoneclient import session
from novaclient import client
from keystoneclient import session as kssession
#NOVA_STANDALONE=True
NOVA_STANDALONE=False
class OpenStackConnection:
@@ -18,82 +32,6 @@ class OpenStackConnection:
self.tenantID= os.environ['OS_TENANT_ID']
self.authUrl="https://keystone.rc.nectar.org.au:5000/v2.0"
def _get_keystone_v2_auth(self, v2_auth_url, **kwargs):
auth_token = kwargs.pop('auth_token', None)
tenant_id = kwargs.pop('project_id', None)
tenant_name = kwargs.pop('project_name', None)
if auth_token:
return v2_auth.Token(v2_auth_url, auth_token,
tenant_id=tenant_id,
tenant_name=tenant_name)
else:
return v2_auth.Password(v2_auth_url,
username=kwargs.pop('username', None),
password=kwargs.pop('password', None),
tenant_id=tenant_id,
tenant_name=tenant_name)
def _get_keystone_session(self, **kwargs):
# first create a Keystone session
cacert = kwargs.pop('cacert', None)
cert = kwargs.pop('cert', None)
key = kwargs.pop('key', None)
insecure = kwargs.pop('insecure', False)
timeout = kwargs.pop('timeout', None)
verify = kwargs.pop('verify', None)
# FIXME(gyee): this code should come from keystoneclient
if verify is None:
if insecure:
verify = False
else:
# TODO(gyee): should we do
# heatclient.common.http.get_system_ca_fle()?
verify = cacert or True
if cert and key:
# passing cert and key together is deprecated in favour of the
# requests lib form of having the cert and key as a tuple
cert = (cert, key)
return kssession.Session(verify=verify, cert=cert, timeout=timeout)
def _get_keystone_auth(self, session, auth_url, **kwargs):
# FIXME(dhu): this code should come from keystoneclient
# discover the supported keystone versions using the given url
v2_auth_url=auth_url
v3_auth_url=None
# Determine which authentication plugin to use. First inspect the
# auth_url to see the supported version. If both v3 and v2 are
# supported, then use the highest version if possible.
auth = None
if v3_auth_url and v2_auth_url:
user_domain_name = kwargs.get('user_domain_name', None)
user_domain_id = kwargs.get('user_domain_id', None)
project_domain_name = kwargs.get('project_domain_name', None)
project_domain_id = kwargs.get('project_domain_id', None)
# support both v2 and v3 auth. Use v3 if domain information is
# provided.
if (user_domain_name or user_domain_id or project_domain_name or
project_domain_id):
auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
else:
auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
elif v3_auth_url:
# support only v3
auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
elif v2_auth_url:
# support only v2
auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
else:
raise Exception('Unable to determine the Keystone version to authenticate with using the given auth_url.')
return auth
def get_stack_name(self,stack):
stacks=[]
for s in self.hc.stacks.list():
@@ -108,46 +46,30 @@ class OpenStackConnection:
raise Exception("You have multiple heat stacks in your OpenStack Project and I'm not sure which one to use.\n You can select a stack by symlinking to a stack, for example if you have a stack called mycluster do ln -s %s mycluster\n"%stack)
def auth(self):
self.nc = nvclient.Client( auth_url=self.authUrl,
username=self.username,
api_key=self.passwd,
project_id=self.tenantName,
tenant_id=self.tenantID,
service_type="compute"
)
kwargs = {
'insecure': False,
}
keystone_session = self._get_keystone_session(**kwargs)
kwargs = {
'username': self.username,
'password': self.passwd,
'project_id': self.tenantID,
'project_name': self.tenantName,
'tenant_id': self.tenantID,
'auth_url':self.authUrl,
}
keystone_auth = self._get_keystone_auth(keystone_session,
self.authUrl,
**kwargs)
endpoint = keystone_auth.get_endpoint(keystone_session,service_type='orchestration', region_name=None)
auth = v2.Password(**kwargs)
sess = session.Session(auth=auth)
kwargs = {
'username': self.username,
'include_pass': False,
'session': keystone_session,
'auth_url': self.authUrl,
'region_name': '',
'endpoint_type': 'publicURL',
'service_type': 'orchestration',
'password': self.passwd,
'auth': keystone_auth,
'session':sess,
}
api_version='2'
self.nc = novaclient.client.Client(api_version, session=sess)
api_version=1
endpoint="https://heat.rc.nectar.org.au:8004/v1/%s"%self.tenantID
self.hc = heatclient.client.Client(api_version, endpoint, session=sess)
self.hc = heat_client.Client(api_version, endpoint, **kwargs)
api_version=1
self.cc = cinderclient.client.Client(api_version, session=sess)
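
For reference, the session-based construction used above reduces to roughly the following minimal sketch, assuming the keystoneclient v2 / novaclient / heatclient / cinderclient interfaces of this era (the Keystone and Heat endpoint URLs simply mirror the ones hard-coded in this script):

    import os
    from keystoneclient.auth.identity import v2
    from keystoneclient import session
    import novaclient.client
    import heatclient.client
    import cinderclient.client

    # One Keystone v2 password plugin wrapped in a session shared by all clients.
    auth = v2.Password(auth_url="https://keystone.rc.nectar.org.au:5000/v2.0",
                       username=os.environ['OS_USERNAME'],
                       password=os.environ['OS_PASSWORD'],
                       tenant_id=os.environ['OS_TENANT_ID'])
    sess = session.Session(auth=auth)

    nova = novaclient.client.Client('2', session=sess)     # compute
    heat = heatclient.client.Client(1, "https://heat.rc.nectar.org.au:8004/v1/%s" % os.environ['OS_TENANT_ID'],
                                    session=sess)          # orchestration
    cinder = cinderclient.client.Client(1, session=sess)   # block storage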
def recurse_resources(self,stack,resource):
@@ -170,6 +92,7 @@ class OpenStackConnection:
instance_ids.extend(self.recurse_resources(stack=i,resource=r))
nc=self.nc
cc=self.cc
inventory = {}
inventory['_meta'] = { 'hostvars': {} }
for server in nc.servers.list():
@@ -190,6 +113,7 @@ class OpenStackConnection:
inventory[server.metadata['ansible_host_group']].append(hostname)
else:
inventory[server.metadata['ansible_host_group']] = [hostname]
#print dir(server)
# Set the other host variables
inventory['_meta']['hostvars'][hostname] = {}
inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = server.networks.values()[0][0]
@@ -198,6 +122,13 @@ class OpenStackConnection:
if 'ansible_ssh' in key:
inventory['_meta']['hostvars'][hostname][key] = server.metadata[key]
inventory['_meta']['hostvars'][hostname]['ansible_ssh_user'] = 'ec2-user'
for vol in server.to_dict()['os-extended-volumes:volumes_attached']:
for cv in cc.volumes.findall():
if cv.id == vol['id']:
devname = '/dev/disk/by-id/virtio-'+cv.id[0:20]
if not 'ansible_host_volumes' in inventory['_meta']['hostvars'][hostname]:
inventory['_meta']['hostvars'][hostname]['ansible_host_volumes']={}
inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'][cv.display_name]={'uuid':vol['id'],'dev':devname}
print json.dumps(inventory)
if __name__ == "__main__":
......
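
When invoked by Ansible as a dynamic inventory, the script prints JSON shaped roughly as below (host names, group names, volume names and IDs are purely illustrative; groups come from each server's ansible_host_group metadata, and the device path is /dev/disk/by-id/virtio- plus the first 20 characters of the Cinder volume ID):

    {
      "LoginNodes": ["mycluster-login0"],
      "ComputeNodes": ["mycluster-compute0", "mycluster-compute1"],
      "_meta": {
        "hostvars": {
          "mycluster-login0": {
            "ansible_ssh_host": "172.16.0.10",
            "ansible_ssh_user": "ec2-user",
            "ansible_host_volumes": {
              "homevol": {
                "uuid": "6a2e1f9c-3d44-4b6f-9a1e-7c2b5d8e0f31",
                "dev": "/dev/disk/by-id/virtio-6a2e1f9c-3d44-4b6f-9"
              }
            }
          }
        }
      }
    }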
@@ -4,3 +4,10 @@
- name: fetch slurm.conf
fetch: src=/tmp/slurm.conf dest=files/slurm.conf flat=yes
- name: "Templating slurmdbd.conf"
template: src=slurmdbd.conf.j2 dest=/tmp/slurmdbd.conf owner=root group=root mode=644
sudo: true
- name: fetch slurmdbd.conf
fetch: src=/tmp/slurmdbd.conf dest=files/slurmdbd.conf flat=yes
@@ -10,6 +10,9 @@
#
ClusterName={{ clustername }}
ControlMachine={{ slurmctrl }}
{% if slurmctrlbackup is defined %}
BackupController={{ slurmctrlbackup }}
{% endif %}
#ControlAddr=
#BackupController=
#BackupAddr=
@@ -121,7 +124,10 @@ Epilog={{ slurmjob.epilog }}
#JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost={{ slurmctrl }}
AccountingStorageHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
AccountingStorageBackupHost={{ slurmdbdbackup }}
{% endif %}
#AccountingStorageEnforce=limits,safe
#AccountingStorageLoc=
#AccountingStoragePass=
......
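
With, for example, slurmctrl=mgmt0, slurmctrlbackup=mgmt1, slurmdbd=sql0 and slurmdbdbackup=sql1 (hypothetical hostnames), the slurm.conf template above renders the failover-related lines as:

    ControlMachine=mgmt0
    BackupController=mgmt1
    AccountingStorageType=accounting_storage/slurmdbd
    AccountingStorageHost=sql0
    AccountingStorageBackupHost=sql1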
@@ -17,7 +17,10 @@ AuthType=auth/munge
#
# slurmDBD info
#DbdAddr=
DbdHost={{ slurmctrl }}
DbdHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
DbdBackupHost={{ slurmdbdbackup }}
{% endif %}
#DbdPort=7031
SlurmUser=slurm
#MessageTimeout=300
@@ -36,7 +39,7 @@ PidFile=/var/run/slurmdbd.pid
#
# Database info
StorageType=accounting_storage/mysql
StorageHost=localhost
StorageHost={{ mysql_host }}
#StoragePort=1234
StoragePass={{ slurmdb_passwd }}
StorageUser=slurmdb
......
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
sudo: true
when: ansible_os_family == 'RedHat'
- name: install gluster
yum: name={{ item }} state='latest'
when: ansible_os_family == 'RedHat'
with_items:
- glusterfs-client
sudo: true
- name: install gluster
apt: name=glusterfs-client state='latest'
when: ansible_os_family == 'Debian'
sudo: true
- name: mount volume
#mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,_netdev,backupvolfile-server={{ gluster_servers[1] }}"
mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,backupvolfile-server={{ gluster_servers[1] }},noauto,comment=systemd.automount"
sudo: true
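
Because the task uses state="mounted", Ansible's mount module both mounts the volume and records a persistent fstab entry; with hypothetical values volname=myvol, volmnt=/gvol and gluster_servers=[gs0, gs1] that entry would look roughly like:

    gs0:/myvol /gvol glusterfs defaults,acl,backupvolfile-server=gs1,noauto,comment=systemd.automount 0 0

The noauto,comment=systemd.automount pair hands the mount to a systemd automount unit, so boot does not block if the Gluster servers are unreachable.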
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
sudo: true
when: ansible_os_family == 'RedHat'
- name: install gluster
yum: name={{ item }} state='latest'
when: ansible_os_family == 'RedHat'
with_items:
- glusterfs
- glusterfs-server
sudo: true
- name: install gluster
apt: name=glusterfs-server state='latest'
when: ansible_os_family == 'Debian'
sudo: true
- name: start daemon
service: name=glusterd enabled=yes state=started
sudo: true
- name: make server list
set_fact:
server_list: "{{ gluster_servers|join(',') }}"
- name: echo server list
debug: var=server_list
- name: make brick dir
file: state=directory path="{{ brickmnt }}/brick"
sudo: true
- name: create volume
gluster_volume:
name: "{{ volname }}"
brick: "{{ brickmnt }}/brick"
cluster: "{{ server_list }}"
replicas: "{{ replicas }}"
state: present
sudo: true
run_once: true
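
On the node where run_once executes, the gluster_volume task is roughly equivalent to the following CLI call, assuming hypothetical values volname=myvol, brickmnt=/brick, replicas=2 and servers gs0/gs1:

    gluster volume create myvol replica 2 gs0:/brick/brick gs1:/brick/brick

Starting the volume is a separate step, e.g. gluster volume start myvol, or state: started in the module.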
@@ -154,11 +154,11 @@
sudo: true
when: karaage_db_init.stdout.find("0") == 0
-
name: "Create IDP institutes (disable it as cache is not available)"
shell: kg-idps /tmp/metadata.aaf.xml
sudo: true
when: karaage_db_init.stdout.find("0") == 0
#-
# name: "Create IDP institutes (disable it as cache is not available)"
# shell: kg-idps /tmp/metadata.aaf.xml
# sudo: true
# when: karaage_db_init.stdout.find("0") == 0
-
name: "Create projects"
......
<html><body><h3>HPC identity management</h3>
<p>To log in via AAF authentication, connect to <a href=https://{{ ansible_fqdn }}/aafbootstrap>aafbootstrap</a></p>
<p>To log in without AAF authentication, connect to <a href=https://{{ ansible_fqdn }}/users>users</a></p>
</body></html>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<body><h3>HPC identity system (this landing page is under construction)</h3>
<br>
<p>The Monash HPC identity system is a new HPC access control system. Access to the HPC ID system is through the Australian Access Federation (AAF), which allows you to log in using your institutional username and password.
<br>
<br>
If this is the first time you are using the system, it will give you the option of selecting your existing HPC username when creating a new user account. You will need to join projects before you can access the HPC system.
<br>
<br>
If your organisation is not a member of the AAF, or if you need help, please email HPC support: help@massive.org.au.</p>
<br>
<p>Click the following link <a href=https://{{ ansible_fqdn }}/aafbootstrap>to continue.</a></p>
</body>
</html>
@@ -145,7 +145,7 @@ GLOBAL_DATASTORES = [
]
# The email address that error messages come from, such as those sent to ADMINS
# and MANAGERS.
SERVER_EMAIL = '{{ karaageAdminEmail }}'
SERVER_EMAIL = '{{ karaageServerEmail }}'
# The host to use for sending email.
EMAIL_HOST = 'localhost'
......
---
system_packages:
- openldap-servers
- openldap-clients
- openssl
dbname: olcDatabase={2}bdb
ldapuser: ldap
ldapgroup: ldap
---
- name: stat usrlocal
- name: stat
stat: path={{ dest }}
register: stat_usrlocal
register: stat_r
- name: mv
command: mv /usr/local /usr/local_old
when: stat_usrlocal.stat.isdir == True
command: mv "{{ dest }}" "{{ dest }}_old"
when: stat_r.stat.exists and stat_r.stat.isdir
sudo: true
- name: stat
stat: path={{ dest }}
register: stat_r
- name: link
file: src="{{ src }}" dest="{{ dest }}" state=link
when: not stat_r.stat.exists
sudo: true
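
These tasks are driven by two variables, src and dest; a hypothetical use that replaces /usr/local with a symlink to shared storage would set something like:

    src: /mnt/shared/usr_local
    dest: /usr/local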
---
- name: Format File Systems
filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
with_items: mkFileSystems
- name: format volumes
filesystem: fstype={{ item.fstype }} dev={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }}
with_items: volumes
sudo: true
when: mkFileSystems is defined
- name: Mount device
mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
with_items: mountFileSystems
- name: format volumes
mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
with_items: volumes
sudo: true
when: mountFileSystems is defined
- name: symlink volumes
file: force=yes state=link src="{{ item.mntpt }}" path="{{ item.linkto }}"
when: item.linkto is defined
with_items: volumes
sudo: true
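
These tasks look each volume up by its Cinder display name in the ansible_host_volumes fact that the dynamic inventory script above attaches to every host, so the volumes variable is expected to look something like this (names, filesystems, mount points and the optional linkto are illustrative):

    volumes:
      - name: dbvol
        fstype: ext4
        mntpt: /mnt/db
        linkto: /var/lib/mysql
      - name: scratchvol
        fstype: xfs
        mntpt: /scratch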
#- name: Format File Systems
# filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
# with_items: mkFileSystems
# sudo: true
# when: mkFileSystems is defined
#
#- name: Mount device
# mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
# with_items: mountFileSystems
# sudo: true
# when: mountFileSystems is defined
#
#
---
- name: make dir
file: path="{{ provision | dirname }}" state=directory mode=755 owner=root
sudo: true
- name: copy provision template
template: src=provision.sh.j2 dest={{ provision }} mode=755 owner=root
sudo: true
......
@@ -32,7 +32,7 @@ for user in ${user_list[*]}; do
if [ -z "${find}" ]; then
su slurm -c "$sacctmgr -i add account ${account} Description=CVL Organization=monash cluster=${cluster}" || { echo "error to create account ${account}" >> ${log_file} && exit 1; }
fi
find=$(sacctmgr list user ${user} | grep ${user})
find=$(sacctmgr list user --noheader -p ${user} | grep ${user})
if [ -z "${find}" ]; then
su slurm -c "$sacctmgr -i add user ${user} account=${account} cluster=${cluster}" || { echo "error to create user ${user}" >> ${log_file} && exit 1; }
fi
......
---
slurm_provision: "/usr/local/sbin/slurm_provision.sh"
home_dir: "/home"
provision: "/usr/local/sbin/provision.sh"
@@ -22,13 +22,13 @@
sudo: true
- name: install slurmdbd.conf
template: src=slurmdbd.conf.j2 dest={{ slurm_dir }}/etc/slurmdbd.conf
copy: src=files/slurmdbd.conf dest={{ slurm_dir }}/etc/slurmdbd.conf
sudo: true
when: slurm_dir is defined
- name: install slurmdbd.conf
template: src=slurmdbd.conf.j2 dest=/etc/slurm/slurmdbd.conf
copy: src=slurmdbd.conf dest=/etc/slurm/slurmdbd.conf
sudo: true
when: slurm_dir is not defined
......
#!/bin/sh
mkdir /local_home
usermod -m -d /local_home/ec2-user ec2-user