Commit 1e589173 authored by Chris Hines

Merge branch 'master' of github.com:l1ll1/ansible_cluster_in_a_box into HEAD

parents b1ed1fb6 4132c697
Showing with 635 additions and 34685 deletions
#!/usr/bin/env python
import sys, os, string, subprocess, socket, re
import copy, shlex,uuid, random, multiprocessing, time, shutil, json
#import novaclient.v1_1.client as nvclient
#import novaclient.exceptions as nvexceptions
from keystoneclient.auth.identity import v2 as v2_auth
#from heatclient import client as heat_client
#from novaclient import client as nova_client
#from cinderclient import client as cinder_client
import heatclient
import novaclient
import cinderclient
import heatclient.client
import novaclient.client
import cinderclient.client
import keystoneclient.client
from keystoneclient.auth.identity import v2
from keystoneclient import session
from novaclient import client
from keystoneclient import session as kssession
#NOVA_STANDALONE=True
NOVA_STANDALONE=False
class OpenStackConnection:
@@ -18,82 +32,6 @@ class OpenStackConnection:
self.tenantID= os.environ['OS_TENANT_ID']
self.authUrl="https://keystone.rc.nectar.org.au:5000/v2.0"
def _get_keystone_v2_auth(self, v2_auth_url, **kwargs):
auth_token = kwargs.pop('auth_token', None)
tenant_id = kwargs.pop('project_id', None)
tenant_name = kwargs.pop('project_name', None)
if auth_token:
return v2_auth.Token(v2_auth_url, auth_token,
tenant_id=tenant_id,
tenant_name=tenant_name)
else:
return v2_auth.Password(v2_auth_url,
username=kwargs.pop('username', None),
password=kwargs.pop('password', None),
tenant_id=tenant_id,
tenant_name=tenant_name)
def _get_keystone_session(self, **kwargs):
# first create a Keystone session
cacert = kwargs.pop('cacert', None)
cert = kwargs.pop('cert', None)
key = kwargs.pop('key', None)
insecure = kwargs.pop('insecure', False)
timeout = kwargs.pop('timeout', None)
verify = kwargs.pop('verify', None)
# FIXME(gyee): this code should come from keystoneclient
if verify is None:
if insecure:
verify = False
else:
# TODO(gyee): should we do
# heatclient.common.http.get_system_ca_file()?
verify = cacert or True
if cert and key:
# passing cert and key together is deprecated in favour of the
# requests lib form of having the cert and key as a tuple
cert = (cert, key)
return kssession.Session(verify=verify, cert=cert, timeout=timeout)
def _get_keystone_auth(self, session, auth_url, **kwargs):
# FIXME(dhu): this code should come from keystoneclient
# discover the supported keystone versions using the given url
v2_auth_url=auth_url
v3_auth_url=None
# Determine which authentication plugin to use. First inspect the
# auth_url to see the supported version. If both v3 and v2 are
# supported, then use the highest version if possible.
auth = None
if v3_auth_url and v2_auth_url:
user_domain_name = kwargs.get('user_domain_name', None)
user_domain_id = kwargs.get('user_domain_id', None)
project_domain_name = kwargs.get('project_domain_name', None)
project_domain_id = kwargs.get('project_domain_id', None)
# support both v2 and v3 auth. Use v3 if domain information is
# provided.
if (user_domain_name or user_domain_id or project_domain_name or
project_domain_id):
auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
else:
auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
elif v3_auth_url:
# support only v3
auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
elif v2_auth_url:
# support only v2
auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
else:
raise exc.CommandError(_('Unable to determine the Keystone '
'version to authenticate with using the '
'given auth_url.'))
return auth
def get_stack_name(self,stack):
stacks=[]
for s in self.hc.stacks.list():
@@ -108,46 +46,30 @@ class OpenStackConnection:
raise Exception("You have multiple heat stacks in your OpenStack Project and I'm not sure which one to use.\n You can select a stack by symlinking to a stack, for example if you have a stack called mycluster do ln -s %s mycluster\n"%stack)
def auth(self):
#self.nc = nvclient.Client( auth_url=self.authUrl,
#                           username=self.username,
#                           api_key=self.passwd,
#                           project_id=self.tenantName,
#                           tenant_id=self.tenantID,
#                           service_type="compute"
#                           )
#kwargs = {
#    'insecure': False,
#}
#keystone_session = self._get_keystone_session(**kwargs)
kwargs = {
'username': self.username,
'password': self.passwd,
#'project_id': self.tenantID,
#'project_name': self.tenantName,
'tenant_id': self.tenantID,
'auth_url': self.authUrl,
}
#keystone_auth = self._get_keystone_auth(keystone_session,
#                                        self.authUrl,
#                                        **kwargs)
#endpoint = keystone_auth.get_endpoint(keystone_session, service_type='orchestration', region_name=None)
auth = v2.Password(**kwargs)
sess = session.Session(auth=auth)
#kwargs = {
#    'username': self.username,
#    'include_pass': False,
#    'session': keystone_session,
#    'auth_url': self.authUrl,
#    'region_name': '',
#    'endpoint_type': 'publicURL',
#    'service_type': 'orchestration',
#    'password': self.passwd,
#    'auth': keystone_auth,
#}
api_version='2'
self.nc = novaclient.client.Client(api_version, session=sess)
api_version=1
endpoint="https://heat.rc.nectar.org.au:8004/v1/%s"%self.tenantID
self.hc = heatclient.client.Client(api_version, endpoint, session=sess)
#self.hc = heat_client.Client(api_version, endpoint, **kwargs)
api_version=1
self.cc = cinderclient.client.Client(api_version, session=sess)
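# The rewrite above boils down to one pattern: authenticate once, then hand the
# same keystone session to every service client. A minimal sketch (endpoint and
# credentials are illustrative only, not from this repository):
#
#   auth = v2.Password(auth_url='https://keystone.example.org:5000/v2.0',
#                      username='user', password='secret', tenant_id='abc123')
#   sess = session.Session(auth=auth)   # token fetched once, shared by all clients
#   nova = novaclient.client.Client('2', session=sess)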
def recurse_resources(self,stack,resource):
@@ -170,6 +92,7 @@ class OpenStackConnection:
instance_ids.extend(self.recurse_resources(stack=i,resource=r))
nc=self.nc
cc=self.cc
inventory = {}
inventory['_meta'] = { 'hostvars': {} }
for server in nc.servers.list():
@@ -190,6 +113,7 @@ class OpenStackConnection:
inventory[server.metadata['ansible_host_group']].append(hostname)
else:
inventory[server.metadata['ansible_host_group']] = [hostname]
#print dir(server)
# Set the other host variables
inventory['_meta']['hostvars'][hostname] = {}
inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = server.networks.values()[0][0]
@@ -198,6 +122,13 @@ class OpenStackConnection:
if 'ansible_ssh' in key:
inventory['_meta']['hostvars'][hostname][key] = server.metadata[key]
inventory['_meta']['hostvars'][hostname]['ansible_ssh_user'] = 'ec2-user'
for vol in server.to_dict()['os-extended-volumes:volumes_attached']:
for cv in cc.volumes.findall():
if cv.id == vol['id']:
# udev exposes the volume UUID as the virtio serial, which QEMU
# truncates to 20 characters, hence virtio-<first 20 chars of id>
devname = '/dev/disk/by-id/virtio-'+cv.id[0:20]
if 'ansible_host_volumes' not in inventory['_meta']['hostvars'][hostname]:
inventory['_meta']['hostvars'][hostname]['ansible_host_volumes']={}
inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'][cv.display_name]={'uuid':vol['id'],'dev':devname}
print json.dumps(inventory)
if __name__ == "__main__":
......
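The script above is an Ansible dynamic inventory: when run it must print a single JSON object on stdout mapping group names to host lists, with per-host variables collected under _meta.hostvars. A minimal sketch of the shape it emits (host names, addresses and volume names are made up for illustration):

import json

inventory = {
    'ComputeNodes': ['node01'],          # group name -> member hosts
    '_meta': {'hostvars': {
        'node01': {
            'ansible_ssh_host': '192.0.2.10',
            'ansible_ssh_user': 'ec2-user',
            # the dev path derives from the first 20 chars of the volume UUID
            'ansible_host_volumes': {'scratch': {'uuid': 'made-up-uuid',
                                                 'dev': '/dev/disk/by-id/virtio-madeupserial'}},
        },
    }},
}
print json.dumps(inventory)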
#!/usr/bin/env python
import sys, os, string, socket, re
import shlex, multiprocessing, time, shutil, json
#import novaclient.v1_1.client as nvclient
from novaclient import client as nvclient
import novaclient.exceptions as nvexceptions
import keystoneclient.v2_0.client as ksclient
from joblib import Parallel, delayed
from multiprocessing import Process, Manager, Pool
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser, NmapParserException
def gatherInfo(tenantName, tenantID, userName, passwd, authUrl, inventory):
## Fetch the Nova Object
projectName = os.path.basename(sys.argv[0])
nc = nvclient.Client( auth_url=authUrl,
@@ -16,24 +17,23 @@ def gatherInfo(tenantName, tenantID, userName, passwd, authUrl, inventory):
api_key=passwd,
project_id=tenantName,
tenant_id=tenantID,
service_type="compute"
version="2"
)
for server in nc.servers.list():
if server.metadata and \
   'ansible_host_groups' in server.metadata and \
   'project_name' in server.metadata:
#was: server.metadata['project_name'] == projectName.strip():
if server.metadata['project_name'].strip() != projectName.strip(): continue
unwantedChars = """][")("""
rgx = re.compile('[%s]' % unwantedChars)
ansible_groups = rgx.sub('', server.metadata['ansible_host_groups']).split(',')
hostname = socket.gethostbyaddr(server.networks.values()[0][0])[0]
# superseded by the nmap-based check below
#closed=True
#while closed:
#    hostSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#    if not hostSocket.connect_ex((hostname, 22)):
#        closed = False
#        break
#    time.sleep(5)
#    hostSocket.close()
novaVolumes = nc.volumes.get_server_volumes(server.id)
# Let's do some port scanning using nmap
nmproc = NmapProcess(hostname, "-p 22 -sV -Pn")
rc = nmproc.run()
if rc != 0: continue
parsed = NmapParser.parse(nmproc.stdout)
# Set Ansible Host Group
for group in ansible_groups:
groupName = group.strip()
@@ -43,11 +43,12 @@ def gatherInfo(tenantName, tenantID, userName, passwd, authUrl, inventory):
inventory['_meta']['hostvars'][hostname] = {}
for key, value in server.metadata.iteritems():
if key not in ('project_name','ansible_host_groups'):
# assign per key; replacing the whole dict here would clobber earlier keys
inventory['_meta']['hostvars'][hostname][key] = value
if novaVolumes:
inventory['_meta']['hostvars'][hostname]['volumeList'] = [ volume.id for volume in novaVolumes ]
inventory['_meta']['hostvars'][hostname]['status'] = parsed.hosts[0].status
else:
continue
#print inventory
#inventoryList.append(inventory)
#print json.dumps(inventory)
if __name__ == "__main__":
inventory = {}
......
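One quirk of the retired polling loop in the script above: it blocked forever when a host never opened port 22, which is presumably part of why the nmap probe replaced it. For comparison, a bounded variant of the same idea (a sketch only; the function name and limits are arbitrary):

import socket, time

def wait_for_ssh(host, port=22, retries=60, delay=5):
    # Poll until the TCP port accepts a connection, or give up
    # after roughly retries * delay seconds.
    for _ in range(retries):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if s.connect_ex((host, port)) == 0:
                return True
        finally:
            s.close()
        time.sleep(delay)
    return False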
@@ -4,3 +4,10 @@
- name: fetch slurm.conf
fetch: src=/tmp/slurm.conf dest=files/slurm.conf flat=yes
- name: "Templating slurmdbd.conf"
template: src=slurmdbd.conf.j2 dest=/tmp/slurmdbd.conf owner=root group=root mode=644
sudo: true
- name: fetch slurmdbd.conf
fetch: src=/tmp/slurmdbd.conf dest=files/slurmdbd.conf flat=yes
@@ -10,6 +10,9 @@
#
ClusterName={{ clustername }}
ControlMachine={{ slurmctrl }}
{% if slurmctrlbackup is defined %}
BackupController={{ slurmctrlbackup }}
{% endif %}
#ControlAddr=
#BackupController=
#BackupAddr=
@@ -121,7 +124,10 @@ Epilog={{ slurmjob.epilog }}
#JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
#AccountingStorageHost={{ slurmctrl }}
AccountingStorageHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
AccountingStorageBackupHost={{ slurmdbdbackup }}
{% endif %}
#AccountingStorageEnforce=limits,safe
#AccountingStorageLoc=
#AccountingStoragePass=
......
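The {% if ... is defined %} guards added above mean the backup entries are emitted only when the corresponding variable is set in the inventory. A quick standalone check of both renderings (assumes the jinja2 package; hostnames are made up):

from jinja2 import Template

tmpl = Template("ControlMachine={{ slurmctrl }}\n"
                "{% if slurmctrlbackup is defined %}"
                "BackupController={{ slurmctrlbackup }}\n"
                "{% endif %}")
print tmpl.render(slurmctrl='mgmt0')                           # no backup line
print tmpl.render(slurmctrl='mgmt0', slurmctrlbackup='mgmt1')  # backup line present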
@@ -17,7 +17,10 @@ AuthType=auth/munge
#
# slurmDBD info
#DbdAddr=
#DbdHost={{ slurmctrl }}
DbdHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
DbdBackupHost={{ slurmdbdbackup }}
{% endif %}
#DbdPort=7031
SlurmUser=slurm
#MessageTimeout=300
@@ -36,7 +39,7 @@ PidFile=/var/run/slurmdbd.pid
#
# Database info
StorageType=accounting_storage/mysql
#StorageHost=localhost
StorageHost={{ mysql_host }}
#StoragePort=1234
StoragePass={{ slurmdb_passwd }}
StorageUser=slurmdb
......
---
-
name: Removing the RDO repository
file: path=/etc/yum.repos.d/rdo-release.repo state=absent
sudo: true
-
name: Install epel-release
yum: name=epel-release-7-5.noarch state=present
sudo: true
-
name: Enable epel
command: yum-config-manager --enable epel
sudo: true
-
name: Installing Base Packages
yum: name={{ item }} state=present
with_items:
- yum-utils
- deltarpm-3.6-3.el7.x86_64
- yum-plugin-versionlock
sudo: true
-
name: Installing Core packages
yum: name="{{ item.software }}-{{ item.version }}.{{ item.arch }}" state=present
with_items: package_list
sudo: true
-
name: Performing version lock on the packages
shell: yum versionlock \*
sudo: true
---
- include: installBasePackages.yml
---
- name: grab cacert
#shell: cat /etc/openldap/certs/cacert.pem
shell: cat {{ ldapCARootDest }}
register: ldapCaCertContents
- name: dump vars
......
@@ -5,7 +5,7 @@ ldapCaCertContents: |
{{ l }}
{% endfor %}
ldapCaCertFile: /etc/ssl/certs/cacert.crt
ldapDomain: "{{ ldapDomain }}"
ldapDomain: "{{ domain }}"
ldapURI: "ldaps://{{ ansible_fqdn }}:636"
ldapBindDN: "{{ ldapBindDN }}"
ldapBindDNPassword: "{{ ldapBindDNPassword }}"
......
---
- include_vars: "{{ ansible_os_family }}.yml"
- name: Install epel-release
yum: name=epel-release-7-5.noarch state=present
sudo: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: Enable epel
command: yum-config-manager --enable epel
sudo: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: install lua
yum: name={{ item }} state=installed
with_items:
......
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
sudo: true
when: ansible_os_family == 'RedHat'
- name: install gluster
yum: name={{ item }} state='latest'
when: ansible_os_family == 'RedHat'
with_items:
- glusterfs-client
sudo: true
- name: install gluster
apt: name=glusterfs-client state='latest'
when: ansible_os_family == 'Debian'
sudo: true
- name: mount volume
#mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,_netdev,backupvolfile-server={{ gluster_servers[1] }}"
mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,backupvolfile-server={{ gluster_servers[1] }},noauto,comment=systemd.automount"
sudo: true
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
---
- name: add repo
copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
sudo: true
when: ansible_os_family == 'RedHat'
- name: install gluster
yum: name={{ item }} state='latest'
when: ansible_os_family == 'RedHat'
with_items:
- glusterfs
- glusterfs-server
sudo: true
- name: install gluster
apt: name=glusterfs-server state='latest'
when: ansible_os_family == 'Debian'
sudo: true
- name: start daemon
service: name=glusterd enabled=yes state=started
sudo: true
when: ansible_os_family == 'RedHat'
- name: start daemon
service: name=glusterfs-server enabled=yes state=started
sudo: true
when: ansible_os_family == 'Debian'
- name: make server list
set_fact:
server_list: "{{ gluster_servers|join(',') }}"
- name: echo server list
debug: var=server_list
- name: make brick dir
file: state=directory path="{{ brickmnt }}/brick"
sudo: true
- name: create volume
gluster_volume:
name: "{{ volname }}"
brick: "{{ brickmnt }}/brick"
cluster: "{{ server_list }}"
replicas: "{{ replicas }}"
state: present
sudo: true
run_once: true
@@ -51,7 +51,7 @@
-
name: "Getting Karaage from Github"
git: repo="https://github.com/monash-merc/karaage.git" dest="/root/karaage3.1.7" force=yes
git: repo="https://github.com/monash-merc/karaage.git" dest="/root/karaage3.1.7" {% if karaage_source_version is defined %}version="{{ karaage_source_version }}" {% endif %} force=yes
sudo: true
-
@@ -116,7 +116,7 @@
sudo: true
- name: install shibboleth cache file
#template: src=metadata.aaf.xml.j2 dest=/tmp/metadata.aaf.xml
template: src="files/{{ shibboleth_deploy }}_metadata.aaf.xml.j2" dest=/tmp/metadata.aaf.xml
-
name: "enabling Karaage configuration"
@@ -191,3 +191,10 @@
cron: name=idps job=/usr/bin/kg-idps user=root day=*/1 state=present
sudo: true
-
name: "Templating username list"
template: src=files/{{ item }} dest=/{{ user_id_file_dir }}/{{ item }}
with_items: user_id_file
sudo: true
when: user_id_file is defined and user_id_file_dir is defined
<html><body><h3>HPC identity management</h3>
<p>To log in via AAF authentication, connect to <a href=https://{{ ansible_fqdn }}/aafbootstrap>aafbootstrap</a></p>
<p>To log in without AAF authentication, connect to <a href=https://{{ ansible_fqdn }}/users>users</a></p>
</body></html>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<body><h3>HPC identity system (this landing page is under construction)</h3>
<br>
<p>The Monash HPC identity system is a new HPC access control system. Access to the HPC ID system is through the Australian Access Federation (AAF), which allows you to log in with your institutional username and password.
<br>
<br>
If this is the first time you are using the system, it will offer to link your existing HPC username when creating a new user account. You'll need to join a project before you can access the HPC systems.
<br>
<br>
If your organisation is not a member of the AAF, or if you need help, please email HPC support: help@massive.org.au.</p>
<br>
<p>Click the following link <a href=https://{{ ansible_fqdn }}/aafbootstrap>to continue.</a></p>
</body>
</html>
#!/usr/bin/python
#import os, sys
import os, sys, time
os.environ['DJANGO_SETTINGS_MODULE'] = "karaage.conf.settings"
from django.conf import settings
from karaage.projects.models import Project
@@ -8,6 +8,8 @@ from karaage.institutes.models import Institute
from karaage.machines.models import MachineCategory
from karaage.people.models import Person, Group
CONSOLE_DEBUG = False
class HpcIdInit():
import django
django.setup()
@@ -17,127 +19,28 @@ class HpcIdInit():
self.path = configfile
self.password = password
self.debug = debug
self.logfile = None
if not debug:
self.logfile = open("/tmp/kg_init.log", "w")
if self.path and os.path.exists(self.path):
with open(self.path) as data:
config_data = json.load(data)
self.project = config_data["project"]
self.mc = config_data["machine_category"]
self.user = config_data["superuser"]
else:
self.log("Invalid input data")
def __del__(self):
if self.logfile:
# file objects expose 'closed' as an attribute, not a method
if not self.logfile.closed:
self.logfile.close()
def log(self, message):
if self.debug:
print message
else:
now = time.strftime("%c")
self.logfile.write(now + ": " + message + "\n")
def getGroup(self, name):
group = None
try:
group =Group.objects.get(name = name)
if group:
self.log("Find group %s" %(name))
except:
self.log("Group %s not found" %(name))
finally:
return group
def getProject(self, name):
self.log("Get project %s" %(name))
project = None
try:
project = Project.objects.get(name = name)
if project:
self.log("Found project %s" %(project.name))
group = project.group
if group:
self.log("Group name = %s" %(group.name))
else:
self.log("Project %s has no group" %(project.name))
except Project.DoesNotExist:
self.log("Project %s does not exist" %(name))
except:
self.log("Exception: %s" %(traceback.format_exc()))
finally:
return project
def createProject(self, pid, name, institute_name, superuser):
project = None
try:
institute = self.getInstitute(institute_name)
if institute:
self.log("Found institute %s" %(institute.name))
project = Project.objects.create(pid = pid, name = name, institute = institute, group = institute.group, is_active = True, is_approved = True, approved_by = superuser)
if project:
project.leaders.add(superuser)
self.log("Create project OK")
else:
self.log("Create project failed")
else:
self.log("Institute %s does not exist" %(institute_name))
except:
self.log("Exception: %s" %(traceback.format_exc()))
finally:
return project
def getInstitute(self, name):
institute = None
try:
institute = Institute.objects.get(name = name)
if institute:
self.log("Institute %s exist" %(institute.name))
group = institute.group
if group:
self.log("Group name = %s" %(group.name))
else:
self.log("Institute %s not found" %(name))
except Institute.DoesNotExist:
self.log("Institute %s not found" %(name))
finally:
return institute
def getDefaultDatastore(self):
for key, value in settings.MACHINE_CATEGORY_DATASTORES.items():
if value:
return key
return None
def getMachineCategory(self, name):
mc = None
self.log("Running getMachineCategory %s" %(name))
try:
mc = MachineCategory.objects.get(name = name)
if mc:
self.log("Found machine category %s" %(mc.name))
else:
self.log("Machine category %s not found" %(name))
except MachineCategory.DoesNotExist:
self.log("Machine category %s does not exist" %(name))
except:
self.log("Exception getting machine category: %s" %(traceback.format_exc()))
finally:
return mc
def getOrCreateMachineCategory(self, name):
mc = None
try:
self.log("getOrCreateMachineCategory %s" %(name))
mc = self.getMachineCategory(name)
if not mc:
datastore = self.getDefaultDatastore()
self.log("datastore = '%s'" %(datastore))
# get_or_create returns an (object, created) tuple, not the object itself
mc, created = MachineCategory.objects.get_or_create(name = name, datastore = datastore)
self.log("after creating machine category '%s'" %(name))
if mc:
self.log("Create MachineCategory %s OK" %(mc.name))
else:
self.log("Create MachineCategory failed")
except:
self.log("Exception creating machine category: %s" %(traceback.format_exc()))
finally:
return mc
def getUser(self, username):
person = None
@@ -157,31 +60,13 @@ class HpcIdInit():
if person:
person.set_password(self.password)
person.save()
result = self.addInstituteDelegate(person, institute)
if result:
self.log("Added super user %s as delegate of institute %s" %(person.username, institute.name))
else:
self.log("Failed to add super user %s as delegate of institute %s" %(person.username, institute.name))
person.full_clean()
except:
self.log("Create super user exception: %s" %(traceback.format_exc()))
finally:
return person
def addInstituteDelegate(self, su, institute):
result = True
try:
delegates = institute.delegates.all().filter(username = su.username)
if len(delegates) == 0:
institute.delegates.add(su)
except:
result = False
self.log("Create institution delegate exception: %s" %(traceback.format_exc()))
finally:
return result
def setup(self):
self.log("Password = %s, debug = %s" %(self.password, self.debug))
su = self.getUser(self.user["username"])
if su:
self.log("Find super user %s" %(su.username))
@@ -191,36 +76,16 @@ class HpcIdInit():
self.log("Create super user %s OK" %(su.username))
else:
self.log("Create super user %s failed" %(self.user["username"]))
if self.mc:
mc = self.getOrCreateMachineCategory(self.mc)
if mc:
self.log("Get machine category = '%s'" %(self.mc))
else:
self.log("Failed to get machine category = '%s'" %(self.mc))
if su:
for p in self.project:
project = self.getProject(p["project_name"])
if project:
self.log("Find project %s" %(project.name))
else:
self.log("Create project name = %s, pid = %s, institute name = %s" %(p["project_name"], p["pid"], p["institute_name"]))
project = self.createProject(p["pid"], p["project_name"], p["institute_name"], su)
if project:
self.log("Create project %s OK." %(project.name))
else:
self.log("Create project %s failed." %(p["project_name"]))
break
def main(argv):
config_path = None
if len(sys.argv) > 2:
config_path = argv[0]
password = argv[1]
#debug = True
debug = CONSOLE_DEBUG
if len(sys.argv) > 3:
# argv[2] arrives as a string; compare explicitly so "False" is not truthy
debug = (argv[2] == "True")
init = HpcIdInit(config_path, password, debug)
init.log("Password = %s, debug = %s" %(password, debug))
init.setup()
else:
print "Usage: kg_init <config file> <superuser password> <option: debug True | False>"
......
{"superuser": {"username": "admin", "email": "jupiter.hu@monash.edu", "institute_name": "Monash University", "short_name": "admin", "full_name": "admin"}}