diff --git a/dynamicInventory b/dynamicInventory
index bd89abf0607902f4daf9912b2821bdf5f1689735..5ada7f57b7544089b04deef1de67a7b92b1fd1a9 100755
--- a/dynamicInventory
+++ b/dynamicInventory
@@ -1,12 +1,26 @@
 #!/usr/bin/env python
 import sys, os, string, subprocess, socket, re
 import copy, shlex,uuid, random, multiprocessing, time, shutil, json
-import novaclient.v1_1.client as nvclient
-import novaclient.exceptions as nvexceptions
+#import novaclient.v1_1.client as nvclient
+#import novaclient.exceptions as nvexceptions
 from keystoneclient.auth.identity import v2 as v2_auth
-from heatclient import client as heat_client
+#from heatclient import client as heat_client
+import heatclient.client
+import novaclient.client
+import cinderclient.client
+import keystoneclient.client
+from keystoneclient.auth.identity import v2
+from keystoneclient import session
+from novaclient import client
 
 from keystoneclient import session as kssession
+#NOVA_STANDALONE=True
+NOVA_STANDALONE=False
 
 
 class OpenStackConnection:
@@ -18,82 +32,6 @@ class OpenStackConnection:
 		self.tenantID= os.environ['OS_TENANT_ID']
 		self.authUrl="https://keystone.rc.nectar.org.au:5000/v2.0"
 
-        def _get_keystone_v2_auth(self, v2_auth_url, **kwargs):
-            auth_token = kwargs.pop('auth_token', None)
-            tenant_id = kwargs.pop('project_id', None)
-            tenant_name = kwargs.pop('project_name', None)
-            if auth_token:
-                return v2_auth.Token(v2_auth_url, auth_token,
-                                     tenant_id=tenant_id,
-                                     tenant_name=tenant_name)
-            else:
-                return v2_auth.Password(v2_auth_url,
-                                        username=kwargs.pop('username', None),
-                                        password=kwargs.pop('password', None),
-                                        tenant_id=tenant_id,
-                                        tenant_name=tenant_name)
-
-
-        def _get_keystone_session(self, **kwargs):
-            # first create a Keystone session
-            cacert = kwargs.pop('cacert', None)
-            cert = kwargs.pop('cert', None)
-            key = kwargs.pop('key', None)
-            insecure = kwargs.pop('insecure', False)
-            timeout = kwargs.pop('timeout', None)
-            verify = kwargs.pop('verify', None)
-
-            # FIXME(gyee): this code should come from keystoneclient
-            if verify is None:
-                if insecure:
-                    verify = False
-                else:
-                    # TODO(gyee): should we do
-                    # heatclient.common.http.get_system_ca_fle()?
-                    verify = cacert or True
-            if cert and key:
-                # passing cert and key together is deprecated in favour of the
-                # requests lib form of having the cert and key as a tuple
-                cert = (cert, key)
-            return kssession.Session(verify=verify, cert=cert, timeout=timeout)
-
-        def _get_keystone_auth(self, session, auth_url, **kwargs):
-            # FIXME(dhu): this code should come from keystoneclient
-
-            # discover the supported keystone versions using the given url
-            v2_auth_url=auth_url
-            v3_auth_url=None
-
-            # Determine which authentication plugin to use. First inspect the
-            # auth_url to see the supported version. If both v3 and v2 are
-            # supported, then use the highest version if possible.
-            auth = None
-            if v3_auth_url and v2_auth_url:
-                user_domain_name = kwargs.get('user_domain_name', None)
-                user_domain_id = kwargs.get('user_domain_id', None)
-                project_domain_name = kwargs.get('project_domain_name', None)
-                project_domain_id = kwargs.get('project_domain_id', None)
-
-                # support both v2 and v3 auth. Use v3 if domain information is
-                # provided.
-                if (user_domain_name or user_domain_id or project_domain_name or
-                        project_domain_id):
-                    auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
-                else:
-                    auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
-            elif v3_auth_url:
-                # support only v3
-                auth = self._get_keystone_v3_auth(v3_auth_url, **kwargs)
-            elif v2_auth_url:
-                # support only v2
-                auth = self._get_keystone_v2_auth(v2_auth_url, **kwargs)
-            else:
-                raise exc.CommandError(_('Unable to determine the Keystone '
-                                         'version to authenticate with using the '
-                                         'given auth_url.'))
-
-            return auth
-
         def get_stack_name(self,stack):
             stacks=[]
             for s in self.hc.stacks.list():
@@ -108,46 +46,30 @@ class OpenStackConnection:
                 raise Exception("You have multiple heat stacks in your OpenStack Project and I'm not sure which one to use.\n You can select a stack by symlinking to a stack, for example if you have a stack called mycluster do ln -s %s mycluster\n"%stack)
 
         def auth(self):
-		self.nc = nvclient.Client(	auth_url=self.authUrl,
-			username=self.username,
-			api_key=self.passwd,
-			project_id=self.tenantName,
-			tenant_id=self.tenantID,
-			service_type="compute"
-			)
-                kwargs = {
-                    'insecure': False,
-                }
-                keystone_session = self._get_keystone_session(**kwargs)
 
                 kwargs = {
                     'username': self.username,
                     'password': self.passwd,
-                    'project_id': self.tenantID,
-                    'project_name': self.tenantName 
+                    'tenant_id': self.tenantID,
+                    'auth_url': self.authUrl,
                 }
 
-                keystone_auth = self._get_keystone_auth(keystone_session,
-                                                    self.authUrl,
-                                                    **kwargs)
-
-                endpoint = keystone_auth.get_endpoint(keystone_session,service_type='orchestration', region_name=None)
-
-
+                auth = v2.Password(**kwargs)
+                sess = session.Session(auth=auth)
                 kwargs = {
-                    'username': self.username,
-                    'include_pass': False,
-                    'session': keystone_session,
-                    'auth_url': self.authUrl,
-                    'region_name': '',
-                    'endpoint_type': 'publicURL',
-                    'service_type': 'orchestration',
-                    'password': self.passwd,
-                    'auth': keystone_auth,
+                    'session': sess,
                 }
+                api_version='2'
+                self.nc = novaclient.client.Client(api_version, session=sess)
+
                 api_version=1
+                endpoint = "https://heat.rc.nectar.org.au:8004/v1/%s" % self.tenantID
+                self.hc = heatclient.client.Client(api_version, endpoint, session=sess)
 
-                self.hc = heat_client.Client(api_version, endpoint, **kwargs)
+                api_version=1
+                self.cc = cinderclient.client.Client(api_version, session=sess)
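+                # The heat endpoint is hardcoded for the NeCTAR cloud rather
+                # than discovered from the keystone service catalogue; with a
+                # keystoneclient session, a catalogue lookup would be roughly
+                # (untested sketch):
+                #   endpoint = sess.get_endpoint(service_type='orchestration')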
 
 	
         def recurse_resources(self,stack,resource):
@@ -170,6 +92,7 @@ class OpenStackConnection:
                             instance_ids.extend(self.recurse_resources(stack=i,resource=r))
 
                 nc=self.nc
+                cc=self.cc
 		inventory = {}
 		inventory['_meta'] = { 'hostvars': {} }
 		for server in nc.servers.list():
@@ -190,6 +113,7 @@ class OpenStackConnection:
                                             inventory[server.metadata['ansible_host_group']].append(hostname)
                                     else:
                                             inventory[server.metadata['ansible_host_group']] = [hostname]
                             # Set the other host variables
                             inventory['_meta']['hostvars'][hostname] = {}
                             inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = server.networks.values()[0][0]
@@ -198,6 +122,13 @@ class OpenStackConnection:
                                 if 'ansible_ssh' in key:
                                     inventory['_meta']['hostvars'][hostname][key] = server.metadata[key]
                             inventory['_meta']['hostvars'][hostname]['ansible_ssh_user'] = 'ec2-user'
+                            for vol in server.to_dict()['os-extended-volumes:volumes_attached']:
+                                for cv in cc.volumes.findall():
+                                    if cv.id == vol['id']:
+                                        devname = '/dev/disk/by-id/virtio-'+cv.id[0:20]
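+                                        # udev exposes the disk as
+                                        # /dev/disk/by-id/virtio-<serial>; the
+                                        # virtio-blk serial is capped at 20
+                                        # bytes, hence cv.id[0:20].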
+                                        if not 'ansible_host_volumes' in inventory['_meta']['hostvars'][hostname]:
+                                            inventory['_meta']['hostvars'][hostname]['ansible_host_volumes']={}
+                                        inventory['_meta']['hostvars'][hostname]['ansible_host_volumes'][cv.display_name]={'uuid':vol['id'],'dev':devname}
 		print json.dumps(inventory)
 
 if __name__ == "__main__":
diff --git a/roles/calculateSlurmConf/tasks/main.yml b/roles/calculateSlurmConf/tasks/main.yml
index ed39703643e3a03d10fb766aac80a9a03b77e644..800ad4a5db8148fe6ff6a02d2906285ba157deed 100644
--- a/roles/calculateSlurmConf/tasks/main.yml
+++ b/roles/calculateSlurmConf/tasks/main.yml
@@ -4,3 +4,10 @@
 
 - name: fetch slurm.conf
   fetch: src=/tmp/slurm.conf dest=files/slurm.conf flat=yes
+
+- name: "Templating slurmdbd.conf"
+  template: src=slurmdbd.conf.j2 dest=/tmp/slurmdbd.conf owner=root group=root mode=644
+  sudo: true
+
+- name: fetch slurmdbd.conf
+  fetch: src=/tmp/slurmdbd.conf dest=files/slurmdbd.conf flat=yes
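+
+# files/slurmdbd.conf fetched above is distributed verbatim by the
+# slurmdb-config role (see roles/slurmdb-config/tasks/main.yml).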
diff --git a/roles/calculateSlurmConf/templates/slurm.conf.j2 b/roles/calculateSlurmConf/templates/slurm.conf.j2
index f9332b0e633cae26395306ce7d2d218710c64e1c..f8818eca480935c56a5974c7241d5f66f0d99155 100644
--- a/roles/calculateSlurmConf/templates/slurm.conf.j2
+++ b/roles/calculateSlurmConf/templates/slurm.conf.j2
@@ -10,6 +10,9 @@
 #
 ClusterName={{ clustername }}
 ControlMachine={{ slurmctrl }}
+{% if slurmctrlbackup is defined %}
+BackupController={{ slurmctrlbackup }}
+{% endif %}
 #ControlAddr=
 #BackupController=
 #BackupAddr=
@@ -121,7 +124,10 @@ Epilog={{ slurmjob.epilog }}
 #JobAcctGatherFrequency=30
 #
 AccountingStorageType=accounting_storage/slurmdbd
-AccountingStorageHost={{ slurmctrl }}
+AccountingStorageHost={{ slurmdbd }}
+{% if slurmdbdbackup is defined %}
+AccountingStorageBackupHost={{ slurmdbdbackup }}
+{% endif %}
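+# slurmdbd (and the optional slurmdbdbackup) are inventory variables naming
+# the host(s) running slurmdbd, mirroring slurmctrl/slurmctrlbackup above.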
 #AccountingStorageEnforce=limits,safe
 #AccountingStorageLoc=
 #AccountingStoragePass=
diff --git a/roles/slurmdb-config/templates/slurmdbd.conf.j2 b/roles/calculateSlurmConf/templates/slurmdbd.conf.j2
similarity index 85%
rename from roles/slurmdb-config/templates/slurmdbd.conf.j2
rename to roles/calculateSlurmConf/templates/slurmdbd.conf.j2
index 5a6cd9aed308aa8982c173a2c8d9fc3db82d90f5..dc471330d5cdf3368efac17a85ba168e1ed4eab2 100644
--- a/roles/slurmdb-config/templates/slurmdbd.conf.j2
+++ b/roles/calculateSlurmConf/templates/slurmdbd.conf.j2
@@ -17,7 +17,10 @@ AuthType=auth/munge
 #
 # slurmDBD info
 #DbdAddr=
-DbdHost={{ slurmctrl }}
+DbdHost={{ slurmdbd }}
+{% if slurmdbdbackup is defined %}
+DbdBackupHost={{ slurmdbdbackup }}
+{% endif %}
 #DbdPort=7031
 SlurmUser=slurm
 #MessageTimeout=300
@@ -36,7 +39,7 @@ PidFile=/var/run/slurmdbd.pid
 #
 # Database info
 StorageType=accounting_storage/mysql
-StorageHost=localhost
+StorageHost={{ mysql_host }}
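+# mysql_host names the MariaDB/MySQL server; it need no longer be co-located
+# with slurmdbd, which the previous hardcoded localhost assumed.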
 #StoragePort=1234
 StoragePass={{ slurmdb_passwd }}
 StorageUser=slurmdb
diff --git a/roles/gluster_client/files/glusterfs-epel.repo b/roles/gluster_client/files/glusterfs-epel.repo
new file mode 100644
index 0000000000000000000000000000000000000000..843b4baef3cf4d81aca369e49c44b92c1599c3cf
--- /dev/null
+++ b/roles/gluster_client/files/glusterfs-epel.repo
@@ -0,0 +1,22 @@
+# Place this file in your /etc/yum.repos.d/ directory
+
+[glusterfs-epel]
+name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
+baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
+enabled=1
+skip_if_unavailable=1
+gpgcheck=0
+
+[glusterfs-noarch-epel]
+name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
+baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
+enabled=1
+skip_if_unavailable=1
+gpgcheck=0
+
+[glusterfs-source-epel]
+name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
+baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
+enabled=0
+skip_if_unavailable=1
+gpgcheck=0
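+
+# Note: LATEST on download.gluster.org appears to be a symlink to the newest
+# 3.6.x release, so package versions may move between runs.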
diff --git a/roles/gluster_client/tasks/main.yml b/roles/gluster_client/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..75964d8f9f61391b0c6c7b1c1f7fa6664e9dbed6
--- /dev/null
+++ b/roles/gluster_client/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- name: add repo
+  copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
+  sudo: true
+  when: ansible_os_family == 'RedHat'
+
+- name: install gluster
+  yum: name={{ item }} state='latest'
+  when: ansible_os_family == 'RedHat'
+  with_items:
+  - glusterfs-client
+  sudo: true
+
+- name: install gluster
+  apt: name=glusterfs-client state='latest'
+  when: ansible_os_family == 'Debian'
+  sudo: true
+
+- name: mount volume
+#mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,_netdev,backupvolfile-server={{ gluster_servers[1] }}"
+  mount: name="{{ volmnt }}" src="{{ gluster_servers[0] }}:/{{ volname }}" state="mounted" fstype="glusterfs" opts="defaults,acl,backupvolfile-server={{ gluster_servers[1] }},noauto,comment=systemd.automount"
+  sudo: true
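+
+# Design note (inferred): the commented _netdev variant mounts at boot and can
+# hang if the gluster servers are not yet up; on systemd hosts,
+# noauto,comment=systemd.automount defers the mount until first access.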
diff --git a/roles/gluster_server/files/glusterfs-epel.repo b/roles/gluster_server/files/glusterfs-epel.repo
new file mode 100644
index 0000000000000000000000000000000000000000..843b4baef3cf4d81aca369e49c44b92c1599c3cf
--- /dev/null
+++ b/roles/gluster_server/files/glusterfs-epel.repo
@@ -0,0 +1,22 @@
+# Place this file in your /etc/yum.repos.d/ directory
+
+[glusterfs-epel]
+name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
+baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
+enabled=1
+skip_if_unavailable=1
+gpgcheck=0
+
+[glusterfs-noarch-epel]
+name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
+baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
+enabled=1
+skip_if_unavailable=1
+gpgcheck=0
+
+[glusterfs-source-epel]
+name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
+baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
+enabled=0
+skip_if_unavailable=1
+gpgcheck=0
diff --git a/roles/gluster_server/tasks/main.yml b/roles/gluster_server/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2abeee927077069549007176b14e3719e2182931
--- /dev/null
+++ b/roles/gluster_server/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+
+- name: add repo
+  copy: src=glusterfs-epel.repo dest=/etc/yum.repos.d/glusterfs-epel.repo
+  sudo: true
+  when: ansible_os_family == 'RedHat'
+
+- name: install gluster
+  yum: name={{ item }} state='latest'
+  when: ansible_os_family == 'RedHat'
+  with_items:
+  - glusterfs
+  - glusterfs-server
+  sudo: true
+
+- name: install gluster
+  apt: name=glusterfs-server state='latest'
+  when: ansible_os_family == 'Debian'
+  sudo: true
+
+- name: start daemon
+  service: name=glusterd enabled=yes state=started
+  sudo: true
+
+- name: make server list
+  set_fact: 
+    server_list: "{{ gluster_servers|join(',') }}"
+
+
+- name: echo server list
+  debug: var=server_list
+
+- name: make brick dir
+  file: state=directory path="{{ brickmnt }}/brick"
+  sudo: true
+
+- name: create volume
+  gluster_volume: 
+    name: "{{ volname }}"
+    brick: "{{ brickmnt }}/brick"
+    cluster: "{{ server_list }}"
+    replicas: "{{ replicas }}"
+    state: present
+  sudo: true
+  run_once: true
+
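+# Example vars this role expects (hypothetical values):
+#   gluster_servers: ['gs0', 'gs1']
+#   brickmnt: '/mnt/brick'
+#   volname: 'gvol'
+#   replicas: 2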
diff --git a/roles/karaage3.1.17/tasks/karaage.yml b/roles/karaage3.1.17/tasks/karaage.yml
index 93aeb6ba97f7a9821678785e5c77ff238620e863..465e8fd36e948c01b6eff96cbebd5b0dd4a93953 100644
--- a/roles/karaage3.1.17/tasks/karaage.yml
+++ b/roles/karaage3.1.17/tasks/karaage.yml
@@ -154,11 +154,11 @@
  sudo: true
  when: karaage_db_init.stdout.find("0") == 0
 
--
- name: "Create IDP institutes (disable it as cache is not available)"
- shell: kg-idps /tmp/metadata.aaf.xml 
- sudo: true
- when: karaage_db_init.stdout.find("0") == 0
+#-
+# name: "Create IDP institutes (disable it as cache is not available)"
+# shell: kg-idps /tmp/metadata.aaf.xml 
+# sudo: true
+# when: karaage_db_init.stdout.find("0") == 0
 
 -
  name: "Create projects"
diff --git a/roles/karaage3.1.17/templates/index.html.j2 b/roles/karaage3.1.17/templates/index.html.j2
index 4e1fda227b355c5e60c9f80b410a7873d07ca4ec..61c24b3954b21f5ebd0fa0f50abda8fd76611eac 100644
--- a/roles/karaage3.1.17/templates/index.html.j2
+++ b/roles/karaage3.1.17/templates/index.html.j2
@@ -1,4 +1,15 @@
-<html><body><h3>HPC identity management</h3>
-<p>To log in via AAF authentication, connect to <a href=https://{{ ansible_fqdn }}/aafbootstrap>aafbootstrap</a></p>
-<p>To log in without AAF authentication, connect to <a href=https://{{ ansible_fqdn }}/users>users</a></p>
-</body></html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html>
+<body><h3>HPC identity system (this landing page is under construction)</h3>
+<br>
+<p>The Monash HPC identity system is a new HPC access control system. Access to the HPC ID system is through the Australian Access Federation (AAF), which lets you log in with your institutional username and password.
+<br>
+<br>
+If this is your first time using the system, it will give you the option of selecting your existing HPC username when creating your new user account. You will need to join projects before you can access the HPC system.
+<br>
+<br>
+If your organisation is not a member of the AAF, or if you need help, please email HPC support: help@massive.org.au.</p>
+<br>
+<p>Click the following link <a href=https://{{ ansible_fqdn }}/aafbootstrap>to continue</a>.</p>
+</body>
+</html>
diff --git a/roles/karaage3.1.17/templates/settings.py.j2 b/roles/karaage3.1.17/templates/settings.py.j2
index 84b7af6eb9e503ff8f88dc7d3163ff30b743bd0e..928d33f84e79cc2c52c8d8664406f41d7489089b 100644
--- a/roles/karaage3.1.17/templates/settings.py.j2
+++ b/roles/karaage3.1.17/templates/settings.py.j2
@@ -145,7 +145,7 @@ GLOBAL_DATASTORES = [
 ]
 # The email address that error messages come from, such as those sent to ADMINS
 # and MANAGERS.
-SERVER_EMAIL = '{{ karaageAdminEmail }}'
+SERVER_EMAIL = '{{ karaageServerEmail }}'
 
 # The host to use for sending email.
 EMAIL_HOST = 'localhost'
diff --git a/roles/ldapserver/vars/CentOS_6.6_x86_64.yml b/roles/ldapserver/vars/CentOS_6.6_x86_64.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ae41ae86c9d53c509d1464ef8d21b1b18b1f1267
--- /dev/null
+++ b/roles/ldapserver/vars/CentOS_6.6_x86_64.yml
@@ -0,0 +1,8 @@
+---
+ system_packages:
+  - openldap-servers
+  - openldap-clients
+  - openssl
+ dbname: olcDatabase={2}bdb
+ ldapuser: ldap
+ ldapgroup: ldap
diff --git a/roles/link_usr_local/tasks/main.yml b/roles/link_usr_local/tasks/main.yml
index 7f3e211f98ec1ec266cf0117b663e77f05e5c232..72847692aec9dcbd7d0a654449cfac308243c549 100644
--- a/roles/link_usr_local/tasks/main.yml
+++ b/roles/link_usr_local/tasks/main.yml
@@ -1,13 +1,19 @@
 ---
-- name: stat usrlocal
+- name: stat dest
   stat: path={{ dest }}
-  register: stat_usrlocal
+  register: stat_r
 
 - name: mv
-  command: mv /usr/local /usr/local_old
-  when: stat_usrlocal.stat.isdir == True
+  command: mv "{{ dest }}" "{{ dest }}_old"
+  when: stat_r.stat.exists and stat_r.stat.isdir
   sudo: true
 
+- name: stat dest again (it may have been moved)
+  stat: path={{ dest }}
+  register: stat_r
+
 - name: link
   file: src="{{ src }}" dest="{{ dest }}" state=link
+  when: not stat_r.stat.exists
   sudo: true
diff --git a/roles/make_filesystems/tasks/main.yml b/roles/make_filesystems/tasks/main.yml
index b25a554d6960ca9736a1dcee46ec0d08276a9eff..34b0c6ed5ac859fbed246f88cce09b8eb3263d81 100644
--- a/roles/make_filesystems/tasks/main.yml
+++ b/roles/make_filesystems/tasks/main.yml
@@ -1,13 +1,31 @@
 ---
-- name: Format File Systems
-  filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
-  with_items: mkFileSystems
+- name: format volumes
+  filesystem: fstype={{ item.fstype }} dev={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }}
+  with_items: volumes
   sudo: true
-  when: mkFileSystems is defined
 
-- name: Mount device 
-  mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
-  with_items: mountFileSystems
+- name: mount volumes
+  mount: name={{ item.mntpt }} fstype={{ item.fstype }} src={{ hostvars[ansible_hostname]['ansible_host_volumes'][item.name]['dev'] }} state=mounted
+  with_items: volumes
   sudo: true
-  when: mountFileSystems is defined
 
+- name: symlink volumes
+  file: force=yes state=link src="{{ item.mntpt }}" path="{{ item.linkto }}"
+  when: item.linkto is defined
+  with_items: volumes
+  sudo: true
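+
+# Each entry in `volumes` must name a cinder volume (its display_name) that
+# the dynamic inventory publishes via ansible_host_volumes, e.g. (hypothetical):
+#   volumes:
+#     - { name: dbvol, fstype: ext4, mntpt: /mnt/db, linkto: /var/lib/mysql }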
+
+
+#- name: Format File Systems
+#  filesystem: fstype={{ item.fstype }} dev={{ item.dev }} opts={{ item.opts }}
+#  with_items: mkFileSystems
+#  sudo: true
+#  when: mkFileSystems is defined
+#
+#- name: Mount device 
+#  mount: name={{ item.name }} src={{ item.dev }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
+#  with_items: mountFileSystems
+#  sudo: true
+#  when: mountFileSystems is defined
diff --git a/roles/provision/tasks/main.yml b/roles/provision/tasks/main.yml
index 27a2cbbd47537625c2e13efa54ff0132b0ab8c4a..c5d13aadd3c4aea253aaec4c556f65acdfe7d5a9 100644
--- a/roles/provision/tasks/main.yml
+++ b/roles/provision/tasks/main.yml
@@ -1,4 +1,8 @@
 ---
+- name: make provision dir
+  file: path="{{ provision | dirname }}" state=directory mode=755 owner=root
+  sudo: true
+
 - name: copy provision template 
   template: src=provision.sh.j2 dest={{ provision }} mode=755 owner=root
   sudo: true
diff --git a/roles/provision/templates/provision.sh.j2 b/roles/provision/templates/provision.sh.j2
index d4082c8ae41b59824252396bbc178bdeaf7931ef..0c70a397b8cadb0ab6792a56260948f83a863ee6 100644
--- a/roles/provision/templates/provision.sh.j2
+++ b/roles/provision/templates/provision.sh.j2
@@ -32,7 +32,7 @@ for user in ${user_list[*]}; do
             if [ -z "${find}" ]; then
                 su slurm -c "$sacctmgr -i add account ${account} Description=CVL Organization=monash cluster=${cluster}" || { echo "error to create account ${account}" >> ${log_file} && exit 1; }
             fi
-            find=$(sacctmgr list user ${user} | grep ${user})
+            find=$(sacctmgr list user --noheader -p ${user} | grep ${user})
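+            # --noheader -p gives bare, parsable output so the grep matches
+            # only a real user entry rather than the header line.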
             if [ -z "${find}" ]; then
                 su slurm -c "$sacctmgr -i add user ${user} account=${account} cluster=${cluster}" || { echo "error to create user ${user}" >> ${log_file} && exit 1; }
             fi
diff --git a/roles/provision/vars/main.yml b/roles/provision/vars/main.yml
index ed97d539c095cf1413af30cc23dea272095b97dd..b1cfa091c1d226185fcff5b4ec03c902db11bfe1 100644
--- a/roles/provision/vars/main.yml
+++ b/roles/provision/vars/main.yml
@@ -1 +1,5 @@
 ---
+slurm_provision: "/usr/local/sbin/slurm_provision.sh"
+home_dir: "/home"
+provision: "/usr/local/sbin/provision.sh"
+
diff --git a/roles/slurmdb-config/tasks/main.yml b/roles/slurmdb-config/tasks/main.yml
index becf45105eff2e253c41da10feb35d8afa05f620..a31f5ad72b0a21cc1ebb67d654eea977205b33b1 100644
--- a/roles/slurmdb-config/tasks/main.yml
+++ b/roles/slurmdb-config/tasks/main.yml
@@ -22,13 +22,13 @@
   sudo: true
 
 - name: install slurmdb.conf
-  template: src=slurmdbd.conf.j2 dest={{ slurm_dir }}/etc/slurmdbd.conf
+  copy: src=files/slurmdbd.conf dest={{ slurm_dir }}/etc/slurmdbd.conf
   sudo: true
   when: slurm_dir is defined
 
 
 - name: install slurmdbd.conf
-  template: src=slurmdbd.conf.j2 dest=/etc/slurm/slurmdbd.conf
+  copy: src=files/slurmdbd.conf dest=/etc/slurm/slurmdbd.conf
   sudo: true
   when: slurm_dir is not defined
 
diff --git a/scripts/userData.sh b/scripts/userData.sh
new file mode 100644
index 0000000000000000000000000000000000000000..545e92248baa6d17f64115cc634c87b689ad0ae8
--- /dev/null
+++ b/scripts/userData.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+mkdir -p /local_home
+usermod -m -d /local_home/ec2-user ec2-user
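+# Runs via cloud-init on first boot; moving ec2-user's home onto /local_home
+# presumably keeps it off the shared /home mounted later by other roles.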
+