diff --git a/roles/nfs-client/tasks/mountFileSystem.yml b/roles/nfs-client/tasks/mountFileSystem.yml
index 0dc48290c5dfa850ef847654c5d9c4708ce61e2c..53abd33de55622d44c001ab2f186a0335198bc50 100644
--- a/roles/nfs-client/tasks/mountFileSystem.yml
+++ b/roles/nfs-client/tasks/mountFileSystem.yml
@@ -3,11 +3,8 @@
   service: name=fail2ban state=stopped
   sudo: true
-
-
-
 - name: "Mounting NFS mounts"
-  mount: name={{ item.mntpt }} src={{ item.src }} fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
+  mount: name={{ item.name }} src="{{ item.ipv4 }}:{{ item.src }}" fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
   with_items: nfsMounts
   notify: "restart authentication"
   notify: "restart rpcbind"
 
diff --git a/roles/openLdapClient/templates/sssd.j2 b/roles/openLdapClient/templates/sssd.j2
index 05c9acf40eb1062dc8255042ff398ca9ea63ae98..1dac0d88f2df3e1b5c67e9881e8919913f309849 100644
--- a/roles/openLdapClient/templates/sssd.j2
+++ b/roles/openLdapClient/templates/sssd.j2
@@ -4,7 +4,8 @@
 domains = {{ ansible_domain }}
 services = nss, pam, autofs
 
 [nss]
-filter_users = root
+filter_users = root, slurm, munge
+filter_groups = slurm, munge
 
 [pam]
diff --git a/roles/slurm-from-source/tasks/installMungeFromSource.yml b/roles/slurm-from-source/tasks/installMungeFromSource.yml
index 11d376a5e6c11db63673c9b52b37959292b580b7..438497c1fe1e5b784f0448c21cb5a6c936e4012a 100644
--- a/roles/slurm-from-source/tasks/installMungeFromSource.yml
+++ b/roles/slurm-from-source/tasks/installMungeFromSource.yml
@@ -26,8 +26,3 @@
 - name: copy init script
   template: dest=/etc/init.d/munge src=munge.initd.j2 mode=755
   sudo: true
-
-- name: start on boot
-  shell: update-rc.d munge defaults
-  sudo: true
-  when: ansible_distribution == "Ubuntu"
diff --git a/roles/slurm-from-source/tasks/main.yml b/roles/slurm-from-source/tasks/main.yml
index 9021ff0f7804dfc5e7b9877a4c298bc6b3e0f5eb..b49cf008a1029a71078ab1f63eced9f40272908d 100644
--- a/roles/slurm-from-source/tasks/main.yml
+++ b/roles/slurm-from-source/tasks/main.yml
@@ -98,6 +98,14 @@
   sudo: true
   notify: restart munge
 
+- name: enable munge on boot
+  service: name=munge enabled=yes
+  sudo: true
+
+- name: start munge
+  service: name=munge state=started
+  sudo: true
+
 - include: installSlurmFromSource.yml
 
 - name: install slurm.conf
diff --git a/roles/slurm-from-source/templates/slurm.conf.j2 b/roles/slurm-from-source/templates/slurm.conf.j2
index 096c7d08f1fa7e2c42f23572650a9055ba65e49b..e45f1318d036f2bdab9a5c588907f2d1fc35077b 100644
--- a/roles/slurm-from-source/templates/slurm.conf.j2
+++ b/roles/slurm-from-source/templates/slurm.conf.j2
@@ -98,7 +98,7 @@ SlurmSchedLogFile={{ slurm_dir }}{{ slurmschedlog.log }}
 JobCompType=jobcomp/none
 #JobCompLoc=
 #
-{% if slurmjob %}
+{% if slurmjob is defined %}
 Prolog={{ slurmjob.prolog }}
 Epilog={{ slurmjob.epilog }}
 {% endif %}
diff --git a/roles/slurmdb/templates/slurmdbd.conf.j2 b/roles/slurmdb/templates/slurmdbd.conf.j2
index 2309481cdf73a67f7aaec8f07be0a407b9ca134e..5a6cd9aed308aa8982c173a2c8d9fc3db82d90f5 100644
--- a/roles/slurmdb/templates/slurmdbd.conf.j2
+++ b/roles/slurmdb/templates/slurmdbd.conf.j2
@@ -22,7 +22,7 @@ DbdHost={{ slurmctrl }}
 SlurmUser=slurm
 #MessageTimeout=300
 #DefaultQOS=normal,standby
-{% if slurmdbdlog %}
+{% if slurmdbdlog is defined %}
 DebugLevel={{ slurmdbdlog.level }}
 LogFile={{ slurmdbdlog.log }}
 {% else %}
@@ -36,7 +36,7 @@ PidFile=/var/run/slurmdbd.pid
 #
 # Database info
 StorageType=accounting_storage/mysql
-StorageHost={{ slurmctrl }}
+StorageHost=localhost
 #StoragePort=1234
 StoragePass={{ slurmdb_passwd }}
 StorageUser=slurmdb
diff --git a/roles/syncExports/tasks/addExports.yml b/roles/syncExports/tasks/addExports.yml
index 8853541bcd81ebbe805949044d2d030b89dc5bdf..79484a27f8091d2390df226a0b62216c8c110d85 100644
--- a/roles/syncExports/tasks/addExports.yml
+++ b/roles/syncExports/tasks/addExports.yml
@@ -8,11 +8,15 @@
   template: src=exports.j2 dest=/etc/exports owner=root group=root mode=644
   sudo: true
 
-# Do not do this as a handler, instead do this here as a task so that it happens imediatly after the exports file is created before any clients
-# attempt a mount
-- name : "Reload exports"
-  command: exportfs -ra
+- name: "Restart the NFS server"
+  service: "name=nfs state=restarted"
   sudo: true
+  when: ansible_os_family == "RedHat"
+
+- name: "Restart the NFS server"
+  service: "name=nfs-kernel-server state=restarted"
+  sudo: true
+  when: ansible_os_family == "Debian"
 
 - name : "Pause ... clients sometimes have errors"
   command: sleep 60
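A note on the reworked mount task above: each entry in nfsMounts must now carry name, ipv4, src, fstype and opts keys (the old mntpt key is gone, and the mount source is now built from the server address plus the exported path). A minimal sketch of a matching variable definition, assuming it lives somewhere like group_vars; the address, paths and mount options below are illustrative assumptions, not values from this repository:

nfsMounts:
  - name: /home                # client-side mount point ({{ item.name }})
    ipv4: 10.0.0.10            # NFS server address ({{ item.ipv4 }}), made up
    src: /export/home          # exported path on the server ({{ item.src }})
    fstype: nfs
    opts: "rw,hard,intr,bg"
  - name: /usr/local
    ipv4: 10.0.0.10
    src: /export/apps
    fstype: nfs
    opts: "ro,bg"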
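On the template changes from {% if slurmjob %} to {% if slurmjob is defined %} (and likewise for slurmdbdlog): under Ansible's default Jinja2 settings, evaluating a variable that was never set raises an undefined-variable error, so hosts that leave these optional dictionaries unset would fail to render the template at all. The "is defined" test only checks for the variable's existence. A minimal sketch of the guarded pattern, reusing the slurmjob variable from slurm.conf.j2:

{# Renders nothing, instead of erroring, on hosts where slurmjob is unset. #}
{% if slurmjob is defined %}
Prolog={{ slurmjob.prolog }}
Epilog={{ slurmjob.epilog }}
{% endif %}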