diff --git a/roles/nfs-client/tasks/mountFileSystem.yml b/roles/nfs-client/tasks/mountFileSystem.yml
index 70cc74b6ec75619c5fd1352e79c9b21d9b99686a..e74665b73613410f268aae6e5d3dd6542e49c735 100644
--- a/roles/nfs-client/tasks/mountFileSystem.yml
+++ b/roles/nfs-client/tasks/mountFileSystem.yml
@@ -4,7 +4,7 @@
   sudo: true
 
 - name: "Mounting NFS mounts"
-  mount: name={{ item.name }} src=" {{ item.ipv4 }}:{{ item.src }} " fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
+  mount: name={{ item.name }} src="{{ item.ipv4 }}:{{ item.src }}" fstype={{ item.fstype }} opts={{ item.opts }} state=mounted
   with_items: nfsMounts
   notify: "restart rpcbind"
   notify: "restart idmap"
diff --git a/roles/slurm-from-source/tasks/installSlurmFromSource.yml b/roles/slurm-from-source/tasks/installSlurmFromSource.yml
index 1041a22c5e7f2b414205cef19e5745778387054f..c01b5f805e91f53ed9918ddd1a95d6945271384e 100644
--- a/roles/slurm-from-source/tasks/installSlurmFromSource.yml
+++ b/roles/slurm-from-source/tasks/installSlurmFromSource.yml
@@ -1,5 +1,6 @@
 - name: get slurm
-  shell: wget http://www.schedmd.com/download/archive/slurm-{{ slurm_version }}.tar.bz2
+  shell: wget https://cvl.massive.org.au/slurm-{{ slurm_version }}.tar.bz2
+# shell: wget http://www.schedmd.com/download/archive/slurm-{{ slurm_version }}.tar.bz2
   args:
     chdir: /tmp
     creates: /tmp/slurm-{{ slurm_version }}.tar.bz2
diff --git a/roles/slurm-from-source/tasks/main.yml b/roles/slurm-from-source/tasks/main.yml
index c44fb08a7b4f4c6bfa005530d34fb49b3830f9ce..02a3b6eb6d8732843b9b9cebcedf8873171d0c51 100644
--- a/roles/slurm-from-source/tasks/main.yml
+++ b/roles/slurm-from-source/tasks/main.yml
@@ -43,6 +43,10 @@
   sudo: true
   when: slurmlogdir is defined
 
+- name: create gres directory
+  file: path={{ slurm_dir }}/etc/gres state=directory owner=slurm group=slurm mode=755
+  sudo: true
+
 - name: install deps
   yum: name={{ item }} state=latest
   with_items:
@@ -107,11 +111,27 @@
 
 - include: installSlurmFromSource.yml
 
+- name: check slurm generic resource
+  shell: "{{ slurm_gres_check }}"
+  register: slurm_generic_resource
+  ignore_errors: true
+  when: slurm_gres_check is defined
+
+- name: install gres config file
+  template: src=gres.conf.j2 dest={{ slurm_dir }}/etc/gres.conf mode=644
+  sudo: true
+  when: slurm_gres_check is defined and slurm_generic_resource.stdout
+
+- name: install gres sub config file
+  template: src=gres_sub.conf.j2 dest={{ slurm_dir }}/etc/gres/gres.conf mode=644
+  sudo: true
+  when: slurm_gres_list is defined
+
 - name: install slurm.conf
   template: src=slurm.conf.j2 dest={{ slurm_dir }}/etc/slurm.conf
   sudo: true
   notify: restart slurm
-  when: slurm_use_vpn==False
+  when: slurm_use_vpn==False and slurm_gres_list is defined
 
 - name: install slurm.conf
   template: src=slurm-vpn.conf.j2 dest={{ slurm_dir }}/etc/slurm.conf
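The hunks above install gres.conf.j2 and gres_sub.conf.j2, neither of which is included in this diff. For reference, a minimal sketch of what gres_sub.conf.j2 could look like, assuming slurm_gres_list is a list of dicts with name, type and file keys (that shape is an assumption, not something this patch defines):

# gres_sub.conf.j2 -- hypothetical sketch; the real template is not in this diff.
# Emits one SLURM generic-resource line per entry in slurm_gres_list.
{% for gres in slurm_gres_list %}
Name={{ gres.name }} Type={{ gres.type }} File={{ gres.file }}
{% endfor %}

Rendered with, say, slurm_gres_list: [{name: gpu, type: k1, file: /dev/nvidia0}], this would yield a single line "Name=gpu Type=k1 File=/dev/nvidia0" in {{ slurm_dir }}/etc/gres/gres.conf.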
diff --git a/roles/slurm-from-source/templates/slurm.conf.j2 b/roles/slurm-from-source/templates/slurm.conf.j2
index 20cb0cd8b5dea984ee6ef1904169cbedfd4fb833..82ffcf6658a438381ee65a86ad3cbc7fc253d1f9 100644
--- a/roles/slurm-from-source/templates/slurm.conf.j2
+++ b/roles/slurm-from-source/templates/slurm.conf.j2
@@ -141,7 +141,7 @@ MpiParams=ports=12000-12999
 {% endfor %}
 {% endfor %}
 {% for node in nodelist|unique %}
-NodeName={{ node }} Procs={{ hostvars[node]['ansible_processor_vcpus'] }} RealMemory={{ hostvars[node].ansible_memory_mb.real.total }} Sockets={{ hostvars[node]['ansible_processor_vcpus'] }} CoresPerSocket=1 ThreadsPerCore={{ hostvars[node].ansible_processor_threads_per_core }} {% if hostvars[node].ansible_hostname.find('vis') != -1 %}Gres=gpu{% if hostvars[node].ansible_hostname.find('k1') > 0 %}:k1m{% endif %}{% if hostvars[node].ansible_hostname.find('k2') > 0 %}:k2m{% endif %}:1{% endif %} {% if hostvars[node]['ansible_processor_vcpus'] == 1 %}Weight=1{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 1 and hostvars[node]['ansible_processor_vcpus'] <= 16 %}Weight=3{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 16 and hostvars[node]['ansible_processor_vcpus'] <= 20 %}Weight=5{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 20 and hostvars[node]['ansible_processor_vcpus'] <= 40 %}Weight=7{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 40 and hostvars[node]['ansible_processor_vcpus'] <= 64 %}Weight=8{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 64 and hostvars[node]['ansible_processor_vcpus'] <= 128 %}Weight=9{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 128 %}Weight=10{% endif %} Feature=stage1 State=UNKNOWN
+NodeName={{ node }} Procs={{ hostvars[node]['ansible_processor_vcpus'] }} RealMemory={{ hostvars[node].ansible_memory_mb.real.total }} Sockets={{ hostvars[node]['ansible_processor_vcpus'] }} CoresPerSocket=1 ThreadsPerCore={{ hostvars[node].ansible_processor_threads_per_core }} {% if hostvars[node].ansible_hostname.find('vis') != -1 %}Gres=gpu{% if hostvars[node].ansible_hostname.find('k1') > 0 %}:k1{% endif %}{% if hostvars[node].ansible_hostname.find('k2') > 0 %}:k2{% endif %}:1{% endif %} {% if hostvars[node]['ansible_processor_vcpus'] == 1 %}Weight=1{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 1 and hostvars[node]['ansible_processor_vcpus'] <= 16 %}Weight=3{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 16 and hostvars[node]['ansible_processor_vcpus'] <= 20 %}Weight=5{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 20 and hostvars[node]['ansible_processor_vcpus'] <= 40 %}Weight=7{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 40 and hostvars[node]['ansible_processor_vcpus'] <= 64 %}Weight=8{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 64 and hostvars[node]['ansible_processor_vcpus'] <= 128 %}Weight=9{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 128 %}Weight=10{% endif %} Feature=stage1 State=UNKNOWN
 {% endfor %}
 
 {% for queue in slurmqueues %}
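For a hypothetical visualisation node whose hostname is vis-k1-0 and which reports 16 vCPUs, the rewritten NodeName expression renders roughly as follows (the memory, socket and thread figures are illustrative fact values, not taken from this patch):

NodeName=vis-k1-0 Procs=16 RealMemory=64410 Sockets=16 CoresPerSocket=1 ThreadsPerCore=1 Gres=gpu:k1:1 Weight=3 Feature=stage1 State=UNKNOWN

The only behavioural change in this line is the Gres type suffix: k1/k2 instead of the old k1m/k2m, so the type names in slurm.conf line up with the entries in the gres config files the new tasks install.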