Commit 95cb012b authored by Andreas Hamacher

Merge branch 'misc' into 'master'

post fs04 migration maintenance checking.

See merge request !588
parents bd231385 dafa3f21
@@ -34,7 +34,6 @@ SlurmdPidFile={{ slurmpiddir }}/slurmd.pid
 #ProctrackType=proctrack/linuxproc
 ProctrackType=proctrack/cgroup
 #PluginDir=
-CacheGroups=0
 #FirstJobId=
 ReturnToService=1
 RebootProgram=/sbin/reboot
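CacheGroups is an obsolete slurm.conf option that current Slurm releases no longer accept, so dropping it from the template avoids a parse warning. For hosts whose slurm.conf is not regenerated from this template, a one-off cleanup could look like the sketch below; the lineinfile task is an illustration, not part of this role.

- name: remove the obsolete CacheGroups option from a live slurm.conf (illustrative)
  lineinfile:
    path: "{{ slurm_dir }}/etc/slurm.conf"
    regexp: '^CacheGroups='
    state: absent
  become: true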
@@ -21,7 +21,9 @@
       - lua-filesystem
       - lua-posix
   become: true
-  when: ansible_os_family == 'RedHat'
+  when:
+    - ansible_os_family == 'RedHat'
+    - '"DGX" not in ansible_product_name'

 - name: install lua RHEL7
   yum:
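Splitting when: into a list ANDs the conditions, which reads more clearly than one long boolean expression. A minimal sketch of the same pattern, using a hypothetical debug task:

- name: example of list-form when (hypothetical task)
  debug:
    msg: "RedHat host that is not a DGX"
  when:
    - ansible_os_family == 'RedHat'
    - '"DGX" not in ansible_product_name'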
@@ -54,7 +56,7 @@

 - name: Download LMOD
   get_url:
-    url=http://{{ reposerverip }}/src/Lmod-{{ lmod_version }}.tar.bz2
+    url=https://object-store.rc.nectar.org.au/v1/AUTH_56ccfd36d0ad454a883a98e8489c97b5/hpc-repo/src/Lmod-{{ lmod_version }}.tar.bz2
     dest={{ source_dir }}/Lmod-{{ lmod_version }}.tar.bz2
     mode=0444
   when: ansible_os_family == 'RedHat' and not lmodstat.stat.exists
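Since the tarball now comes from a public object store rather than an internal repo server, pinning a checksum would catch a corrupted or tampered download. A hedged sketch of the same task with get_url's checksum parameter; the digest is a placeholder, not a real value:

- name: Download LMOD with a pinned checksum (illustrative)
  get_url:
    url: "https://object-store.rc.nectar.org.au/v1/AUTH_56ccfd36d0ad454a883a98e8489c97b5/hpc-repo/src/Lmod-{{ lmod_version }}.tar.bz2"
    dest: "{{ source_dir }}/Lmod-{{ lmod_version }}.tar.bz2"
    mode: '0444'
    checksum: "sha256:<expected-digest>"  # placeholder digest
  when: ansible_os_family == 'RedHat' and not lmodstat.stat.exists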
@@ -70,6 +70,16 @@
     creates: "{{ munge_dir }}/bin/munge"
   when: not munge_binary.stat.exists

+- name: Create munge socket directory if it does not exist
+  file:
+    path: "{{ munge_dir }}/var/run/munge"
+    state: directory
+    owner: munge
+    group: root
+    mode: u=rwx
+  become: true
+  tags: CHRISTMAS
+
 - name: set use_systemd Redhat
   set_fact:
     use_systemd: True
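One caveat with the new task: a symbolic mode of u=rwx only sets the user bits and leaves group/other to the creation defaults, while munge's socket directory conventionally needs rx for group and other (0755) so local clients can reach the socket. A hedged variant with an explicit octal mode:

- name: Create munge socket directory (explicit-mode variant, illustrative)
  file:
    path: "{{ munge_dir }}/var/run/munge"
    state: directory
    owner: munge
    group: root
    mode: '0755'  # assumption: clients need o+rx on the socket directory
  become: true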
@@ -24,7 +24,7 @@
     path: /mnt/nvme
   become: true

-- name: Set /mnt/nvme as spankprivatetmpdir if present
+- name: Set /mnt/nvme as spankprivatetmpmount if present
   file:
     src: /mnt/nvme
     dest: "{{ spankprivatetmpmount }}"
@@ -33,9 +33,9 @@
     mode: u=rwx,g=rx,o=rx
     state: link
   become: true
-  when: spankprivatetmpdir is defined and hostvars[inventory_hostname]['ansible_devices']['nvme0n1'] is defined
+  when: spankprivatetmpmount is defined and hostvars[inventory_hostname]['ansible_devices']['nvme0n1'] is defined

-- name: Link /raid as spankprivatetmpdir if present
+- name: Link /raid as spankprivatetmpmount if present
   file:
     src: /raid
     dest: "{{ spankprivatetmpmount }}"
@@ -44,9 +44,9 @@
     mode: u=rwx,g=rx,o=rx
     state: link
   become: true
-  when: spankprivatetmpdir is defined and raiddir.stat.isdir is defined and raiddir.stat.isdir == True
+  when: spankprivatetmpmount is defined and raiddir.stat.isdir is defined and raiddir.stat.isdir == True

-- name: create spankprivatetmpdir as directory if there is not a fast drive present
+- name: create spankprivatetmpmount as directory if there is not a fast drive present
   file:
     path: "{{ spankprivatetmpmount }}"
     owner: root
@@ -54,7 +54,7 @@
     mode: u=rwx,g=rx,o=rx
     state: directory
   become: true
-  when: spankprivatetmpdir is defined and hostvars[inventory_hostname]['ansible_devices']['nvme0n1'] is not defined and raiddir.stat.isdir is not defined
+  when: spankprivatetmpmount is defined and hostvars[inventory_hostname]['ansible_devices']['nvme0n1'] is not defined and raiddir.stat.isdir is not defined

 - name: create munge group
   group: name=munge system=yes gid=498
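These hunks rename spankprivatetmpdir to spankprivatetmpmount in the when: guards so the guards test the same variable the tasks actually consume as dest/path. A hedged safety net for inventories that still carry only the old name (this assert is illustrative, not part of the role):

- name: catch inventories still using the old variable name (illustrative)
  assert:
    that:
      - spankprivatetmpdir is not defined or spankprivatetmpmount is defined
    fail_msg: "rename spankprivatetmpdir to spankprivatetmpmount in the inventory"
  run_once: true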
@@ -29,7 +29,6 @@ SlurmctldPidFile=/var/run/slurmctld.pid
 SlurmdPidFile=/var/run/slurmd.pid
 ProctrackType=proctrack/pgid
 #PluginDir=
-CacheGroups=0
 #FirstJobId=
 ReturnToService=0
 #MaxJobCount=
@@ -147,7 +147,7 @@
     port: 6817
     delay: 5
     timeout: 300
+  when: start_slurmctld is defined and start_slurmctld

 - name: start slurmctld on secondary
   service: name=slurmctld state=started
@@ -34,7 +34,7 @@
   ansible.builtin.copy:
     src: files/job_submit.lua
     dest: "{{ slurm_dir }}/etc/job_submit.lua"
-    mode: 755
+    mode: u+rwx,g+rx,o+rx
   become: true
   become_user: root
   when: local_lua_file is defined and local_lua_file.stat.exists==True
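This change fixes the classic unquoted-octal pitfall: YAML reads a bare mode: 755 as the decimal integer 755, which Ansible applies as octal 1363 rather than 0755. Quoting the octal string is an equally valid fix; a hedged sketch:

- name: install job_submit.lua (quoted-octal variant, illustrative)
  ansible.builtin.copy:
    src: files/job_submit.lua
    dest: "{{ slurm_dir }}/etc/job_submit.lua"
    mode: '0755'
  become: true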
@@ -15,13 +15,6 @@
   become: true
   when: services["lustre-client.service"] is defined

-- name: ensure the lustre module is absent
-  modprobe:
-    name: lustre
-    state: absent
-  become: true
-  when: services["lustre-client.service"] is defined
-
 - name: count lustre mounts
   shell:
     cmd: mount -t lustre | wc -l
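Dropping the "ensure the lustre module is absent" task leaves module unloading to be handled elsewhere; the mount count that follows could gate any future unload so the module is never removed while lustre filesystems are still mounted. A hedged sketch of that guard (the register variable name is illustrative):

- name: count lustre mounts
  shell:
    cmd: mount -t lustre | wc -l
  register: lustre_mounts
  changed_when: false

- name: unload lustre only when nothing is mounted (illustrative)
  modprobe:
    name: lustre
    state: absent
  become: true
  when: lustre_mounts.stdout | int == 0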