---
desktopNodeList:
  - { name: 'DesktopNodes', interface: 'eth0' }

clustername: "cicd"
projectname: "cicd"

slurm_version: 19.05.4
munge_version: 0.5.13
nhc_version: 1.4.2

munge_dir: /opt/munge-{{ munge_version }}
slurm_dir: /opt/slurm-{{ slurm_version }}
nhc_dir: /opt/nhc-{{ nhc_version }}
nhc_config_file: nhc.conf
nhc_log_level: 0
nhc_emails: nobody@nowhere.nowhere
nhc_email_subject: "Node Health Check"

openmpi_version: 1.8.3

mysql_host: "{{ groups['SQLNodes'][0] }}"

slurmctrl: "{{ groups['ManagementNodes'][0] }}"
slurmctrlbackup: "{{ groups['ManagementNodes'][1] }}"
slurmdbd: "{{ groups['ManagementNodes'][0] }}"
slurmdbdpiddir: "/opt/slurm/var/run"
slurmdbdbackup: "{{ groups['ManagementNodes'][1] }}"

slurm_use_vpn: false
slurm_lua: true

slurmqueues:
  - {name: batch, group: ComputeNodes, default: yes}
#  - {name: vis, group: DesktopNodes, default: no}

slurmlogin: "{{ groups['LoginNodes'][0] }}"
slurmlogdir: "/var/log"

slurmctlddebug: {level: 5, log: '/mnt/slurm-logs/slurmctld.log'}
slurmddebug: {level: 5, log: '/var/log/slurmd.log'}
slurmschedlog: {level: 5, log: '/mnt/slurm-logs/slurmsched.log'}
slurmdbdlog: {level: 5, log: '/mnt/slurm-logs/slurmdbd.log'}
slurmfairshare: {def: false, val: 10000}

slurmdatadir: "/opt/slurm/var/spool"
slurmstatedir: "/opt/slurm/var/state"
slurmsharedstatedir: "/slurmstate"
slurmpiddir: "/opt/slurm-latest/var/run"

slurmaccount_create_user: "/usr/local/sbin/slurmuseraccount.sh"
slurm_provision: "/cinderVolume/local/sbin/slurm_provision.sh"

slurmselecttype: "select/linear"
slurmfastschedule: "1"
slurmschedulertype: "sched/backfill"

restartServerList:
  - slurm
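
# The "{{ groups[...] }}" lookups above assume an Ansible inventory that defines
# the SQLNodes, ManagementNodes (at least two hosts, since index [1] is used for
# the backup controller/dbd), LoginNodes, ComputeNodes and DesktopNodes groups.
# A minimal sketch of such an inventory (hostnames are illustrative only):
#
#   [ManagementNodes]
#   mgmt0
#   mgmt1
#
#   [SQLNodes]
#   sql0
#
#   [LoginNodes]
#   login0
#
#   [ComputeNodes]
#   node[001:004]
#
#   [DesktopNodes]
#   desktop0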