Commit 3c58d37d authored by Chris Hines

Merge branch 'master' into 'master'

Master

fixed up setupKnownHosts

See merge request !3
parents df168ad1 17b43371
@@ -30,7 +30,8 @@ SwitchType=switch/none
MpiDefault=pmi2
SlurmctldPidFile={{ slurmpiddir }}/slurmctld.pid
SlurmdPidFile={{ slurmpiddir }}/slurmd.pid
ProctrackType=proctrack/linuxproc
#ProctrackType=proctrack/linuxproc
ProctrackType=proctrack/cgroup
#PluginDir=
CacheGroups=0
#FirstJobId=
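# Note (illustrative, not part of this change): moving from proctrack/linuxproc to
# proctrack/cgroup typically pairs with a cgroup.conf in the same directory as slurm.conf
# and a restart of the Slurm daemons. A quick check on a running node:
#   scontrol show config | grep -i ProctrackType   # should report proctrack/cgroup
#   ls {{ slurm_dir }}/etc/cgroup.conf             # cgroup plugins read their own config file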
@@ -78,16 +79,16 @@ SelectType={{ slurmselecttype }}
SelectTypeParameters=CR_Core_Memory
{% endif %}
FastSchedule={{ slurmfastschedule }}
#PriorityType=priority/multifactor
PriorityType=priority/multifactor
#PriorityFlags=Ticket_Based
#PriorityCalcPeriod=5
#PriorityDecayHalfLife=0
#PriorityUsageResetPeriod=14-0
##PriorityWeightFairshare=10000
#PriorityWeightAge=10000
#PriorityWeightPartition=10000
#PriorityWeightJobSize=10000
#PriorityMaxAge=14-0
PriorityWeightFairshare=10000
PriorityWeightAge=10000
PriorityWeightPartition=10000
PriorityWeightJobSize=10000
PriorityMaxAge=14-0
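# Illustrative check (not part of this change): with priority/multifactor enabled and the
# weights above uncommented, the live configuration can be inspected with:
#   sprio -w   # print the configured priority weights
#   sprio -l   # per-job breakdown of the age, fair-share, job-size and partition factors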
#
# LOGGING
{% if slurmctlddebug %}
@@ -117,24 +118,27 @@ JobCompType=jobcomp/none
{% if slurmjob is defined %}
Prolog={{ slurmjob.prolog }}
Epilog={{ slurmjob.epilog }}
{% else %}
Prolog={{ slurm_dir }}/bin/slurm.prolog
Epilog={{ slurm_dir }}/bin/slurm.epilog
{% endif %}
#
# ACCOUNTING
#JobAcctGatherType=jobacct_gather/linux
#JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/linux
JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost={{ slurmdbd }}
{% if slurmdbdbackup is defined %}
AccountingStorageBackupHost={{ slurmdbdbackup }}
{% endif %}
#AccountingStorageEnforce=limits,safe
AccountingStorageEnforce=limits,safe
#AccountingStorageLoc=
#AccountingStoragePass=
#AccountingStorageUser=
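# Note (illustrative, not part of this change): AccountingStorageEnforce=limits,safe only
# takes effect once users and accounts have associations in slurmdbd; jobs from users
# without an association are rejected. A quick sanity check:
#   sacctmgr show associations format=cluster,account,user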
#
#GRES
GresTypes=gpu
#GresTypes=gpu
# Fair share
{% if slurmfairshare.def %}
@@ -155,6 +159,10 @@ MpiParams=ports=12000-12999
NodeName={{ hostvars[node]['ansible_hostname'] }} Procs={{ hostvars[node]['ansible_processor_vcpus'] }} RealMemory={{ hostvars[node].ansible_memory_mb.real.total }} Sockets={{ hostvars[node]['ansible_processor_vcpus'] }} CoresPerSocket=1 ThreadsPerCore={{ hostvars[node].ansible_processor_threads_per_core }} {% if hostvars[node].ansible_hostname.find('vis') != -1 %}Gres=gpu:1{% endif %} {% if hostvars[node]['ansible_processor_vcpus'] == 1 %}Weight=1{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 1 and hostvars[node]['ansible_processor_vcpus'] <= 16 %}Weight=3{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 16 and hostvars[node]['ansible_processor_vcpus'] <= 20 %}Weight=5{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 20 and hostvars[node]['ansible_processor_vcpus'] <= 40 %}Weight=7{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 40 and hostvars[node]['ansible_processor_vcpus'] <= 64 %}Weight=8{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 64 and hostvars[node]['ansible_processor_vcpus'] <= 128 %}Weight=9{% endif %}{% if hostvars[node]['ansible_processor_vcpus'] > 128 %}Weight=10{% endif %} Feature=stage1 State=UNKNOWN
{% endfor %}
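# Illustrative rendering of the loop above for a hypothetical 16-vCPU node (all values come
# from Ansible facts; the weight buckets scale with CPU count as encoded in the conditionals):
#   NodeName=node01 Procs=16 RealMemory=64000 Sockets=16 CoresPerSocket=1 ThreadsPerCore=1 Weight=3 Feature=stage1 State=UNKNOWN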
# monarch-specific: define the login and controller nodes as DOWN to suppress spurious warning messages
NodeName={{ hostvars[groups['LoginNodes'][0]]['ansible_hostname'] }} State=DOWN
NodeName={{ slurmctrl }} State=DOWN
{% for queue in slurmqueues %}
{% set nodenames = [] %}
{% for node in groups[queue.group] %}
......
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<body><h3>HPC identity system (this landing page is under construction)</h3>
<br>
<p>The Monash HPC identity system is a new HPC access control system. Access to the HPC ID system is provided through the Australian Access Federation (AAF), which allows you to log in using your institutional username and password.
<br>
<br>
If this is the first time you are using the system, it will give you the option to select your existing HPC username when creating a new user account. You'll need to join projects before you can access the HPC system.
<br>
<br>
If your organisation is not a member of the AAF, or if you need help, please email HPC support: help@massive.org.au.</p>
<br>
<p>Click the following link <a href=https://{{ ansible_fqdn }}/aafbootstrap>to continue</a>.</p>
</body>
</html>
<html><body><h3>HPC identity management</h3>
<p>To log in via AAF authentication, connect to <a href=https://{{ hpchostname }}.erc.monash.edu.au/aafbootstrap>aafbootstrap</a></p>
<p>To log in without AAF authentication, connect to <a href=https://{{ hpchostname }}.erc.monash.edu.au/users>users</a></p>
</body></html>
- name: "Templating /etc/ssh/known_hosts"
template: src=known_hosts.j2 dest=/etc/ssh/known_hosts owner=root group=root mode=600
template: src=known_hosts.j2 dest=/etc/ssh/ssh_known_hosts owner=root group=root mode=644
sudo: true
register: sshknowhost
register: sshknownhost
- name: ecrypt the hosts file
shell: ssh-keygen -H -f /etc/ssh/known_hosts
- name: encrypt the hosts file
shell: ssh-keygen -H -f /etc/ssh/ssh_known_hosts
sudo: true
when: sshknownhost.changed
- name: set read permissions
file: path=/etc/ssh/ssh_known_hosts owner=root group=root mode=644 state=file
sudo: true
- name: delete ssh_known_hosts.old
file: path=/etc/ssh/ssh_known_hosts.old state=absent
sudo: true
when: sshknowhost.changed
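# Illustrative check (not part of this change): ssh-keygen -H rewrites the file with hashed
# hostnames and leaves an ssh_known_hosts.old backup, which the task above removes. A single
# entry can still be looked up afterwards, e.g. for a hypothetical host "node01":
#   ssh-keygen -F node01 -f /etc/ssh/ssh_known_hosts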
@@ -2,14 +2,22 @@
{% for node in groups['all'] %}
{% for interface in hostvars[node]['ansible_interfaces'] %}
{% if interface != "lo" %}
{% set host = {'name': node, 'ip': hostvars[node]['ansible_'+interface]['ipv4']['address'], 'rsa': hostvars[node]['ansible_ssh_host_key_rsa_public']} %}
{% if hostvars[node]['ansible_ssh_host_key_rsa_public'] %}
{% set host = {'name': node, 'ip': hostvars[node]['ansible_'+interface]['ipv4']['address'], 'keytype':'ssh-rsa', 'key': hostvars[node]['ansible_ssh_host_key_rsa_public']} %}
{% if nodelist.append(host) %}
{% endif %}
{% endif %}
{% if hostvars[node]['ansible_ssh_host_key_ecdsa_public'] %}
#{% set host = {'name': node, 'ip': hostvars[node]['ansible_'+interface]['ipv4']['address'], 'keytype':'ssh-ecdsa', 'key': hostvars[node]['ansible_ssh_host_key_ecdsa_public']} %}
{% set host = {'name': node, 'ip': hostvars[node]['ansible_'+interface]['ipv4']['address'], 'keytype':'ecdsa-sha2-nistp256', 'key': hostvars[node]['ansible_ssh_host_key_ecdsa_public']} %}
{% if nodelist.append(host) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
{% endfor %}
{% for host in nodelist|unique %}
{{ host.ip }} ssh-rsa {{ host.rsa }}
{{ host.name }} ssh-rsa {{ host.rsa }}
{% for host in nodelist %}
{{ host.ip }} {{ host.keytype }} {{ host.key }}
{{ host.name }} {{ host.keytype }} {{ host.key }}
{% endfor %}
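# Illustrative output of this template for a hypothetical host (one line per address/name
# and per key type), before the hostnames are hashed by ssh-keygen -H:
#   10.0.0.5 ssh-rsa AAAAB3Nza...
#   node01 ssh-rsa AAAAB3Nza...
#   10.0.0.5 ecdsa-sha2-nistp256 AAAAE2Vj...
#   node01 ecdsa-sha2-nistp256 AAAAE2Vj...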
@@ -123,6 +123,14 @@
sudo: true
when: slurm_gres_list is defined
- name: install slurm prolog
template: src=slurm.prolog.j2 dest={{ slurm_dir }}/bin/slurm.prolog
sudo: true
- name: install slurm epilog
template: src=slurm.epilog.j2 dest={{ slurm_dir }}/bin/slurm.epilog
sudo: true
- name: install slurm.conf
copy: src=files/slurm.conf dest={{ slurm_dir }}/etc/slurm.conf
sudo: true
......
#!/bin/sh
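# Remove any files left under /tmp that are owned by the user who ran the job
# (SLURM_JOB_USER is set by slurmd in the prolog/epilog environment).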
find /tmp -user ${SLURM_JOB_USER} | xargs rm -rf