Commit d333a398 authored by Andreas Hamacher

Merge remote-tracking branch 'origin' into mockldap

Former-commit-id: 376913dd
parents 21e93ea6 e1f23a76
Showing 208 additions and 136 deletions
@@ -13,6 +13,7 @@ stages:
- ansible_create_cluster_stage
- push_button_spawn_cluster
- tests
- integration_test #https://docs.gitlab.com/ee/ci/triggers/
- clean
@@ -73,7 +74,6 @@ ansiblelint:
- cd CICD
- python3 ansiblelint/run_lint.py --targets master_playbook.yml
build_cluster_cicd:
stage: heat
allow_failure: false
@@ -111,12 +111,14 @@ ansible_create_cluster_stage:
- echo "ansible_create_cluster_stage"
- bash -x ./CICD/ansible_create_cluster_script.sh
- cd CICD
-    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags monitoring master_playbook.yml
+    - ansible-playbook -i files/inventory.$STACKNAME --key-file ../gc_key.pem --skip-tags SiteSpecific master_playbook.yml
- sleep 15
- echo uglyuglyfix
- ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -b -a "systemctl restart slurmdbd" ManagementNodes
- sleep 60
- cd plays
- ansible-playbook -i files/inventory.$STACKNAME --key-file ../../gc_key.pem --skip-tags monitoring computenodes.yml | tee nochange.log
- echo [ `grep changed= ./nochange.log -c` = `grep changed=0 ./nochange.log -c` ] > bashtest.sh # crude idempotence check: the count of changed= summaries must equal the count of changed=0 summaries
- bash ./bashtest.sh
- ansible-playbook -i files/inventory.$STACKNAME --key-file ../../gc_key.pem --skip-tags monitoring --check computenodes.yml
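The generated bashtest.sh is a throwaway; the same idempotence check can be expressed directly in the job script. A minimal sketch, assuming the nochange.log written by the play above:

# Sketch: fail the job unless every "changed=" summary in nochange.log reads "changed=0".
total=$(grep -c 'changed=' nochange.log)
clean=$(grep -c 'changed=0' nochange.log)
if [ "$total" -ne "$clean" ]; then
    echo "not idempotent: $((total - clean)) host summary line(s) reported changes" >&2
    exit 1
fi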
@@ -137,6 +139,7 @@ tests:
- grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME #fail if inventory file is empty
- ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
- ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
- echo -e '[defaults]\nallow_world_readable_tmpfiles = True' > ansible.cfg
# Need to find a better check for sinfo
#- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name sinfo -type f" ManagementNodes
#- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name squeue -type f" ManagementNodes
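The ansible.cfg written above is simpler with printf, whose escape handling does not vary between shells. A sketch producing the same two-line file:

# Sketch: same ansible.cfg content, written with printf instead of echo -e.
printf '[defaults]\nallow_world_readable_tmpfiles = True\n' > ansible.cfg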
@@ -234,3 +237,4 @@ clean:
# after_script:
# - sleep 20 # artificially wait a bit to make sure it is really dead
nhc.conf
ssh_known_hosts
*.conf
slurm.conf
slurmdbd.conf
etcHosts
inventory.*
@@ -65,6 +65,7 @@ parameters:
resources:
SQLNode0:
type: "OS::Nova::Server"
properties:
@@ -121,7 +122,7 @@ resources:
MgmtNodesCentos7:
type: "OS::Heat::ResourceGroup"
properties:
-      count: 2
+      count: 1
resource_def:
type: My::Server::MgmtNode
properties:
@@ -137,7 +138,7 @@ resources:
MgmtNodesU:
type: "OS::Heat::ResourceGroup"
properties:
-      count: 0
+      count: 1
resource_def:
type: My::Server::MgmtNode
properties:
@@ -174,7 +175,7 @@ resources:
LoginNodesU:
type: "OS::Heat::ResourceGroup"
properties:
-      count: 0
+      count: 1
resource_def:
type: "OS::Nova::Server"
properties:
@@ -270,12 +271,33 @@ resources:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopu%index%' ]]
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: LDAPSecGroupID } ]
metadata:
-        ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, K1, VisNodes ]
+        ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, VisNodes ]
ansible_ssh_user: ubuntu
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
CentosDesktopNodes:
type: "OS::Heat::ResourceGroup"
properties:
count: 0
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: mon.c10r35.gpu-k2
image: { get_param: centos_7_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopc%index%' ]]
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
metadata:
ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, K1, VisNodes ]
ansible_ssh_user: ec2-user
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
ComputeNodeRHEL:
type: "OS::Heat::ResourceGroup"
properties:
@@ -10,13 +10,16 @@ resources:
name: "heatslurmsecgroup"
rules: [ { protocol: tcp,
                 port_range_min: 12000,
-                port_range_max: 12999},
+                port_range_max: 12999,
+                remote_mode: "remote_group_id"},
               { protocol: tcp,
                 port_range_min: 6817,
-                port_range_max: 6819},
+                port_range_max: 6819,
+                remote_mode: "remote_group_id"},
               { protocol: tcp,
                 port_range_min: 1019,
-                port_range_max: 1019}]
+                port_range_max: 1019,
+                remote_mode: "remote_group_id"}]
NFSSecGroup:
type: "OS::Neutron::SecurityGroup"
properties:
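The remote_mode: "remote_group_id" additions restrict each rule to traffic from instances in the same security group rather than from any address. For comparison, a hand-built CLI equivalent of the 6817-6819 rule might look like this (a sketch; heatslurmsecgroup is the group defined above):

# Sketch: only members of heatslurmsecgroup may reach the slurmctld/slurmdbd ports.
openstack security group rule create heatslurmsecgroup \
    --protocol tcp --dst-port 6817:6819 \
    --remote-group heatslurmsecgroup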
@@ -77,6 +77,8 @@ case "$1" in
echo "I cannot update a stack which does not exist"
exit -45
fi
openstack stack check --wait $STACKNAME
sleep 2
openstack stack update --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
ret=$?
exit $ret
@@ -84,6 +86,8 @@ case "$1" in
create_or_update)
if check_stack_exists
then
openstack stack check --wait $STACKNAME
sleep 2
openstack stack update --wait --template ./heat/gc_HOT.yaml --parameter "project_name=$STACKNAME" -e ./heat/resource_registry.yaml $STACKNAME
ret=$?
exit $ret
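Note that the exit status of the added openstack stack check calls is ignored; only the update's status is propagated. If the intent is to abort a doomed update early, a guard along these lines would do it (a sketch, not the repository's code):

# Sketch: fail fast when the pre-update health check fails.
if ! openstack stack check --wait "$STACKNAME"; then
    echo "stack $STACKNAME failed its health check; skipping update" >&2
    exit 1
fi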
@@ -46,4 +46,5 @@
- { role: calculateKnownHosts, tags: [ calculateKnownHosts ] }
- { role: SSHKnownHosts, tags: [ known_hosts ] }
- { role: jasons_ssh_ca, tags: [ ssh_ca ] }
- { role: ntp }
- { role: set_timezone }
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes,VisNodes'
gather_facts: True
vars_files:
- vars/passwords.yml
- vars/names.yml
@@ -8,10 +9,17 @@
- vars/slurm.yml
- vars/vars.yml
tasks:
- include_vars: vars/passwords.yml
- include_vars: vars/names.yml
- include_vars: vars/ldapConfig.yml
- include_vars: vars/filesystems.yml
- include_vars: vars/slurm.yml
- include_vars: vars/vars.yml
- { name: set use shared state, set_fact: usesharedstatedir=False }
tags: [ always ]
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
gather_facts: False
vars_files:
- vars/passwords.yml
- vars/names.yml
@@ -24,26 +32,24 @@
- { role: move_homedir, tags: [ authentication, filesystems ] }
- { role: nfs-client, nfsMounts: "{{ computeNfsMounts }}", tags: [ filesystems ] }
- { role: slurm-common, tags: [ slurm, slurm-common ] }
-    - { role: lmod, tags: [ other ] }
+    #- { role: lmod, tags: [ other ] } # actually preferred on ubuntu but mutually exclusive with environment-modules
- { role: enable_modules, default_modules: "modulecmd", tags: [ other ] }
- { role: postfix, tags: [ mail, other ] }
- { role: set_semaphore_count, tags: [ semaphore ] }
- { role: ldapclient, ssl: false, tags: [ ldapclient ] }
- { role: pam_sshd, computenodepam: true, tags: [ authentication, pamd ] }
- { role: ssh-keepalive, tags: [ ssh ] }
- { role: enable_sudo_group, tags: [ authentication ] }
- hosts: 'VisNodes'
gather_facts: False
vars_files:
- vars/passwords.yml
- vars/names.yml
- vars/ldapConfig.yml
- vars/filesystems.yml
- vars/slurm.yml
- vars/vars.yml
roles:
- { role: gpu, tags: [ gpu ] }
- hosts: 'DesktopNodes,ComputeNodes,LoginNodes'
gather_facts: False
vars_files:
- vars/passwords.yml
- vars/names.yml
@@ -55,6 +61,7 @@
- { role: slurm_config, tags: [slurm, slurm_config] }
- hosts: 'DesktopNodes,ComputeNodes'
gather_facts: False
vars_files:
- vars/passwords.yml
- vars/names.yml
@@ -65,4 +72,40 @@
strategy: free
roles:
- { role: slurm-start, start_slurmd: True, tags: [ slurm, slurmstart ] }
-    #- { role: mate-de-install, tags: [ mate-de-install ] } # TODO this crashes for everything except cmca
\ No newline at end of file
+    #- { role: mate-de-install, tags: [ mate-de-install ] } # TODO this crashes for everything except cmca
- hosts: 'K1Nodes'
tasks:
- { name: set nvidia driver version, set_fact: nvidia_version='367.130' }
tags: [ always ]
- hosts: 'VisNodes'
tasks:
- { name: set cuda monitoring, set_fact: cudamonitor=true }
tags: [ always ]
- hosts: 'ComputeNodes'
vars_files:
- vars/slurm.yml
roles:
- { role: slurm-common, tags: [ slurm, slurmbuild ] }
- { role: slurm_config, tags: [ slurm_config, slurm ] }
- { role: calculateNhcConfig, tags: [ nhc, slurm ] }
- { role: nhc, tags: [ nhc, slurm ] }
- { role: slurm-start, start_slurmd: True, tags: [ slurm, slurm-start ] }
- { role: vncserver, tags: [ other ] }
- { role: jasons_ssh_ca, tags: [ other ] }
- { role: lmod, tags: [ other ] }
#- { role: extra_packages, tags: [ other, extra_packages ] } # commented because it takes forever! good enough if this gets tested on clusterbuild
- { role: enable_modules, default_modules: "modulecmd", tags: [ other ] }
- { role: postfix, tags: [ mail, other ] }
- { role: set_semaphore_count, tags: [ semaphore ] }
- { role: telegraf, telegraf_install_rpm_url: 'http://consistency0/src/telegraf-1.12.6-1.x86_64.rpm', tags: [ monitoring,SiteSpecific ] }
- hosts: 'VisNodes'
roles:
- { role: systemd-nvidia-uvm, tags: [ uvm,SiteSpecific ] }
- hosts: 'VisNodes'
roles:
- { role: deploy-xorg, tags: [ deploy-xorg ] }
\ No newline at end of file
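The gpu role later in this diff compares the running driver against nvidia_version (pinned to '367.130' for the K1 nodes above). The same comparison can be made by hand on a node; a sketch:

# Sketch: print the loaded driver version to compare against nvidia_version.
nvidia-smi --query-gpu=driver_version --format=csv,noheader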
@@ -35,7 +35,7 @@
- { role: slurm-common, tags: [ slurm, slurm-common ] }
- { role: slurm_config, tags: [ slurm, slurm-config ] }
- { role: slurm-start, start_slurmdbd: True, start_slurmctld: True, tags: [ slurm-start ] }
-    - { role: telegraf, tags: [ monitoring ] }
+    - { role: telegraf, tags: [ monitoring, SiteSpecific ] }
# - { role: provision_slurm, use_active_directory: False, lockpath: "/mnt/home", tags: [ slurm ] }
# - { role: provision_homedir, use_active_directory: False, mntpt: "/mnt/home", tags: [ provisioning ] }
#!/bin/bash
#source /etc/profile.d/modulecmd.sh
#source /etc/profile.d/modules.sh
# Ubuntu is very picky so let's skip it
/bin/grep Ubuntu -q /etc/issue && exit 0
module purge
module load gcc/8.1.0
module list
gcc --version | grep 8.1.0
\ No newline at end of file
@@ -9,7 +9,7 @@
state: present
become: true
- name: Create a parent account
-  command: ./sacctmgr -i add account parentAccount cluster=m3 Description="Test parent account" Organization="Monash"
+  command: ./sacctmgr -i add account parentAccount cluster=cicd Description="Test parent account" Organization="Monash"
args:
chdir: '/opt/slurm-latest/bin'
become: true
@@ -17,7 +17,7 @@
failed_when: result.rc != 0 and result.stdout != " Nothing new added."
- name: Create a project associated with a given parent
-  command: ./sacctmgr -i add account testProject parent=parentAccount cluster=m3 Organization="Monash"
+  command: ./sacctmgr -i add account testProject parent=parentAccount cluster=cicd Organization="Monash"
args:
chdir: '/opt/slurm-latest/bin'
become: true
@@ -25,7 +25,7 @@
failed_when: result.rc != 0 and result.stdout != " Nothing new added."
- name: Create a user and associate them with a project
-  command: ./sacctmgr -i create user hpctest cluster=m3 account=testProject partition=batch
+  command: ./sacctmgr -i create user hpctest cluster=cicd account=testProject partition=batch
args:
chdir: '/opt/slurm-latest/bin'
become: true
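Once these tasks have run against the renamed cicd cluster, the resulting associations can be checked with sacctmgr itself; -n suppresses the header and -P makes the output parseable. A sketch, run from the same /opt/slurm-latest/bin directory:

# Sketch: list the account/user associations created above.
./sacctmgr -n -P show assoc cluster=cicd format=account,user,partition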
---
-domain: massive.org.au
+domain: cicd.test.au
smtp_smarthost: smtp.monash.edu.au
---
desktopNodeList:
- { name : 'DesktopNodes', interface : 'eth0' }
clustername: "m3"
projectname: "m3"
clustername: "cicd"
projectname: "cicd"
slurm_version: 19.05.4
munge_version: 0.5.13
nhc_version: 1.4.2
@@ -23,7 +23,7 @@ for group in d['groups'].keys():
else:
hosts[h] = ['%s.%s %s'%(name,domain,name)]
-for h in hosts.keys():
+for h in sorted(hosts.keys()):
if d['hostvars'].has_key(h):
for addr in d['hostvars'][h]['ansible_all_ipv4_addresses']:
if "172.16.200" in addr:
@@ -32,14 +32,14 @@ for h in hosts.keys():
string=string+" %s"%(name)
print string
-for h in hosts.keys():
+for h in sorted(hosts.keys()):
if d['hostvars'].has_key(h):
string="%s"%(d['hostvars'][h]['ansible_default_ipv4']['address'])
for name in hosts[h]:
string=string+" %s"%(name)
print string
-for h in hosts.keys():
+for h in sorted(hosts.keys()):
if d['hostvars'].has_key(h):
if d['hostvars'][h].has_key('ansible_tun0'):
string="%s"%(d['hostvars'][h]['ansible_tun0']['ipv4']['address'])
---
-- name: make sure out repo server is resolvable
+- name: make sure our repo server is resolvable
  lineinfile:
-    dest: /etc/hosts
-    line: "{{ reposerverip }} {{ reposervername }}" #this is duplicated in the role calculateEtcHosts
+    path: /etc/hosts
+    line: "{{ reposerverip }} {{ reposervername }}"
owner: root
group: root
become: True
@@ -11,14 +11,18 @@ from subprocess import call
import re
import json
-def grab_card_ids():
-    # This method runs nvidia-smi to grab the card ids, then returns a list
-    if not os.path.isfile("/bin/nvidia-smi"):
+def getNvidia_smi_path():
+    if os.path.isfile("/bin/nvidia-smi"):
+        return "/bin/nvidia-smi"
+    elif os.path.isfile("/usr/bin/nvidia-smi"):
+        return "/usr/bin/nvidia-smi"
+    else:
         print("nvidia-smi binary not found!")
         exit(1)
-    cmd = ["/bin/nvidia-smi", "--query-gpu=pci.bus_id","--format=csv,noheader"]
+
+def grab_card_ids():
+    # This method runs nvidia-smi to grab the card ids, then returns a list
+    cmd = [getNvidia_smi_path(), "--query-gpu=pci.bus_id","--format=csv,noheader"]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cards = []
@@ -27,15 +31,11 @@ def grab_card_ids():
line = line.rstrip().split(":")[2]
pcibus_num = int(re.sub('[.:]', '', line).rstrip("0"),16)
card = "PCI:0:{}:0".format(str(pcibus_num))
        cards.append(card)
return cards
def grab_card_boardname():
-    if not os.path.isfile("/bin/nvidia-smi"):
-        print("nvidia-smi binary not found!")
-        exit(1)
-    cmd = ["/bin/nvidia-smi", "--query-gpu=name","--format=csv,noheader"]
+    cmd = [getNvidia_smi_path(), "--query-gpu=name","--format=csv,noheader"]
cards = []
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
@@ -3,7 +3,7 @@
- name: make sure environment modules are installed
package:
name: environment-modules
-    state: installed
+    state: present
become: true
- name: template lmod bash
@@ -30,6 +30,9 @@
become_user: root
when: default_modules == "lmod"
# vars:
# MODULESHOMEvar: '/usr/share/modules'
- name: template modulecmd bash
template: src=modulecmd.sh.j2 dest=/etc/profile.d/modulecmd.sh
become: true
@@ -59,3 +62,14 @@
become: true
become_user: root
when: default_modules == "modulecmd"
- name: Create a symbolic link
file:
src: /usr/share/modules
dest: /usr/share/Modules
owner: root
group: root
state: link
mode: u=rwx,g=rx,o=rx
become: true
when: ansible_os_family == 'Debian' and default_modules == 'modulecmd'
\ No newline at end of file
@@ -7,8 +7,6 @@
register: sysctl_hostname
check_mode: no
changed_when: False
-  become: true
-  become_user: root
- name: set hostname by sysctl
shell: sysctl kernel.hostname="{{ inventory_hostname }}"
@@ -8,12 +8,16 @@
become: true
become_user: root
when: ansible_os_family == 'RedHat'
changed_when: false
- name: "Clear yum pending transactions"
command: yum-complete-transaction --cleanup-only
become: true
become_user: root
register: yumCompleteTransactioncall
when: ansible_os_family == 'RedHat'
changed_when: '"No unfinished transactions left." not in yumCompleteTransactioncall.stdout'
- name: "Install extra packages"
yum: "name={{ item }} exclude={{ excludes|join(',') }} update_cache=yes state=present"
---
-- name: install deps
-  yum: name={{ item }} state=installed
-  become: true
-  with_items:
-    - gcc
-    - perl
-    - wget
-    - pciutils
-    - kernel-headers
-    - kernel-devel
-    - xterm
-    - libX11-common
-    - libX11-devel
-    - libX11
-    - libglvnd-devel
-    - xorg-x11-server-common
-    - xorg-x11-util-macros
-    - xorg-x11-server-utils
-    - xorg-x11-font-utils
-    - xorg-x11-server-Xorg
-    - xorg-x11-glamor
-    - xorg-x11-xinit
-    - xorg-x11-utils
-    - xorg-x11-xauth
-    - xorg-x11-proto-devel
-    - xorg-x11-xkb-utils
+- name: install deps
+  package:
+    state: present
+    name:
+      - gcc
+      - perl
+      - wget
+      - pciutils
+      - kernel-headers
+      - kernel-devel
+      - xterm
+      - libX11-common
+      - libX11-devel
+      - libX11
+      - libglvnd-devel
+      - xorg-x11-server-common
+      - xorg-x11-util-macros
+      - xorg-x11-server-utils
+      - xorg-x11-font-utils
+      - xorg-x11-server-Xorg
+      - xorg-x11-glamor
+      - xorg-x11-xinit
+      - xorg-x11-utils
+      - xorg-x11-xauth
+      - xorg-x11-proto-devel
+      - xorg-x11-xkb-utils
+      - python-jinja2
+  become: true
+  when: ansible_os_family == 'RedHat'
- name: install deps
  apt:
    name:
- 'gcc'
- 'perl'
- 'wget'
@@ -41,40 +43,14 @@
- 'libglvnd-dev'
- 'xserver-xorg'
- 'vim'
-    - 'python-jinja2'
+    - 'python3-jinja2'
state: present
update_cache: yes
become: true
become_user: root
when: ansible_distribution == 'Ubuntu'
-- name: install deps
-  yum: name={{ item }} state=installed
-  become: true
-  with_items:
-    - gcc
-    - perl
-    - wget
-    - pciutils
-    - kernel-headers
-    - kernel-devel
-    - xterm
-    - libX11-common
-    - libX11-devel
-    - libX11
-    - libglvnd-devel
-    - xorg-x11-server-common
-    - xorg-x11-util-macros
-    - xorg-x11-server-utils
-    - xorg-x11-font-utils
-    - xorg-x11-server-Xorg
-    - xorg-x11-glamor
-    - xorg-x11-xinit
-    - xorg-x11-utils
-    - xorg-x11-xauth
-    - xorg-x11-proto-devel
-    - xorg-x11-xkb-utils
-  when: ansible_os_family == 'RedHat'
- name: install development tools
yum: name="@Development Tools" state=installed
become: true
@@ -100,7 +76,7 @@
- name: remove nouveau
modprobe: name=nouveau state=absent
  become: true
become_user: root
- name: get kernel version
@@ -116,7 +92,7 @@
ignore_errors: true
- name: set default driver version
  set_fact:
installed_driver_version: '0.0'
- name: check nvidia driver version
@@ -127,20 +103,20 @@
changed_when: False
- name: set install default
  set_fact:
install_driver: false
- name: set uninstall default
  set_fact:
uninstall_driver: false
- name: set install
  set_fact:
install_driver: true
when: not nvidia_driver.stat.exists or not installed_driver_version.stdout == nvidia_version
- name: set uninstall
  set_fact:
uninstall_driver: true
when: nvidia_driver.stat.exists and not installed_driver_version.stdout == nvidia_version
@@ -161,18 +137,18 @@
become_user: root
when: uninstall_driver
- name: get nvidia driver
get_url: url=http://consistency0/src/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run dest=/tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
become: true
become_user: root
when: install_driver
#- name: Copy boot file
# template: src=grub.conf.j2 dest=/boot/grub/grub.conf
# become: true
#
#- name: Copy X config file
# template: src=xorg.conf.j2 dest=/etc/X11/xorg.conf
# become: true
- name: Copy xserver file
@@ -195,6 +171,8 @@
shell: nvidia-smi --gom=0
become: true
become_user: root
register: nvidiagomcall
changed_when: '"cannot be changed" not in nvidiagomcall.stdout' # only tested on a k80
- name: enable persistenced on boot
service: name=nvidia-persistenced state=started enabled=yes
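Whether the --gom=0 call above actually changed anything (it is not supported on every board, hence the "only tested on a k80" comment) can be verified with a query; a sketch:

# Sketch: query the current GPU operation mode; all_on corresponds to gom 0.
nvidia-smi --query-gpu=gom.current --format=csv,noheader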
@@ -205,7 +183,7 @@
shell: /usr/bin/nvidia-xconfig -a --use-display-device=none --preserve-busid
become: true
become_user: root
  args:
creates: /etc/X11/xorg.conf
#- name: Template xorg.conf for nodes with one GPU
@@ -213,15 +191,7 @@
# become: true
# become_user: root
# when: template_xorgconf is defined and template_xorgcon
-- name: install dependencies for nvidia-xconf-gen
-  apt:
-    name:
-      - python-jinja2
-      - python3-jinja2
-    update_cache: yes
-    state: present
-  become: true
-  become_user: root
- name: run nvidia-xconf-gen
script: scripts/nvidia-xconf-gen.py
@@ -230,7 +200,7 @@
changed_when: False
- name: set env for nvidia_card_lists
  set_fact:
nvidiacardslist: "{{ nvidiacards.stdout | from_json }}"
- name: generate nvidia-xorg-conf
@@ -2,20 +2,21 @@
- include_vars: "{{ ansible_os_family }}.yml"
- name: install lua centos
-  yum: name={{ item }} state=installed update_cache=yes
-  with_items:
-    - lua
-    - lua-filesystem
-    - lua-posix
-    - tcl
-    - rsync
-    - gcc
-    - lua-devel
+  package:
+    state: present
+    name:
+      - lua
+      - lua-filesystem
+      - lua-posix
+      - tcl
+      - rsync
+      - gcc
+      - lua-devel
become: true
when: ansible_os_family == 'RedHat'
- name: install lua RHEL7
-  yum: name={{ item }} state=installed update_cache=yes enablerepo="Monash_University_EPEL7_EPEL_7_-_x86_64"
+  yum: name={{ item }} state=present update_cache=yes enablerepo="Monash_University_EPEL7_EPEL_7_-_x86_64"
with_items:
- lua
- lua-filesystem
@@ -30,7 +31,9 @@
become: true
- name: install lua debian
-  apt: name=lmod state=installed
+  package:
+    name: lmod
+    state: present
become: true
when: ansible_os_family == 'Debian'