Commit 859420a8 authored by Simon Michnowicz
parents aca531bb 9a2346f8
1 merge request: !6 Master
Showing with 186 additions and 84 deletions
......@@ -3,7 +3,7 @@
-
name: "Installing Apache"
sudo: true
yum: name={{ item }} state=latest
yum: name={{ item }} state=present
with_items:
- mod_ssl
- mod_wsgi
......
---
- name: "Install extra packages"
yum: "name={{ item }} state=latest"
yum: "name={{ item }} state=present"
with_items:
pkgs
sudo: true
......
......@@ -11,3 +11,10 @@
command: yum makecache
sudo: true
when: ansible_os_family == 'RedHat'
# For some reason ed went missing from the NeCTAR official CentOS 7 image
# This meant that fail2ban could ban you, but could never unban you
- name: "make sure ed is installed"
yum: name=ed state=installed
sudo: true
when: ansible_os_family == 'RedHat'
......@@ -2,21 +2,21 @@
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/$basearch/
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.7/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/noarch
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.7/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=0
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.6/LATEST/EPEL.repo/epel-$releasever/SRPMS
baseurl=http://download.gluster.org/pub/gluster/glusterfs/3.7/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
......@@ -14,7 +14,7 @@
when: ansible_distribution == 'CentOS' and importRepo is defined
- name: Install yum packages
yum: name={{ item }} state=latest
yum: name={{ item }} state=present
with_items: yumPackageList
sudo: true
when: ansible_distribution == 'CentOS' and yumPackageList is defined
......
......@@ -2,7 +2,7 @@
- include_vars: "{{ ansible_distribution }}_{{ ansible_distribution_version }}_{{ ansible_architecture }}.yml"
- name: install system packages apt
apt: name={{ item }} state=installed update_cache=true
apt: name={{ item }} state=installed
sudo: true
with_items: system_packages
when: ansible_os_family == 'Debian'
......
......@@ -26,7 +26,7 @@
-
name: "Installing prerequisites Redhat"
yum: name={{ item }} state=latest
yum: name={{ item }} state=present
sudo: true
with_items:
- libxml2-devel
......
---
- name: "Install open ldap package yum"
yum: name={{ item }} state=latest
yum: name={{ item }} state=present
with_items:
- openldap
- openldap-clients
......@@ -13,7 +13,7 @@
when: ansible_os_family == 'RedHat'
- name: "Install open ldap package apt"
action: apt pkg={{ item }} state=installed
action: apt pkg={{ item }} state=present
with_items:
- ldap-utils
- sssd
......
......@@ -3,6 +3,10 @@
stat: path={{ dest }}
register: stat_r
- name: debug1
debug: var=stat_r
- name: mv
command: mv "{{ dest }}" "{{ dest }}_old"
when: stat_r.stat.exists and stat_r.stat.isdir
......@@ -12,6 +16,8 @@
stat: path={{ dest }}
register: stat_r
- name: debug2
debug: var=stat_r
- name: link
file: src="{{ src }}" dest="{{ dest }}" state=link
......
......@@ -14,30 +14,32 @@
register: drivers_installed
ignore_errors: true
- name: yum update to upgrade kernel
shell: "yum update -y"
sudo: true
ignore_errors: true
when: ansible_os_family == "RedHat" and drivers_installed|failed
# This is NASTY. Don't upgrade production systems without taking them out of the queue first.
#- name: yum update to upgrade kernel
# shell: "yum update -y"
# sudo: true
# ignore_errors: true
# when: ansible_os_family == "RedHat" and drivers_installed|failed
#
# A REBOOT IS NEEDED AFTER A KERNEL UPDATE
#
- name: restart machine
shell: sleep 5; sudo shutdown -r now "Ansible updates triggered"
async: 2
poll: 0
ignore_errors: true
sudo: true
when: ansible_os_family == "RedHat" and drivers_installed|failed
- name: waiting for server to come back
local_action: wait_for host={{ ansible_ssh_host }} state=started port=22 delay=10 search_regex=OpenSSH
sudo: false
- name: waiting for server to come back number 2
local_action: wait_for host={{ ansible_ssh_host }} state=started port=22 delay=10 search_regex=OpenSSH
sudo: false
#- name: restart machine
# shell: sleep 5; sudo shutdown -r now "Ansible updates triggered"
# async: 2
# poll: 0
# ignore_errors: true
# sudo: true
# when: ansible_os_family == "RedHat" and drivers_installed|failed
#
#- name: waiting for server to come back
# local_action: wait_for host={{ ansible_ssh_host }} state=started port=22 delay=10 search_regex=OpenSSH
# sudo: false
#
#- name: waiting for server to come back number 2
# local_action: wait_for host={{ ansible_ssh_host }} state=started port=22 delay=10 search_regex=OpenSSH
# sudo: false
- name: copy driver source
......@@ -89,6 +91,7 @@
insertafter: "auto {{ MELLANOX_DEVICE_NAME }}"
sudo: true
when: ansible_os_family=="Debian" and drivers_installed|failed
- name: Ubuntu network interfaces - line 3
lineinfile:
args:
......@@ -107,7 +110,7 @@
poll: 0
ignore_errors: true
sudo: true
when: ansible_os_family=="Centos" and drivers_installed|failed
when: ansible_os_family=="RedHat" and drivers_installed|failed
- name: restart machine for Ubuntu - because it is 'special'
shell: "sleep 5; sudo shutdown -r now"
......@@ -115,19 +118,21 @@
poll: 1
ignore_errors: true
sudo: true
when: ansible_os_family=="Debian"
when: ansible_os_family=="Debian" and drivers_installed|failed
- name: waiting for server to come back
local_action: wait_for host={{ ansible_ssh_host }} state=started port=22 delay=10 search_regex=OpenSSH
sudo: false
when: drivers_installed|failed
- name: waiting for server to come back 2
local_action: wait_for host={{ ansible_ssh_host }} state=started port=22 delay=10 search_regex=OpenSSH
when: drivers_installed|failed
- name: bring up interface
#variable=eth0 or ens6
command: ifup {{ MELLANOX_DEVICE_NAME }}
sudo: true
when: ansible_distribution_major_version == "7"
when: ansible_os_family=="RedHat" and ansible_distribution_major_version == "7"
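The kernel-update tasks above were commented out because, as the note says, a production node must be taken out of the queue before an upgrade and reboot. Since this repository also manages Slurm, one way to do that could look like the sketch below; the task itself, and the assumption that scontrol is available on the managed node, are illustrative and not part of this commit.

- name: drain node before kernel upgrade (illustrative sketch, not part of this commit)
  command: scontrol update nodename={{ ansible_hostname }} state=drain reason="kernel upgrade"
  sudo: true
  ignore_errors: true
  when: ansible_os_family == "RedHat" and drivers_installed|failed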
......@@ -13,43 +13,12 @@
lineinfile:
args:
dest: /etc/passwd
regexp: "{{ ansible_ssh_user }}:x:1001:1001::/home/{{ ansible_ssh_user }}:.*"
line: "{{ ansible_ssh_user }}:x:1001:1001::/local_home/{{ ansible_ssh_user }}:/bin/bash"
regexp: '{{ ansible_ssh_user }}:x:(.*):(.*):(.*):/home/{{ ansible_ssh_user }}:(.*)'
line: '{{ ansible_ssh_user }}:x:\1:\2:\3:/local_home/{{ ansible_ssh_user }}:\4'
backrefs: yes
sudo: true
register: edit1
register: edit
- name: edit passwd file
lineinfile:
args:
dest: /etc/passwd
regexp: "{{ ansible_ssh_user }}:x:500:500::/home/{{ ansible_ssh_user }}:.*"
line: "{{ ansible_ssh_user }}:x:500:500::/local_home/{{ ansible_ssh_user }}:/bin/bash"
backrefs: yes
sudo: true
register: edit2
- name: edit passwd file
lineinfile:
args:
dest: /etc/passwd
regexp: "{{ ansible_ssh_user }}:x:1000:1000::/home/{{ ansible_ssh_user }}:.*"
line: "{{ ansible_ssh_user }}:x:1000:1000::/local_home/{{ ansible_ssh_user }}:/bin/bash"
backrefs: yes
sudo: true
register: edit3
# ubuntu:x:1000:1000:Ubuntu:/home/ubuntu:/bin/bash
- name: edit passwd file for ubuntu 14
lineinfile:
args:
dest: /etc/passwd
regexp: "{{ ansible_ssh_user }}:x:1000:1000:Ubuntu:/home/{{ ansible_ssh_user }}:.*"
line: "{{ ansible_ssh_user }}:x:1000:1000:Ubuntu:/local_home/{{ ansible_ssh_user }}:/bin/bash"
backrefs: yes
sudo: true
register: edit4
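To make the new backreference-based rewrite concrete, here is the worked example implied by the sample entry in the removed comment above (with ansible_ssh_user assumed to be ubuntu):

# before: ubuntu:x:1000:1000:Ubuntu:/home/ubuntu:/bin/bash
# after:  ubuntu:x:1000:1000:Ubuntu:/local_home/ubuntu:/bin/bash
# The captured groups \1=1000, \2=1000, \3=Ubuntu, \4=/bin/bash preserve the UID, GID, GECOS and shell,
# so this single task covers the 500/1000/1001 cases the removed tasks handled one by one.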
......@@ -6,7 +6,7 @@
when: ansible_os_family == "Debian"
- name: Installing MySQL RedHat
yum: name="{{ item }}" state=latest
yum: name="{{ item }}" state=present
with_items: client_packages
sudo: true
when: ansible_os_family == "RedHat"
......@@ -4,6 +4,6 @@
with_items:
- nfs-common
- nfs-kernel-server
apt: "name={{ item }} state=latest"
apt: "name={{ item }} state=present"
sudo: true
......@@ -4,5 +4,5 @@
with_items:
- bind-utils
- nfs-utils
yum: "name={{ item }} state=latest"
yum: "name={{ item }} state=present"
sudo: true
---
- name: install nfs kernel server
apt: name=nfs-kernel-server state=latest
apt: name=nfs-kernel-server state=present
sudo: true
when: ansible_os_family == "Debian"
......
---
- name: "update cache centos"
shell: yum update -y
sudo: true
when: ansible_os_family == 'RedHat'
- name: "update cache debian"
shell: apt-get update -y
sudo: true
when: ansible_os_family == 'Debian'
---
- name: make dir
file: path="{{ provision_homedir | dirname }}" state=directory mode=755 owner=root
sudo: true
run_once: true
- name: install python packages
yum: name=python-ldap state=installed
sudo: true
when: ansible_os_family == 'RedHat'
- name: install python packages
apt: name=python-ldap state=installed
sudo: true
when: ansible_os_family == 'Debian'
- name: copy provision_homedir template
template: src=provision_homedir.py.j2 dest={{ provision_homedir }} mode=700 owner=root
sudo: true
run_once: true
# the lockfile for making home directories should be located on the shared directory where the home directories will be created. Otherwise it will be racy
- name: provision_homedir cron job
cron: name=provision_homedir job="/usr/bin/flock -x -n /home/provision.lck -c {{ provision_homedir }}" user=root minute=*/30 state=present
sudo: true
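A minimal illustration of the locking comment above; the /var/run path is hypothetical and only shown for contrast:

# racy:  /usr/bin/flock -x -n /var/run/provision.lck -c {{ provision_homedir }}
#        every node holds its own local lock, so several nodes can provision the same directories at once
# safe:  /usr/bin/flock -x -n /home/provision.lck -c {{ provision_homedir }}
#        the lock lives on the shared /home, so only one node's cron job runs the script at a time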
#!/usr/bin/python
# Provision home directories for LDAP users (rendered from an Ansible template).
import errno
import ldap
import traceback
import os
import stat


class ldapSearchConfig:
    """Connection and search settings for the LDAP server."""
    def __init__(self):
        self.ldapserver = ""
        self.binddn = ""
        self.bindpw = ""
        self.baseDN = ""
        self.searchFilter = ""
        self.cacertfile = ''


class genericUser:
    """Minimal holder for one LDAP entry."""
    def __init__(self):
        self.dn = ""
        self.cn = ""
        self.entry = ""
        self.uid = ""


def get_users(server):
    """Bind to the LDAP server and return a dict of dn -> genericUser."""
    # ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, server.cacertfile)
    ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
    l = ldap.initialize(server.ldapserver)
    l.simple_bind_s(server.binddn, server.bindpw)
    retrieveAttributes = ["*"]
    searchScope = ldap.SCOPE_SUBTREE
    ldap_result_id = l.search(server.baseDN, searchScope, server.searchFilter, retrieveAttributes)
    rtype, rdata = l.result(ldap_result_id, 1)
    allusers = {}
    for user in rdata:
        dn = user[0]
        attrs = user[1]
        allusers[dn] = genericUser()
        allusers[dn].dn = dn
        allusers[dn].entry = attrs
    return allusers


def mk_homedir(path, uidNumber, gidNumber):
    """Create the home directory if it is missing and fix its ownership."""
    try:
        statinfo = os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        os.mkdir(path, 0o700)
        statinfo = os.stat(path)
    if stat.S_ISDIR(statinfo.st_mode):
        if statinfo.st_gid != gidNumber or statinfo.st_uid != uidNumber:
            os.chown(path, uidNumber, gidNumber)
    else:
        raise Exception("users homedirectory is not a directory %s" % path)


# The values below are filled in by Ansible when the template is rendered.
s = ldapSearchConfig()
s.ldapserver = "{{ ldapURI }}"
s.binddn = "{{ ldapBindDN }}"
s.bindpw = "{{ ldapBindDNPassword }}"
s.baseDN = "{{ ldapBase }}"
s.searchFilter = "{{ search_filter }}"
homeDirEntry = "{{ homeDirEntry }}"

users = get_users(s)
for user in users:
    # One bad entry should not stop provisioning for the remaining users.
    try:
        mk_homedir(users[user].entry[homeDirEntry][0],
                   int(users[user].entry['uidNumber'][0]),
                   int(users[user].entry['gidNumber'][0]))
    except Exception:
        print(traceback.format_exc())
---
use_active_directory: False
provision_homedir: /usr/local/sbin/provision_homedir.py
homeDirEntry: "{% if use_active_directory %}unixHomeDirectory{% else %}homeDirectory{% endif %}"
search_filter: "{% if use_active_directory %}(unixHomeDirectory=*){% else %}(objectClass=posixAccount){% endif %}"
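For reference, these defaults render to one of two combinations (derived directly from the conditionals above):

# use_active_directory: True  -> homeDirEntry: unixHomeDirectory   search_filter: (unixHomeDirectory=*)
# use_active_directory: False -> homeDirEntry: homeDirectory       search_filter: (objectClass=posixAccount)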
---
- name: make dir
file: path="{{ provision_slurm | dirname }}" state=directory mode=755 owner=root
sudo: true
run_once: true
- name: install python packages
yum: name=python-ldap state=installed
sudo: true
when: ansible_os_family == 'RedHat'
- name: install python packages
apt: name=python-ldap state=installed
sudo: true
when: ansible_os_family == 'Debian'
- name: copy provision_slurm template
template: src=provision_slurm.py.j2 dest={{ provision_slurm }} mode=700 owner=root
sudo: true
run_once: true
# the lockfile for making home directories should be located on the shared directory where the home directories will be created. Otherwise it will be racy
- name: provision_slurm cron job
cron: name=provision_slurm job="/usr/bin/flock -x -n /home/provision.lck -c {{ provision_slurm }}" user=root minute=*/30 state=present
sudo: true