---
- name: make sure environment modules are installed
  package:
    name: environment-modules
    state: present
  become: true
  when: default_modules == "modulecmd"

- name: template lmod bash
  template: src=lmod_{{ ansible_os_family }}.sh.j2 dest=/etc/profile.d/lmod.sh
  become: true
  become_user: root
  when: default_modules == "lmod"

- name: template lmod csh
  template: src=lmod_{{ ansible_os_family }}.csh.j2 dest=/etc/profile.d/lmod.csh
  become: true
  become_user: root
  when: default_modules == "lmod"

- name: remove modulecmd bash
  file: path=/etc/profile.d/zz_modulecmd.sh state=absent
  become: true
  become_user: root
  when: default_modules == "lmod"

- name: remove modulecmd csh
  file: path=/etc/profile.d/zz_modulecmd.csh state=absent
  become: true
  become_user: root
  when: default_modules == "lmod"

# vars:
#   MODULESHOMEvar: '/usr/share/modules'
# note: the alias profile here is overwritten
- name: template modulecmd bash
  template: src=modulecmd.sh.j2 dest=/etc/profile.d/zz_runlast_modulecmd.sh
  become: true
  become_user: root
  when: default_modules == "modulecmd"

# simon: this redefines the bash `module` function, as it is overwritten in module.sh
#- name: template patchmodulecmd bash
#  template: src=patchmodulecmd.sh.j2 dest=/etc/profile.d/patchmodulecmd.sh
#  become: true
#  become_user: root
#  when: default_modules == "modulecmd"

- name: delete anachronistic file (simon: to remove later)
  file:
    path: /etc/profile.d/patchmodulecmd.sh.j2
    state: absent
  become: true
  become_user: root
  ignore_errors: true

- name: template modulecmd csh
  template: src=modulecmd.csh.j2 dest=/etc/profile.d/zz_runlast_modulecmd.csh
  become: true
  become_user: root
  when: default_modules == "modulecmd"

- name: remove lmod bash
  file: path=/etc/profile.d/lmod.sh state=absent
  become: true
  become_user: root
  when: default_modules == "modulecmd"

- name: remove lmod csh
  file: path=/etc/profile.d/lmod.csh state=absent
  become: true
  become_user: root
  when: default_modules == "modulecmd"

- name: Create a symbolic link
  file:
    src: /usr/share/modules
    dest: /usr/share/Modules
    owner: root
    group: root
    state: link
    mode: u=rwx,g=rx,o=rx
  become: true
  when: ansible_os_family == 'Debian' and default_modules == 'modulecmd'

- name: load modulecmd in /etc/bash.bashrc so the module command works from a terminal on the desktop
  lineinfile:
    path: /etc/bash.bashrc
    line: 'if [ -f /etc/profile.d/modulecmd.sh ]; then source /etc/profile.d/modulecmd.sh; fi'
  become: true
  when: ansible_os_family == "Debian" and default_modules == 'modulecmd'
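Every task above switches on the default_modules variable; a minimal vars sketch (the value picked here is only an example, but "lmod" and "modulecmd" are the two values the conditionals recognise):

# e.g. in group_vars/all.yml
default_modules: lmod        # or "modulecmd"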
#!/bin/csh
# -*- shell-script -*-
########################################################################
# This is the system-wide source file for setting up modules.
########################################################################
set MY_NAME="{{ lmoddir }}/lmod/lmod/init/cshrc"

if ( ! $?MODULEPATH_ROOT ) then
  if ( ! $?USER ) then
    setenv USER $LOGNAME
  endif
  set UNAME = `uname`
  setenv LMOD_sys  $UNAME
  setenv LMOD_arch `uname -m`
  if ( "x$UNAME" == xAIX ) then
    setenv LMOD_arch rs6k
  endif
  setenv TARG_TITLE_BAR_PAREN " "
  setenv LMOD_FULL_SETTARG_SUPPORT no
  setenv LMOD_SETTARG_CMD :
  setenv LMOD_COLORIZE yes
  setenv LMOD_PREPEND_BLOCK normal
  setenv MODULEPATH_ROOT "{{ lmoddir }}/modulefiles"
  setenv MODULEPATH `{{ lmoddir }}/lmod/lmod/libexec/addto --append MODULEPATH $MODULEPATH_ROOT/$LMOD_sys $MODULEPATH_ROOT/Core`
  setenv MODULEPATH `{{ lmoddir }}/lmod/lmod/libexec/addto --append MODULEPATH {{ lmoddir }}/lmod/lmod/modulefiles/Core`
  setenv MODULEPATH "/usr/local/Modules/modulefiles"
  setenv MODULESHOME "{{ lmoddir }}/lmod/lmod"
  setenv BASH_ENV "$MODULESHOME/init/bash"

  # If MANPATH is empty, Lmod adds a trailing ":" so that
  # the system MANPATH will be found
  if ( ! $?MANPATH ) then
    setenv MANPATH :
  endif
  setenv MANPATH `{{ lmoddir }}/lmod/lmod/libexec/addto MANPATH {{ lmoddir }}/lmod/lmod/share/man`
endif

if ( -f {{ lmoddir }}/lmod/lmod/init/csh ) then
  source {{ lmoddir }}/lmod/lmod/init/csh
endif
#!/bin/sh
# -*- shell-script -*-
########################################################################
# This is the system-wide source file for setting up modules.
########################################################################
if [ -z "${USER_IS_ROOT:-}" ]; then
  if [ -z "${MODULEPATH_ROOT:-}" ]; then
    export USER=${USER-${LOGNAME}}   # make sure $USER is set
    export LMOD_sys=`uname`
    LMOD_arch=`uname -m`
    if [ "x$LMOD_sys" = xAIX ]; then
      LMOD_arch=rs6k
    fi
    export LMOD_arch
    export MODULEPATH_ROOT="/usr/modulefiles:/usr/local/Modulefiles"
    export LMOD_SETTARG_CMD=":"
    export LMOD_FULL_SETTARG_SUPPORT=no
    export LMOD_COLORIZE=yes
    export LMOD_PREPEND_BLOCK=normal
    # collapse /etc/lmod/modulespath (comments stripped) into a ":"-separated list
    MODULEPATH=`sed -n 's/[ #].*$//; /./H; $ { x; s/^\n//; s/\n/:/g; p; }' /etc/lmod/modulespath`
    export MODULEPATH=/usr/local/Modules/modulefiles/:$MODULEPATH
    export MODULESHOME=/usr/share/lmod/lmod
    export BASH_ENV=$MODULESHOME/init/bash

    # If MANPATH is empty, Lmod adds a trailing ":" so that
    # the system MANPATH will be found
    if [ -z "${MANPATH:-}" ]; then
      export MANPATH=:
    fi
    export MANPATH=$(/usr/share/lmod/lmod/libexec/addto MANPATH /usr/share/lmod/lmod/share/man)
  fi

  # locate ps, expr and basename, falling back between /bin and /usr/bin
  PS_CMD=/bin/ps
  if [ ! -x $PS_CMD ]; then
    if [ -x /bin/ps ]; then
      PS_CMD=/bin/ps
    elif [ -x /usr/bin/ps ]; then
      PS_CMD=/usr/bin/ps
    fi
  fi
  EXPR_CMD=/usr/bin/expr
  if [ ! -x $EXPR_CMD ]; then
    if [ -x /usr/bin/expr ]; then
      EXPR_CMD=/usr/bin/expr
    elif [ -x /bin/expr ]; then
      EXPR_CMD=/bin/expr
    fi
  fi
  BASENAME_CMD=/usr/bin/basename
  if [ ! -x $BASENAME_CMD ]; then
    if [ -x /bin/basename ]; then
      BASENAME_CMD=/bin/basename
    elif [ -x /usr/bin/basename ]; then
      BASENAME_CMD=/usr/bin/basename
    fi
  fi

  # work out the current shell and source the matching Lmod init file
  my_shell=$($PS_CMD -p $$ -ocomm=)
  my_shell=$($EXPR_CMD "$my_shell" : '-*\(.*\)')
  my_shell=$($BASENAME_CMD $my_shell)
  if [ -f /usr/share/lmod/lmod/init/$my_shell ]; then
    . /usr/share/lmod/lmod/init/$my_shell >/dev/null # Module Support
  else
    . /usr/share/lmod/lmod/init/sh >/dev/null        # Module Support
  fi
  unset my_shell PS_CMD EXPR_CMD BASENAME_CMD
fi
# Local Variables:
# mode: shell-script
# indent-tabs-mode: nil
# End:
#!/bin/bash
# -*- shell-script -*-
LMOD_PKG={{ lmoddir }}/lmod/lmod
LMOD_DIR={{ lmoddir }}/lmod/lmod/libexec
LMOD_CMD={{ lmoddir }}/lmod/lmod/libexec/lmod
MODULESHOME={{ lmoddir }}/lmod/lmod
MODULEPATH=/usr/local/Modules/modulefiles
export LMOD_PKG
export LMOD_CMD
export LMOD_DIR
export MODULESHOME

########################################################################
# Define the module command: The first line runs the "lmod" command
# to generate text:
#     export PATH="..."
# then the "eval" converts the text into changes in the current shell.
#
# The second command is the settarg command. Normally LMOD_SETTARG_CMD
# is undefined or is ":". Either way the eval does nothing. When the
# settarg module is loaded, it defines LMOD_SETTARG_CMD. The settarg
# command knows how to read the ModuleTable that Lmod maintains and
# generates a series of env. vars that describe the current state of
# loaded modules. So if one is on an x86_64 linux computer with gcc/4.7.2
# and openmpi/1.6.3 loaded, then settarg will assign:
#
#     TARG=_x86_64_gcc-4.7.2_openmpi-1.6.3
#     TARG_COMPILER=gcc-4.7.2
#     TARG_COMPILER_FAMILY=gcc
#     TARG_MACH=x86_64
#     TARG_MPI=openmpi-1.6.3
#     TARG_MPI_FAMILY=openmpi
#     TARG_SUMMARY=x86_64_gcc-4.7.2_openmpi-1.6.3
#     TARG_TITLE_BAR=gcc-4.7.2 O-1.6.3
#     TARG_TITLE_BAR_PAREN=(gcc-4.7.2 O-1.6.3)
#
# Unloading openmpi/1.6.3 automatically changes these vars to be:
#
#     TARG=_x86_64_gcc-4.7.2
#     TARG_COMPILER=gcc-4.7.2
#     TARG_COMPILER_FAMILY=gcc
#     TARG_MACH=x86_64
#     TARG_SUMMARY=x86_64_gcc-4.7.2
#     TARG_TITLE_BAR=gcc-4.7.2
#     TARG_TITLE_BAR_PAREN=(gcc-4.7.2)
#
# See the Lmod web site for more details.
module()
{
  eval $($LMOD_CMD bash "$@")
  [ $? = 0 ] && eval $(${LMOD_SETTARG_CMD:-:} -s sh)
}

if [ "${LMOD_SETTARG_CMD:-:}" != ":" ]; then
  settarg () {
    eval $(${LMOD_SETTARG_CMD:-:} -s sh "$@" )
  }
fi

########################################################################
# ml is a shorthand tool for people who can't type moduel, err, module.
# It is also a combination command:
#     ml            -> module list
#     ml gcc        -> module load gcc
#     ml -gcc intel -> module unload gcc; module load intel
# It does much more; run "ml --help" for more information.
unalias ml > /dev/null 2>&1
ml()
{
  eval $($LMOD_DIR/ml_cmd "$@")
}

export_module=$(echo "YES" | tr '[:upper:]' '[:lower:]')
if [ -n "$BASH_VERSION" -a "$export_module" != no ]; then
  export -f module
  export -f ml
fi
unset export_module

########################################################################
# clearMT removes the ModuleTable from your environment. It is rarely
# needed but is useful sometimes.
clearMT()
{
  eval $($LMOD_DIR/clearMT_cmd bash)
}

########################################################################
# The following makes the action of settarg available to the title bar
# for both xterms and screen, but only for interactive shells.
if [ "$PS1" ]; then
  if [ -n "$LMOD_FULL_SETTARG_SUPPORT" -a "$LMOD_FULL_SETTARG_SUPPORT" != no ]; then
    xSetTitleLmod()
    {
      builtin echo -n -e "\033]2;$1\007";
    }
    SET_TITLE_BAR=:
    case $TERM in
      xterm*)
        SET_TITLE_BAR=xSetTitleLmod
        ;;
    esac
    SHOST=${SHOST-${HOSTNAME%%.*}}
    precmd()
    {
      eval $(${LMOD_SETTARG_CMD:-:} -s bash)
      ${SET_TITLE_BAR:-:} "${TARG_TITLE_BAR_PAREN}${USER}@${SHOST}:${PWD/#$HOME/~}"
      ${USER_PROMPT_CMD:-:}
    }
    # define PROMPT_COMMAND to be precmd iff it isn't defined already
    : ${PROMPT_COMMAND:=precmd}
  fi
fi

########################################################################
# Make tab completions available to bash users.
if [ ${BASH_VERSINFO:-0} -ge 3 ] && [ -r {{ lmoddir }}/lmod/lmod/init/lmod_bash_completions ] && [ -n "$PS1" ]; then
  . {{ lmoddir }}/lmod/lmod/init/lmod_bash_completions
fi
if ($?tcsh) then
  set modules_shell="tcsh"
else
  set modules_shell="csh"
endif
set exec_prefix='/usr/bin'
set prefix=""
set postfix=""
if ( $?histchars ) then
  set histchar = `echo $histchars | cut -c1`
  set _histchars = $histchars
  set prefix  = 'unset histchars;'
  set postfix = 'set histchars = $_histchars;'
else
  set histchar = \!
endif
if ($?prompt) then
  set prefix  = "$prefix"'set _prompt="$prompt";set prompt="";'
  set postfix = "$postfix"'set prompt="$_prompt";unset _prompt;'
endif
if ($?noglob) then
  set prefix  = "$prefix""set noglob;"
  set postfix = "$postfix""unset noglob;"
endif
set postfix = "set _exit="'$status'"; $postfix; /usr/bin/test 0 = "'$_exit;'
alias module $prefix'eval `'$exec_prefix'/modulecmd '$modules_shell' '$histchar'*`; '$postfix
unset exec_prefix
unset prefix
unset postfix

setenv MODULESHOME /usr/share/Modules
if (! $?MODULEPATH ) then
  setenv MODULEPATH `sed -n 's/[ #].*$//; /./H; $ { x; s/^\n//; s/\n/:/g; p; }' ${MODULESHOME}/init/.modulespath`:/usr/local/Modules/modulefiles
endif
if (! $?LOADEDMODULES ) then
  setenv LOADEDMODULES ""
endif
MODULESHOME=/usr/share/Modules
export MODULESHOME

if [ "${LOADEDMODULES:-}" = "" ]; then
  LOADEDMODULES=
  export LOADEDMODULES
fi

if [ "${MODULEPATH:-}" = "" ]; then
  # collapse .modulespath (comments stripped) into a ":"-separated list
  MODULEPATH=`sed -n 's/[ #].*$//; /./H; $ { x; s/^\n//; s/\n/:/g; p; }' ${MODULESHOME}/init/.modulespath`:/usr/local/Modules/modulefiles
  export MODULEPATH
fi

if [ ${BASH_VERSINFO:-0} -ge 3 ] && [ -r ${MODULESHOME}/init/bash_completion ]; then
  . ${MODULESHOME}/init/bash_completion
fi

# wrap modulecmd and log each invocation via the local modulelog helper
module() { eval `/usr/bin/modulecmd bash $*`; /usr/local/bin/modulelog $*;}
export -f module
---
source_dir: /tmp
soft_dir: /usr/local
lmoddir: /opt/lmod
modulecmddir: /usr/share
lmod_version: 5.8.6
---
- name: add key to root ssh
  template: dest=/root/.ssh/authorized_keys mode=600 owner=root group=root src=authorized_keys.j2
  become: true
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvjn5cQuMkqTo04ZnkuDXfUBeAt7oZ6xrT4phfMemqx12dDqLyFrMgUWOoVMFj+TNyR5M8WOCI6CRT6EXOMtqaxhPtWB1QlDNo0Ml8xTzSKckUO0EhdqNKh+nlQfVeaVIx0DZZeWWNpPCrKPCM4TSAXXiwtZuImd6/Zo4RI1x+oTcFR9zQulUGUuX8rf7+4c/oKr58B+La8bXP8QujtfLm29pl1kawSouCfdxt93wRfbISM7mGs/WqzttRXL9m5AeOMuo5S4Ia0GPMcIEUfsQhEyEU7tiTpEq5lDdf6H7a9SlHXzhd9f2Dn3mlv3mmQHaGBJvUuWmVwydxkdtCRQhOQ== root@m2-m
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2xrAkFRdYBpYs14AYSzdPFcIOt2zKXIgjPpyj/6eg/yl3y8N84T9VNw9ATRzb3+PJEw1lOfah6xLkFl7FueT6359y14c7wkNByGHgcL022SludkhM2zBe/3ebhcBs11L4Z725rqVnGDSKdKuwZjbCmUtu/nHwGYU/BnLKbQXMVyq53L5cbIyWGfvItPnwCF2ZMy1v0lmnFs1O3qDK9U/qcwc/77MTB0Z/ey0zsoXvmxjkdYr+zgQLRNm2+fkCXn+ZorbeDwWjhHE21arhMym5x3VG0XU2Ob9nL1Z2xEGQVSnBVWeadTMNzkfM8U07Md2tSOIC5B3ePETxk97puxbEQ== root@m2-m
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPijQ597uLqEPAvVZXQlSjrUfFl2h7SRBTCRhH4hQJMVu55dhFYiojJZ0tjjV3jTcgWs1AsyRp3wDtNp8iQxbwEY2JPxCOjNuH0et4I/y3y6VUjcVWanSaIkdPf5AFNb9KIXo3Hvdyvav8SfFpioRQ0FKp8SZs1JYXpuQ0mZY26oKCKcNsWXv9ZN7knUN0xvYNMycpCnI2Nl666Zrs0gGyJ6e+Xq5bpk1lm8nuK9q52bTRjxqtdEBuSGwkZea+NBJzpYw5rEucteQI66y6tzFuYJk2WC4bUifffIxnkQXKYVynJg1MJ2CGI69r9hXt9eUtH3WrDxrJGmCau8jD3lib hines@sparge
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAnakq6Lgq2n6yjcMaC7xQXMDMRdN33T6mPCqRy+TPdu0aPvVty0UFeAWsCyTxHeVfst9Vr0HwRRBvNihp1CJuOWGbk0H5a8yALDhLqoHazv2jlMQcLDgTktw0Jgo38+tcBShJyey1iHh8X5WgsS5/hgxR3OzoNBEzqzHUidMO/EI0ahNlM60l8EYL8Ww799NmPgqdPbwxK9nHsoFmx/NKhnUdronSg33L0CJZT3t2fccXAq+4Pbm7uYEkL3T/NgMdgpG5mKS3mKDtKyyKm2gOf3fVzExFew2etBxB3ANPEWvSuJ2XwXQv8sFE1722XQVR4RFgilCWUqXSN7EmqoHkNQ== jupiter@cvlproject
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAsBtPRJtDJzyW+Utu0v03wklUpvzS5c1E34ysGDMepGU8VT1phJQ2EwRPWVLdRjVHnuhrEeeUHMyQwOtLEdvTPFnw5u/4bHQ+37iwtAeTV6oyPARJVzJLRGuDUuFdkQbXN7xxi/0KUljWgswLN34UV+p5PL79kQlErh1QCN06z5k=
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2xrAkFRdYBpYs14AYSzdPFcIOt2zKXIgjPpyj/6eg/yl3y8N84T9VNw9ATRzb3+PJEw1lOfah6xLkFl7FueT6359y14c7wkNByGHgcL022SludkhM2zBe/3ebhcBs11L4Z725rqVnGDSKdKuwZjbCmUtu/nHwGYU/BnLKbQXMVyq53L5cbIyWGfvItPnwCF2ZMy1v0lmnFs1O3qDK9U/qcwc/77MTB0Z/ey0zsoXvmxjkdYr+zgQLRNm2+fkCXn+ZorbeDwWjhHE21arhMym5x3VG0XU2Ob9nL1Z2xEGQVSnBVWeadTMNzkfM8U07Md2tSOIC5B3ePETxk97puxbEQ== root@m2-m
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEApJTDPfappcYbNE3Z0+5Vsm4Sw2xD3PdcW+V1w6X6tpebG/bpUhnn9XsALkZYyKttql2vV3bqL6Fx5ZAFhHRhH0exdQEgc8hSvpX5gCCCUNqrL+mP8f4S59E0ha5+nBmMaf4WABHiZYfeoGhn7HHNQY0Up/qfzDPSvWo+ZaVQAqXcYLGTxaP70yywHOYABakJtBVKKkI1YPu83HFDVfw1PoYVaS5GAmEscq6nwoyC0Jm/pDirUtMoRibG2iiV6uYKQDvWrO9fBrGmavpmUT/ECtmcnrWj7V9zXzSi17HJhkq6gYc68iu6h8TBNJrIUE9Kgi07aWFRM9fbIM1ZVD/aEQ== ec2-user@cvl23server
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpuXUhr1Vzl1WljuFYSFIArU8wtlKWpkVLF4hCUb4dVWNEPU/FM1gkg4hPH+rTNPManRAJ8vxiCtMgLtvae2j1elO0edkM6BZReVDFMYo0fZVBbVR8fzvXRWD5ArXJeNI2uZ4fYTil3SoC3N0n+ySjqFriIkcBpxthKVcoOlK+yccOvCPGNWgqcSGFfMEKTR8P18ED83i7sOF2nzpH0RBo2/N7ry5Gzvfw859W7KScw/3uI7fzog6hW/P4niOQIZfG56enHHos0l7oymxeQRiYITqvf9Es1VatEfybk+tJhTVf1LcIqoM9v9bc0yd6QqST0+6ZiTJXCQCthmS0JVX1 hines@tun
---
- name: set mode on /etc/sudoers.d
  file: path=/etc/sudoers.d state=directory mode=755
  become: true
  become_user: root

- name: template sudoers file
  template: src=10-admin_group.j2 dest=/etc/sudoers.d/10-admin_group mode=440
  become: true
  become_user: root
%{{ sudo_group }} ALL=(ALL) ALL
{% if nopasswd_user is defined %}
{{ nopasswd_user }} ALL=(ALL) NOPASSWD:ALL
{% endif %}
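The template expects sudo_group to be defined and honours nopasswd_user when it is set; a minimal vars sketch (both values below are illustrative assumptions, not role defaults):

# example values only; the group must already exist on the managed host
sudo_group: sysadmin
nopasswd_user: ec2-user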
Install enroot on an Ubuntu machine.
See https://github.com/NVIDIA/enroot
CentOS 7 requires a kernel-setting modification, so it is not supported at this time.
enroot is not meant to be installed across a cluster: config files live only on the
machine where it is installed, and container files need to be owned by the user using them.

Usage:
- { role: enroot, tags: [ enroot ] }

Be aware that large files are created. You can (and should) set the following environment
variables to point at a suitable file system; a task sketch for persisting them follows the list.
export ENROOT_RUNTIME_PATH=/mnt/enroot/runtime
export ENROOT_CONFIG_PATH=/mnt/enroot/config
export ENROOT_CACHE_PATH=/mnt/enroot/cache
export ENROOT_DATA_PATH=/mnt/enroot/data
export ENROOT_TEMP_PATH=/mnt/enroot/tmp
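If you manage those exports with Ansible, a minimal sketch of a task that persists them for all users (the profile.d filename is hypothetical and the /mnt/enroot paths are the examples from above, not role defaults):

- name: persist enroot storage paths
  copy:
    dest: /etc/profile.d/enroot_paths.sh   # hypothetical filename
    mode: '0644'
    content: |
      export ENROOT_RUNTIME_PATH=/mnt/enroot/runtime
      export ENROOT_CONFIG_PATH=/mnt/enroot/config
      export ENROOT_CACHE_PATH=/mnt/enroot/cache
      export ENROOT_DATA_PATH=/mnt/enroot/data
      export ENROOT_TEMP_PATH=/mnt/enroot/tmp
  become: true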
---
# see https://nvidia.github.io/libnvidia-container/
# The manual equivalent of the tasks below:
#   DIST=$(. /etc/os-release; echo $ID$VERSION_ID)
#   curl -s -L https://nvidia.github.io/libnvidia-container/gpgkey | \
#     sudo apt-key add -
#   curl -s -L https://nvidia.github.io/libnvidia-container/$DIST/libnvidia-container.list | \
#     sudo tee /etc/apt/sources.list.d/libnvidia-container.list
#   sudo apt-get update

- name: configure NVIDIA repos
  ansible.builtin.apt_key:
    url: https://nvidia.github.io/libnvidia-container/gpgkey
    state: present
  become: true
  become_user: root
  when: ansible_os_family == "Debian"

- name: get DIST variable for repolist
  shell: . /etc/os-release; echo $ID$VERSION_ID
  register: DIST
  when: ansible_os_family == "Debian"

- name: Print out value of DIST
  debug: msg={{ DIST.stdout }}
  when: ansible_os_family == "Debian"

#- name: Add specified repository into sources list
#  ansible.builtin.apt_repository:
#    #repo: "deb https://nvidia.github.io/libnvidia-container/{{ DIST.stdout }}/libnvidia-container.list"
#    repo: "deb https://nvidia.github.io/libnvidia-container/ubuntu20.04 libnvidia-container"
#    state: present

- name: Add specified repository into sources list
  shell: "curl -s -L https://nvidia.github.io/libnvidia-container/{{ DIST.stdout }}/libnvidia-container.list | sudo tee /etc/apt/sources.list.d/libnvidia-container.list"
  become: true
  become_user: root
  when: ansible_os_family == "Debian"

- name: Update apt-get repo and cache
  apt: update_cache=yes force_apt_get=yes cache_valid_time=3600
  become: true
  become_user: root
  when: ansible_os_family == "Debian"

- name: install NVIDIA container tools
  package:
    state: present
    name:
      - libnvidia-container1
      - libnvidia-container-tools
  become: true
  become_user: root
  when: ansible_os_family == "Debian"

- name: get architecture
  shell: dpkg --print-architecture
  register: ARCH
  when: ansible_os_family == "Debian"

- name: Print out value of ARCH
  debug: msg={{ ARCH.stdout }}
  when: ansible_os_family == "Debian"

- name: install hardened enroot deb 1 of 2
  apt:
    deb: "https://github.com/NVIDIA/enroot/releases/download/v3.3.0/enroot-hardened_3.3.0-1_{{ ARCH.stdout }}.deb"
  become: true
  become_user: root
  when: ansible_os_family == "Debian"

- name: install hardened enroot deb 2 of 2
  apt:
    deb: "https://github.com/NVIDIA/enroot/releases/download/v3.3.0/enroot-hardened+caps_3.3.0-1_{{ ARCH.stdout }}.deb"
  become: true
  become_user: root
  when: ansible_os_family == "Debian"
- name: see if cloud.cfg exists
  stat:
    path: /etc/cloud/cloud.cfg
  register: cloudcfg
- name: set preserve_hostname on CentOS VMs
  lineinfile:
    dest: /etc/cloud/cloud.cfg
    line: "preserve_hostname: true"
    state: present
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7" and cloudcfg.stat.islnk is defined
- name: set domainname by sysctl
  shell: sysctl kernel.domainname="{{ domain }}"
  become: true
- name: remove preserve_hostname_false on CentOS
  lineinfile:
    dest: /etc/cloud/cloud.cfg
    line: "preserve_hostname: false"
    state: absent
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: get hostname by sysctl
  shell: /sbin/sysctl kernel.hostname | /usr/bin/cut -f 3 -d " "
  register: sysctl_hostname
  check_mode: no
  changed_when: false
- name: set hostname by sysctl
  shell: /sbin/sysctl kernel.hostname="{{ inventory_hostname }}"
  become: true
  when: not sysctl_hostname.stdout == inventory_hostname
- name: set /etc/sysconfig/network on CentOS 6
  lineinfile: dest=/etc/sysconfig/network line='HOSTNAME={{ inventory_hostname }}' regexp='^HOSTNAME'
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "6"
- name: set /etc/sysctl.conf on Debian 8
  lineinfile: dest=/etc/sysctl.conf line='kernel.domainname = {{ domain }}' regexp='^#kernel.domainname'
  become: true
  when: ansible_distribution == "Debian" and ansible_distribution_major_version == "8"
- name: set /etc/hostname
  template: src=hostname dest=/etc/hostname
  become: true
  become_user: root

- name: install hosts file
  copy: src=files/etcHosts dest=/etc/hosts owner=root mode=644
  become: true
{{ inventory_hostname }}
This role adds all the packages we think are useful but that aren't clearly a dependency of some other function.

Before calling it, you may want to define the following lists:
extra_packages  # list of yum packages; see the code for how an alternative vars file is loaded when this is not defined
excludes        # list of packages to exclude

Usage:
roles:
- { role: extra_packages, tags: [ other, extra_packages ] }
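A minimal vars sketch for the play (the package names here are illustrative assumptions, not defaults shipped with the role):

vars:
  extra_packages:
    - htop
    - strace
  excludes:
    - kernel*        # pattern passed to yum's exclude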
---
- name: load extra_packages from play
  include_vars: "vars/extra_packages_{{ ansible_distribution }}_{{ ansible_distribution_major_version }}.yml"
  when: extra_packages is not defined

- name: load vars from role
  include_vars: "{{ ansible_distribution }}_{{ ansible_distribution_major_version }}.yml"
  when: extra_packages is not defined

- name: "Clear yum cache"
  command: yum clean all
  become: true
  become_user: root
  when: ansible_os_family == 'RedHat'
  changed_when: false

- name: "Clear yum pending transactions"
  command: yum-complete-transaction --cleanup-only
  become: true
  become_user: root
  register: yumCompleteTransactioncall
  when: ansible_os_family == 'RedHat'
  changed_when: '"No unfinished transactions left." not in yumCompleteTransactioncall.stdout'

- name: "Install extra packages CentOS"
  yum:
    name: "{{ extra_packages }}"
    exclude: "{{ excludes|join(',') }}"
    update_cache: yes
    state: present
  become: true
  become_user: root
  when:
    - '"CentOS" in ansible_distribution'
  register: result

- name: "Install extra packages RedHat"
  yum:
    name: "{{ extra_packages }}"
    exclude: "{{ excludes|join(',') }}"
    update_cache: yes
    state: present
    enablerepo: "org_monash_uni_EPEL_7_EPEL_7_-_x86_64"
  become: true
  become_user: root
  when:
    - '"RedHat" in ansible_distribution'
    - '"DGX" in ansible_product_name'
  register: result

- name: "Install extra packages from epel only"
  yum:
    name: "{{ extra_packages_epel }}"
    update_cache: yes
    state: present
    enablerepo: epel
  become: true
  become_user: root
  when:
    - '"CentOS" in ansible_distribution'

- name: "Install extra packages"
  apt: "name={{ extra_packages }} update_cache=yes state=present"
  become: true
  become_user: root
  when: ansible_os_family == 'Debian'

- name: "Install admin packages CentOS (repo setup)"
  yum:
    name:
      - git
      - centos-release-ansible-29.noarch
      - centos-release-configmanagement
    update_cache: yes
    state: latest
    enablerepo:
      - extras
  become: true
  become_user: root
  when:
    - '"CentOS" in ansible_distribution'

- name: "Install admin packages CentOS"
  yum:
    name:
      - ansible
    update_cache: yes
    state: latest
    enablerepo:
      - centos-ansible-29
  become: true
  become_user: root
  when:
    - '"CentOS" in ansible_distribution'

- name: "Install admin packages Ubuntu"
  package:
    name:
      - git
      - ansible
    state: latest
  become: true
  become_user: root
  when:
    - '"Ubuntu" in ansible_distribution'