Commit a6cf708b authored by Andreas Hamacher

Merge branch 'ubuntu' into 'master'

Ubuntu

See merge request hpc-team/ansible_cluster_in_a_box!293

Former-commit-id: bc27708d
parents 6340a752 627b3229
Showing 360 additions and 81 deletions
......@@ -23,7 +23,7 @@ trigger_pipeline_in_Clusterbuild:
- ansible
script:
- echo ${CI_JOB_TOKEN}
- curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=aciab_upstream https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline # ID is from clusterbuild
- curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline # ID is from clusterbuild
trigger_pipeline_in_monarch:
......@@ -32,7 +32,7 @@ trigger_pipeline_in_monarch:
- ansible
script:
- echo ${CI_JOB_TOKEN}
- curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=cicd https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline # ID is from monarch
- curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline # ID is from monarch
yamllint:
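
Both trigger jobs above now point the downstream pipeline at the master ref instead of a feature branch. For reference, a minimal sketch of one such trigger job as a whole (the stage name is an assumption and is not visible in this hunk; the project ID, token variable, and URL are taken from the diff):

trigger_pipeline_in_Clusterbuild:
  stage: trigger   # assumed; the stage is not shown in this hunk
  tags:
    - ansible
  script:
    - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline  # 193 is the Clusterbuild project ID
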
......@@ -134,9 +134,9 @@ tests:
- grep -qv "I could not find any resouces tagged with project_name:" ./files/inventory.$STACKNAME #fail if inventory file is empty
- ansible -m ping -i files/inventory.$STACKNAME --key-file ../gc_key.pem all
- ansible -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sudo ls" all
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "sinfo" ManagementNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "squeue" ManagementNodes
# Need to find a better check for sinfo
#- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name sinfo -type f" ManagementNodes
#- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "find /opt/ -name squeue -type f" ManagementNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet mariadb" SQLNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmctld" ManagementNodes
- ansible -B 30 -i files/inventory.$STACKNAME --key-file ../gc_key.pem -a "systemctl is-active --quiet slurmdbd" ManagementNodes
......@@ -147,6 +147,7 @@ tests:
- bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
- bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"
extended:
stage: extended
......@@ -159,7 +160,7 @@ extended:
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- source ./$NECTAR_ALLOCATION-openrc.sh
- bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
- bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME ${CI_PROJECT_NAME}
only:
variables:
- $EXTENDED != null
......@@ -180,7 +181,7 @@ manual_cluster_spawn:
- echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
script:
- source ./$NECTAR_ALLOCATION-openrc.sh
- bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
- bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME ${CI_PROJECT_NAME}
- openstack stack list
- export STACKNAME=$MANUAL_STACKNAME
- sleep 25
......
......@@ -114,7 +114,7 @@ resources:
volume_id: { get_resource: DBVolume }
instance_uuid: { get_resource: SQLNode0 }
MgmtNodes:
MgmtNodesCentos7:
type: "OS::Heat::ResourceGroup"
properties:
count: 2
......@@ -130,7 +130,23 @@ resources:
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
project_name: { get_param: project_name }
LoginNodes:
MgmtNodesU:
type: "OS::Heat::ResourceGroup"
properties:
count: 0
resource_def:
type: My::Server::MgmtNode
properties:
#avz: { get_param: avz }
image: { get_param: ubuntu_1804_image_id }
ansible_ssh_user: ubuntu
mynodename:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmtU%index%' ]]
ssh_key: { get_param: ssh_key }
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
project_name: { get_param: project_name }
LoginNodesC:
type: "OS::Heat::ResourceGroup"
properties:
count: 1
......@@ -151,6 +167,27 @@ resources:
networks:
- network: { get_param: NetID }
LoginNodesU:
type: "OS::Heat::ResourceGroup"
properties:
count: 0
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: ubuntu_1804_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'loginU%index%' ]]
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
metadata:
ansible_host_groups: [ LoginNodes ]
ansible_ssh_user: ubuntu
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
DesktopNodes:
type: "OS::Heat::ResourceGroup"
properties:
......@@ -172,7 +209,28 @@ resources:
networks:
- network: { get_param: NetID }
ComputeNodes:
ComputeNodesU:
type: "OS::Heat::ResourceGroup"
properties:
count: 0
resource_def:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
image: { get_param: ubuntu_1804_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computeU%index%' ]]
security_groups: [ default, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: SSHMonashSecGroupID } ]
metadata:
ansible_host_groups: [ ComputeNodes ]
ansible_ssh_user: ubuntu
project_name: { get_param: project_name }
networks:
- network: { get_param: NetID }
ComputeNodesCentos7:
type: "OS::Heat::ResourceGroup"
properties:
count: 1
......@@ -184,8 +242,8 @@ resources:
image: { get_param: centos_7_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec%index%' ]]
security_groups: [ default, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: SSHMonashSecGroupID } ]
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec7%index%' ]]
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
metadata:
ansible_host_groups: [ ComputeNodes ]
ansible_ssh_user: ec2-user
......@@ -201,14 +259,14 @@ resources:
type: "OS::Nova::Server"
properties:
#availability_zone: { get_param: avz }
flavor: m3.xsmall
flavor: mon.c10r35.gpu-k2
image: { get_param: ubuntu_1804_image_id }
key_name: { get_param: ssh_key }
name:
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopu%index%' ]]
list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopu%index%' ]]
security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
metadata:
ansible_host_groups: [ DesktopNodes ]
ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, K1, VisNodes ]
ansible_ssh_user: ubuntu
project_name: { get_param: project_name }
networks:
......
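
All of the new Ubuntu groups (MgmtNodesU, LoginNodesU, ComputeNodesU) are added with count: 0, so the stack builds exactly as before until a group is scaled up. A sketch of enabling a single Ubuntu compute node, assuming the rest of the template stays as defined above:

ComputeNodesU:
  type: "OS::Heat::ResourceGroup"
  properties:
    count: 1    # raised from 0; Heat then creates one server from ubuntu_1804_image_id
    resource_def:
      type: "OS::Nova::Server"
      # remaining properties exactly as defined in the template above
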
......@@ -8,14 +8,8 @@ function usage {
exit 1
}
if [ "$#" -ne 2 ]; then
echo "Illegal number of parameters expecting 2"
usage
fi
STACKNAME=$2
if [[ "$STACKNAME" == "CICD"* ]]; then
echo "CICD found in stackname. doing nothing"
else
......
resource_registry:
My::Server::MgmtNode: mgmtnode_HOT.yaml
My::Server::MgmtNode: ./mgmtnode_HOT.yaml
......@@ -24,7 +24,7 @@
# - { role: disable_selinux, tags: [ disableselinux ] }
- { role: etcHosts, tags: [ networking ] }
- { role: config_repos, tags: [ repos ] }
- { role: upgrade }
- { role: upgrade, tags: [ upgrade ]}
- { role: set_password }
......
......@@ -29,10 +29,7 @@
# - { role: ldapclient, tags: [ authentication ] }
# - { role: ssh-password-login }
# - { role: enable_sudo_group }
# - { role: make_filesystems, volumes: "{{ glustervolumes }}" }
# - { role: gluster_server, volname: "gv", brickmnt: '/gbrick', gluster_servers: "{{ groups['ManagementNodes'] }}", replicas: 2, tags: [ gluster_server ] }
# - { role: gluster_volcreate, volname: "gv", gluster_servers: "{{ groups['ManagementNodes'] }}", brickmnt: '/gbrick', replicas: 2 }
# - { role: gluster_client, volname: "gv", gluster_servers: ['mgmt0','mgmt1','sql0'], volmnt: '/glusterVolume' }
- { role: nfs-client, nfsMounts: "{{ mgmtNfsMounts }}", tags: [ nfs ] }
- { role: slurmdb-config, tags: [ slurm, slurmdb-config ] }
- { role: slurm-common, tags: [ slurm, slurm-common ] }
......
---
- hosts: ManagementNodes
gather_facts: false
tasks:
- name: have ssh running
service:
name: sshd
state: started
- hosts: ComputeNodes
gather_facts: false
tasks:
- name: have munge service running
service:
name: munge
state: started
\ No newline at end of file
#!/bin/bash
function usage {
echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql}" INVENTORY_FILE KEY
echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql, slurm}" INVENTORY_FILE KEY
exit 1
}
......@@ -23,22 +23,4 @@ function run_them ()
done
}
# I think I am just checking whether $1 is one of the listed strings (see usage); not proud of this at all, but it works
case "$1" in
all)
;;
ComputeNodes)
;;
ManagementNodes)
;;
NFSNodes)
;;
SQLNodes)
;;
LoginNodes)
;;
*)
usage
esac
run_them $1 $2 $3
\ No newline at end of file
---
- hosts: ManagementNodes,LoginNodes,ComputeNodes
gather_facts: false
tasks:
- name: add user hpctest
user:
name: hpctest
shell: /bin/bash
become: true
- hosts: ManagementNodes
gather_facts: false
tasks:
- name: Create a parent account
command: ./sacctmgr -i add account parentAccount cluster=m3 Description="Test parent account" Organization="Monash"
args:
chdir: '/opt/slurm-latest/bin'
become: true
register: result
failed_when: result.rc != 0 and result.stdout != " Nothing new added."
- name: Create a project associated with a given parent
command: ./sacctmgr -i add account testProject parent=parentAccount cluster=m3 Organization="Monash"
args:
chdir: '/opt/slurm-latest/bin'
become: true
register: result
failed_when: result.rc != 0 and result.stdout != " Nothing new added."
- name: Create a user and associate them with a project
command: ./sacctmgr -i create user hpctest cluster=m3 account=testProject partition=batch
args:
chdir: '/opt/slurm-latest/bin'
become: true
register: result
failed_when: result.rc != 0 and result.stdout != " Nothing new added."
#sudo `which sacctmgr` modify user where name=hpctest set maxjobs=200
#sudo `which sacctmgr` update account hpctest set qos=normal
#sudo `which sacctmgr` update account testProject set qos=normal
- hosts: LoginNodes
gather_facts: false
tasks:
- name: make sure munge is running
service:
name: munge
state: started
become: true
- name: simple srun test
command: ./srun --ntasks=1 --partition=batch hostname
args:
chdir: '/opt/slurm-latest/bin'
become: true
become_user: hpctest
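
The commented-out sacctmgr lines above point at limits (maxjobs, QoS) that this playbook does not yet apply. A hedged sketch of the maxjobs limit as a task, taken directly from the first commented command; it is illustrative and not part of this merge request:

- name: limit hpctest to 200 concurrent jobs (sketch from the commented command)
  command: ./sacctmgr -i modify user where name=hpctest set maxjobs=200
  args:
    chdir: '/opt/slurm-latest/bin'
  become: true
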
......@@ -3,8 +3,8 @@ desktopNodeList:
- { name : 'DesktopNodes', interface : 'eth0' }
clustername: "m3"
projectname: "m3"
slurm_version: 19.05.3-2
munge_version: 0.5.11
slurm_version: 19.05.4
munge_version: 0.5.13
nhc_version: 1.4.2
munge_dir: /opt/munge-{{ munge_version }}
slurm_dir: /opt/slurm-{{ slurm_version }}
......
---
sudo_group: systems
nagios_home: "/var/lib/nagios"
nvidia_version: "390.46"
nvidia_version: "367.134"
yumdisablerepo:
- 'base'
......@@ -16,6 +16,7 @@ yumenablerepo:
gpumap:
'K1': 'K1'
'K2': 'K2'
'K80': 'K80'
'P100-PCIE-16GB': 'P100'
'V100-PCIE-16GB': 'V100'
......@@ -119,8 +119,8 @@ JobCompType=jobcomp/none
Prolog={{ slurmjob.prolog }}
Epilog={{ slurmjob.epilog }}
{% else %}
Prolog={{ slurm_dir }}/bin/slurm.prolog
Epilog={{ slurm_dir }}/bin/slurm.epilog
Prolog=/opt/slurm/etc/slurm.prolog
Epilog=/opt/slurm/etc/slurm.epilog
{% endif %}
#
# ACCOUNTING
......
......@@ -6,8 +6,7 @@
line: "{{ reposerverip }} {{ reposervername }}" #this is duplicated in the role calculateEtcHosts
owner: root
group: root
become: true
become: True
#- name: remove default repos
# file:
......@@ -44,6 +43,7 @@
- name: get enabled repos
#shell: yum repolist | grep -v "repo id" | grep -v "Loaded plugins" | head -n -1 | cut -f 1 -d '/' | sed -s 's/\!//'
shell: yum repolist all | grep enabled | cut -f 1 -d '/' | sed -s 's/\!//'
when: ansible_os_family == 'RedHat'
register: repolist
check_mode: no
changed_when: False
......@@ -55,7 +55,8 @@
with_items: "{{ repolist.stdout_lines|difference(yumenablerepo) }}"
become: true
become_user: root
ignore_errors: false
ignore_errors: true
when: ansible_os_family == 'RedHat'
#- name: Enable epel
......@@ -75,11 +76,6 @@
become: true
when: ansible_distribution_release == 'trusty'
- name: add repos apt
shell: "add-apt-repository -y ppa:gluster/glusterfs-3.7"
become: true
when: ansible_distribution == 'Ubuntu'
- name: apt-get update
apt: update_cache=True
become: true
......
......@@ -25,12 +25,62 @@
- xorg-x11-xauth
- xorg-x11-proto-devel
- xorg-x11-xkb-utils
when: ansible_os_family == 'RedHat'
- name: install deps
apt:
name:
- 'gcc'
- 'perl'
- 'wget'
- 'pciutils'
- 'linux-headers-generic'
- 'xterm'
- 'libx11-dev'
- 'libx11-6'
- 'libglvnd-dev'
- 'xserver-xorg'
- 'vim'
state: present
update_cache: yes
become: true
become_user: root
when: ansible_distribution == 'Ubuntu'
- name: install deps
yum: name={{ item }} state=installed
become: true
with_items:
- gcc
- perl
- wget
- pciutils
- kernel-headers
- kernel-devel
- xterm
- libX11-common
- libX11-devel
- libX11
- libglvnd-devel
- xorg-x11-server-common
- xorg-x11-util-macros
- xorg-x11-server-utils
- xorg-x11-font-utils
- xorg-x11-server-Xorg
- xorg-x11-glamor
- xorg-x11-xinit
- xorg-x11-utils
- xorg-x11-xauth
- xorg-x11-proto-devel
- xorg-x11-xkb-utils
when: ansible_os_family == 'RedHat'
- name: install development tools
yum: name="@Development Tools" state=installed
become: true
become_user: root
ignore_errors: yes
when: ansible_os_family == 'RedHat'
- name: disable nouveau
template: src=blacklist-nouveau.conf.j2 dest=/etc/modprobe.d/blacklist-nouveau.conf
......@@ -99,7 +149,6 @@
become: true
when: install_driver
- name: stop the persistence daemon
service: name=nvidia-persistenced state=stopped
become: true
......@@ -138,7 +187,7 @@
when: install_driver
- name: build nvidia driver
shell: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run
shell: /tmp/NVIDIA-Linux-x86_64-{{ nvidia_version }}.run -q -a -n -X -s
become: true
when: install_driver
......@@ -164,6 +213,15 @@
# become: true
# become_user: root
# when: template_xorgconf is defined and template_xorgcon
- name: install dependencies for nvidia-xconf-gen
apt:
name:
- python-jinja2
- python3-jinja2
update_cache: yes
state: present
become: true
become_user: root
- name: run nvidia-xconf-gen
script: scripts/nvidia-xconf-gen.py
......
......@@ -12,8 +12,7 @@
- gcc
- lua-devel
become: true
when:
- '"CentOS" in ansible_distribution'
when: ansible_os_family == 'RedHat'
- name: install lua RHEL7
yum: name={{ item }} state=installed update_cache=yes enablerepo="Monash_University_EPEL7_EPEL_7_-_x86_64"
......@@ -30,18 +29,8 @@
- '"RedHat" in ansible_distribution'
become: true
- name: install lua debian
apt: name={{ item }} state=installed
with_items:
- lua5.2
- lua5.2
- lua-filesystem
- lua-bitop
- lua-posix
- liblua5.2-0
- liblua5.2-dev
- tcl
apt: name=lmod state=installed
become: true
when: ansible_os_family == 'Debian'
......@@ -49,13 +38,12 @@
stat: path="{{ soft_dir }}/lmod/{{ lmod_version }}"
register: lmodstat
- name: Download LMOD
get_url:
url=http://consistency0/src/Lmod-{{ lmod_version }}.tar.bz2
dest={{ source_dir }}/Lmod-{{ lmod_version }}.tar.bz2
mode=0444
when: not lmodstat.stat.exists
when: ansible_os_family == 'RedHat' and not lmodstat.stat.exists
- name: Uncompress LMOD
unarchive:
......@@ -63,10 +51,11 @@
dest={{ source_dir }}
copy=no
creates={{ source_dir }}/Lmod-{{ lmod_version }}/README
when: not lmodstat.stat.exists
when: ansible_os_family == 'RedHat' and not lmodstat.stat.exists
- name: Compile and install Lmod
shell: cd {{ source_dir }}/Lmod-{{ lmod_version }}; ./configure --prefix={{ soft_dir }} --with-mpathSearch=YES --with-caseIndependentSorting=YES && make install LUA_INCLUDE={{ lua_include }}
args:
creates: "{{ soft_dir }}/lmod/{{ lmod_version }}"
become: true
when: ansible_os_family == 'RedHat'
\ No newline at end of file
---
- name: Make sure OS is updated since apt install might fail
apt:
update_cache: yes
become: true
when: ansible_os_family == "Debian"
- name: "Installing MySQL Debian"
apt: name="{{ server_packages }}" update_cache=yes state=present
become: true
when: ansible_os_family == "Debian"
- name: Installing MySQL RedHat
yum: name={{ item }}
with_items: "{{ server_packages }}"
become: true
when: ansible_os_family == "RedHat"
- name: make sure mysql conf directory exists
file: dest=/etc/mysql/conf.d state=directory
become: true
register: mysqldb_confdir_create
- name: "Starting MySQL"
service: name={{ sqlServiceName }} state=started enabled=true
become: true
#- name: "Adding root"
# become: true
# mysql_user: name=root host="{{ item }}" password="{{ mysql_root_password }}" login_user=root login_password="{{ mysql_root_password }}" check_implicit_admin=yes
# with_items:
# - "{{ ansible_hostname }}"
# - 127.0.0.1
# - ::1
# - localhost
- name: Check that the slurm_acct_db_directory exists
stat:
path: /var/lib/mysql/slurm_acct_db/ #defined in /vars/filesystems.yaml
register: slurm_acct_db_directory_result
# this will only work if a completely fresh db gets installed because it gets shipped with a blank root pw
- name: update mysql root password for all root accounts
mysql_user: name=root host=localhost password={{ mysql_root_password }} login_user=root
when: not slurm_acct_db_directory_result.stat.exists and mysqldb_confdir_create.changed
- name: "Adding user database"
mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }}
- name: "Giving priviliges to user"
mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
when: mysql_user_host is defined
- name: "Giving priviliges to user"
mysql_user: name={{ mysql_user_name }} host={{ hostvars[item].ansible_fqdn }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
with_items: "{{ mysql_user_hosts_group }}"
when: mysql_user_hosts_group is defined
---
- name: Make sure OS is updated since apt install might fail
apt:
update_cache: yes
become: true
- name: "Installing MySQL for Ubuntu"
apt: name="{{ server_packages }}" update_cache=yes state=present
become: true
- name: Comment out bind address so it doesn't bind to 127.0.0.1
replace:
path: /etc/mysql/mariadb.conf.d/50-server.cnf
regexp: '(.*bind.*)'
replace: '#\1'
become: true
- name: make sure mysql conf directory exists
file: dest=/etc/mysql/conf.d state=directory
become: true
register: mysqldb_confdir_create
- name: "Starting MySQL"
service: name={{ sqlServiceName }} state=started enabled=true
become: true
- name: Check that the slurm_acct_db_directory exists
stat:
path: /var/lib/mysql/slurm_acct_db/ #defined in /vars/filesystems.yaml
register: slurm_acct_db_directory_result
# this will only work if a completely fresh db gets installed because it gets shipped with a blank root pw
- name: update mysql root password for all root accounts
mysql_user: name=root host=localhost password={{ mysql_root_password }} login_user=root check_implicit_admin=yes
become: true
become_user: root
- name: "Adding user database"
mysql_db: name={{ mysql_user_db_name }} state=present login_user=root login_password={{ mysql_root_password }}
become: true
become_user: root
- name: "Giving priviliges to user"
mysql_user: name={{ mysql_user_name }} host={{ mysql_user_host }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
when: mysql_user_host is defined
become: true
become_user: root
- name: "Giving priviliges to user"
mysql_user: name={{ mysql_user_name }} host={{ hostvars[item].ansible_fqdn }} password={{ mysql_user_password }} login_user=root login_password={{ mysql_root_password }} priv={{ mysql_user_db_name }}.*:ALL,GRANT state=present
with_items: "{{ mysql_user_hosts_group }}"
when: mysql_user_hosts_group is defined
become: true
become_user: root
\ No newline at end of file
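
The tasks above comment out bind-address and then start MariaDB, but a service that was already running keeps listening on 127.0.0.1 until it is restarted. A sketch of an optional restart task (an assumption, not part of this merge request; sqlServiceName is expected to resolve to mariadb as in the vars file below):

- name: restart MariaDB so the bind-address change takes effect
  service: name={{ sqlServiceName }} state=restarted
  become: true
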
---
- include_vars: "{{ ansible_distribution }}_{{ ansible_distribution_major_version }}.yml"
- include: "{{ mysql_type }}.yml"
- include: "{{ ansible_distribution }}_{{ ansible_distribution_major_version }}_{{ mysql_type }}.yml"
- include: mysql_client.yml
\ No newline at end of file
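
main.yml now dispatches on both the distribution and the mysql_type variable. On an Ubuntu 18.04 SQL node with mysql_type set to mysql_server, the distribution-specific includes above would resolve roughly to the filenames below (illustrative; the concrete names come from Ansible facts on the target host):

- include_vars: "Ubuntu_18.yml"
- include: "Ubuntu_18_mysql_server.yml"
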
---
- name: Make sure OS is updated since apt install might fail
apt:
update_cache: yes
become: true
when: ansible_os_family == "Debian"
- name: "Installing MySQL Debian"
apt: name="{{ item }}" update_cache=yes cache_valid_time=3600 state=present
with_items: "{{ server_packages }}"
apt: name="{{ server_packages }}" update_cache=yes state=present
become: true
when: ansible_os_family == "Debian"
......@@ -10,7 +15,7 @@
with_items: "{{ server_packages }}"
become: true
when: ansible_os_family == "RedHat"
- name: make sure mysql conf directory exists
file: dest=/etc/mysql/conf.d state=directory
become: true
......
server_packages:
- python
- python-dev
- libmariadb-dev
- python-pip
- libapache2-mod-wsgi
- python-mysql.connector
- mariadb-server
- python-mysqldb
client_packages:
- python
- mariadb-client
sqlServiceName: "mariadb"