Commit 0f915b30 authored by Andreas Hamacher

Merge remote-tracking branch 'origin' into gpu

Former-commit-id: b37f630d
parents 42935387 88441fba
@@ -24,7 +24,7 @@ trigger_pipeline_in_Clusterbuild:
     - ansible
   script:
     - echo ${CI_JOB_TOKEN}
-    - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=aciab_upstream https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline # ID is from clusterbuild
+    - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/193/trigger/pipeline # ID is from clusterbuild

 trigger_pipeline_in_monarch:
@@ -33,7 +33,7 @@ trigger_pipeline_in_monarch:
     - ansible
   script:
     - echo ${CI_JOB_TOKEN}
-    - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=cicd https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline # ID is from monarch
+    - curl --request POST --form token=${CI_JOB_TOKEN} --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" --form ref=master https://gitlab.erc.monash.edu.au/api/v4/projects/385/trigger/pipeline # ID is from monarch

 yamllint:
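Both jobs call GitLab's pipeline-trigger API; reduced to its moving parts, the request looks like the sketch below, with the numeric project ID (193 for clusterbuild, 385 for monarch) and the target ref as the only per-job differences:

    # generic shape of the downstream-pipeline trigger used by both jobs above;
    # <PROJECT_ID> stands in for the hard-coded project IDs
    curl --request POST \
         --form token=${CI_JOB_TOKEN} \
         --form "variables[TRIGGER_CI_COMMIT_SHA]=${CI_COMMIT_SHA}" \
         --form ref=master \
         https://gitlab.erc.monash.edu.au/api/v4/projects/<PROJECT_ID>/trigger/pipeline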
@@ -147,6 +147,7 @@ tests:
     - bash -e ./tests/run_tests.sh ManagementNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh NFSNodes "files/inventory.$STACKNAME" "../gc_key.pem"
     - bash -e ./tests/run_tests.sh SQLNodes "files/inventory.$STACKNAME" "../gc_key.pem"
+    - bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"

 extended:
   stage: extended
@@ -159,7 +160,7 @@ extended:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $STACKNAME ${CI_PROJECT_NAME}
   only:
     variables:
       - $EXTENDED != null
@@ -180,7 +181,7 @@ manual_cluster_spawn:
     - echo "$HPCCICD_openrc" > ./$NECTAR_ALLOCATION-openrc.sh
   script:
     - source ./$NECTAR_ALLOCATION-openrc.sh
-    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME
+    - bash -x ./CICD/heat/heatcicdwrapper.sh create $MANUAL_STACKNAME ${CI_PROJECT_NAME}
     - openstack stack list
     - export STACKNAME=$MANUAL_STACKNAME
     - sleep 25
@@ -189,7 +190,7 @@ manual_cluster_spawn:
   only:
     refs:
       - "cicd"

 clean:
   stage: clean
   tags:
@@ -228,11 +229,4 @@ clean:
 # after_script:
 #   - sleep 20 # artifically wait a bit to make sure it is really dead
-#   - openstack stack list
-#   - bash -x ./CICD/heat/heatcicdwrapper.sh update $HEAT_TEST_STACKNAME
-#   - openstack stack list
-#   - bash -x ./CICD/heat/heatcicdwrapper.sh delete_if_exists $HEAT_TEST_STACKNAME
-#   - openstack stack list
-# after_script:
-#   - sleep 20 # artifically wait a bit to make sure it is really dead
@@ -2,6 +2,7 @@
 heat_template_version: 2013-05-23
 description: "A simple template to boot a cluster of desktops (LoginNode, ManagementNodes and Desktop Nodes)"
 # avz parameters disabled. they are working but I want just more options than monash-02. I would like to have a parameter that says "I don't care"
+#This requires gc_secgroups to be called beforehand

 parameters:
   ubuntu_1804_image_id:
@@ -33,192 +34,33 @@ parameters:
   Flavour:
     type: string
     default: m3.xsmall
+  SlurmSecGroupID:
+    type: string
+    label: Resource ID
+    default: 6e7a09b0-981c-424f-a9b7-9fd4f4d8f416
+  NFSSecGroupID:
+    type: string
+    label: Resource ID
+    default: b07a75a3-830c-4778-96c6-8a3732ec7d6c
+  MySQLSecGroupID:
+    type: string
+    label: Resource ID
+    default: 4478f245-de5c-4177-bcbd-6fa661032cbe
+  SSHMonashSecGroupID:
+    type: string
+    label: Resource ID
+    default: c15081f4-c756-4c57-b8cf-388dd7fdbcd4
+  HTTPsSecGroupID:
+    type: string
+    label: Resource ID
+    default: 2d4510c3-ae73-44ea-9700-b6f0a00bf7aa
+  PublicSSHSecGroupID:
+    type: string
+    label: Resource ID
+    default: 8a029c04-08ce-40f1-a705-d45a2077e27d

 resources:
-  SlurmSecGroup:
-    type: "OS::Neutron::SecurityGroup"
-    properties:
-      name: "heatslurmsecgroup"
-      rules: [ { protocol: tcp, port_range_min: 12000, port_range_max: 12999},
-               { protocol: tcp, port_range_min: 6817, port_range_max: 6819},
-               { protocol: tcp, port_range_min: 1019, port_range_max: 1019}]
-  NFSSecGroup:
-    type: "OS::Neutron::SecurityGroup"
-    properties:
-      name: "heatnfssecgroup"
-      rules: [ { protocol: tcp, port_range_min: 2049, port_range_max: 2049, remote_mode: "remote_group_id"},
-               { protocol: tcp, port_range_min: 111, port_range_max: 111, remote_mode: "remote_group_id"},
-               { protocol: udp, port_range_min: 2049, port_range_max: 2049, remote_mode: "remote_group_id"},
-               { protocol: udp, port_range_min: 111, port_range_max: 111, remote_mode: "remote_group_id"} ]
-  MySQLSecGroup:
-    type: "OS::Neutron::SecurityGroup"
-    properties:
-      name: "heatmysqlsecgroup"
-      rules: [ { protocol: tcp, port_range_min: 3306, port_range_max: 3306, remote_mode: "remote_group_id"} ]
-  SSHMonashSecGroup:
-    type: "OS::Neutron::SecurityGroup"
-    properties:
-      name: "SSHMonashSecGroup"
-      rules: [ { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.208.0/20 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 114.30.64.0/21 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 118.138.0.0/16 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 118.139.0.0/17 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 130.194.0.0/16 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 203.0.141.0/24 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 203.6.141.0/24 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 203.23.136.0/24 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.192.0/20 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 202.158.212.32/27 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 130.194.13.96/27 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 49.127.0.0/16 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 202.58.246.0/24 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 202.94.69.0/24 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 103.35.228.0/22 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 43.246.232.0/22 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 103.35.228.0/22 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.208.0/20 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 49.127.0.0/16 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.220.0/22 },
-               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 43.246.232.0/22 } ]
-  webaccess:
-    type: "OS::Neutron::SecurityGroup"
-    properties:
-      name: "webaccess"
-      rules: [ { protocol: tcp, port_range_min: 80, port_range_max: 80},
-               { protocol: tcp, port_range_min: 443, port_range_max: 443} ]
   SQLNode0:
     type: "OS::Nova::Server"
@@ -229,7 +71,7 @@ resources:
       flavor: m3.xsmall
       image: { get_param: centos_7_image_id }
       key_name: { get_param: ssh_key }
-      security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: MySQLSecGroup }, { get_resource: NFSSecGroup } ]
+      security_groups: [ { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: MySQLSecGroupID }, { get_param: NFSSecGroupID } ]
       metadata:
         ansible_host_groups: [ SQLNodes, NFSNodes ]
         ansible_ssh_user: ec2-user
@@ -273,7 +115,7 @@ resources:
       volume_id: { get_resource: DBVolume }
       instance_uuid: { get_resource: SQLNode0 }
-  MgmtNodesC:
+  MgmtNodesCentos7:
     type: "OS::Heat::ResourceGroup"
     properties:
       count: 1
@@ -286,7 +128,7 @@ resources:
         mynodename:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmt%index%' ]]
         ssh_key: { get_param: ssh_key }
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
         project_name: { get_param: project_name }
   MgmtNodesU:
@@ -302,7 +144,7 @@ resources:
         mynodename:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'mgmtU%index%' ]]
         ssh_key: { get_param: ssh_key }
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup }, { get_resource: MySQLSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: MySQLSecGroupID } ]
         project_name: { get_param: project_name }
   LoginNodesC:
@@ -318,7 +160,7 @@ resources:
         key_name: { get_param: ssh_key }
         name:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'login%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+        security_groups: [ default, { get_param: PublicSSHSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
         metadata:
           ansible_host_groups: [ LoginNodes ]
           ansible_ssh_user: ec2-user
@@ -329,7 +171,7 @@ resources:
   LoginNodesU:
     type: "OS::Heat::ResourceGroup"
     properties:
-      count: 0
+      count: 1
       resource_def:
         type: "OS::Nova::Server"
         properties:
@@ -339,7 +181,7 @@ resources:
         key_name: { get_param: ssh_key }
         name:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'loginU%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
         metadata:
           ansible_host_groups: [ LoginNodes ]
           ansible_ssh_user: ubuntu
@@ -360,7 +202,7 @@ resources:
         key_name: { get_param: ssh_key }
         name:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'desktopc%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
         metadata:
           ansible_host_groups: [ DesktopNodes, VisNodes, ComputeNodes ]
           ansible_ssh_user: ec2-user
@@ -380,8 +222,8 @@ resources:
         image: { get_param: ubuntu_1804_image_id }
         key_name: { get_param: ssh_key }
         name:
-          list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec7%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+          list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computeU%index%' ]]
+        security_groups: [ default, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID }, { get_param: SSHMonashSecGroupID } ]
         metadata:
           ansible_host_groups: [ ComputeNodes ]
           ansible_ssh_user: ubuntu
@@ -389,7 +231,7 @@ resources:
         networks:
           - network: { get_param: NetID }
-  ComputeNodesC:
+  ComputeNodesCentos7:
     type: "OS::Heat::ResourceGroup"
     properties:
       count: 1
@@ -402,7 +244,7 @@ resources:
         key_name: { get_param: ssh_key }
         name:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computec7%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
         metadata:
           ansible_host_groups: [ ComputeNodes ]
           ansible_ssh_user: ec2-user
@@ -413,7 +255,7 @@ resources:
   UbuntuDesktopNodes:
     type: "OS::Heat::ResourceGroup"
     properties:
-      count: 0
+      count: 1
       resource_def:
         type: "OS::Nova::Server"
         properties:
@@ -423,7 +265,7 @@ resources:
         key_name: { get_param: ssh_key }
         name:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'gpudesktopu%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
         metadata:
           ansible_host_groups: [ DesktopNodes, GPU, ComputeNodes, K1, VisNodes ]
           ansible_ssh_user: ubuntu
@@ -465,10 +307,11 @@ resources:
         key_name: { get_param: ssh_key }
         name:
           list_join: [ '-', [ { get_param: "OS::stack_name" }, 'computerhel%index%' ]]
-        security_groups: [ default, { get_resource: SSHMonashSecGroup }, { get_resource: SlurmSecGroup }, { get_resource: NFSSecGroup } ]
+        security_groups: [ default, { get_param: SSHMonashSecGroupID }, { get_param: SlurmSecGroupID }, { get_param: NFSSecGroupID } ]
         metadata:
           ansible_host_groups: [ DGXRHELNodes ]
           ansible_ssh_user: cloud-user
         project_name: { get_param: project_name }
         networks:
           - network: { get_param: NetID }
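With the group definitions moved out to gc_secgroups.hot, the cluster template now consumes pre-created security-group IDs through the *SecGroupID parameters above, with project defaults baked in. Overriding one at stack-create time would look roughly like the sketch below; the cluster template's filename and the ID value are illustrative, as neither is shown in this view:

    # sketch: point the cluster stack at a different pre-created security group
    # (template filename and ID value are illustrative; defaults come from gc_secgroups.hot)
    openstack stack create --wait --template gc_HOT.yaml \
        --parameter SlurmSecGroupID=6e7a09b0-981c-424f-a9b7-9fd4f4d8f416 \
        $STACKNAME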
---
heat_template_version: 2013-05-23
description: # call with openstack stack [update || create ] --wait --template gc_secgroups.hot SecurityGroupStack
resources:
  SlurmSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
      name: "heatslurmsecgroup"
      rules: [ { protocol: tcp, port_range_min: 12000, port_range_max: 12999},
               { protocol: tcp, port_range_min: 6817, port_range_max: 6819},
               { protocol: tcp, port_range_min: 1019, port_range_max: 1019}]
  NFSSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
      name: "heatnfssecgroup"
      rules: [ { protocol: tcp, port_range_min: 2049, port_range_max: 2049, remote_mode: "remote_group_id"},
               { protocol: tcp, port_range_min: 111, port_range_max: 111, remote_mode: "remote_group_id"},
               { protocol: udp, port_range_min: 2049, port_range_max: 2049, remote_mode: "remote_group_id"},
               { protocol: udp, port_range_min: 111, port_range_max: 111, remote_mode: "remote_group_id"} ]
  MySQLSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
      name: "heatmysqlsecgroup"
      rules: [ { protocol: tcp, port_range_min: 3306, port_range_max: 3306, remote_mode: "remote_group_id"} ]
  PublicSSHSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
      name: "SSHSecGroup"
      rules: [ { protocol: tcp, port_range_min: 22, port_range_max: 22} ]
  SSHMonashSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
      name: "SSHMonashSecGroup"
      rules: [ { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.208.0/20 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 114.30.64.0/21 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 118.138.0.0/16 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 118.139.0.0/17 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 130.194.0.0/16 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 203.0.141.0/24 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 203.6.141.0/24 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 203.23.136.0/24 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.192.0/20 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 202.158.212.32/27 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 130.194.13.96/27 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 49.127.0.0/16 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 202.58.246.0/24 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 202.94.69.0/24 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 103.35.228.0/22 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 43.246.232.0/22 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 103.35.228.0/22 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.208.0/20 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 49.127.0.0/16 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 59.191.220.0/22 },
               { protocol: tcp, port_range_min: 22, port_range_max: 22, direction: ingress, remote_ip_prefix: 43.246.232.0/22 } ]
  HTTPsSecGroup:
    type: "OS::Neutron::SecurityGroup"
    properties:
      name: "HTTPsSecGroup"
      rules: [ { protocol: tcp, port_range_min: 80, port_range_max: 80},
               { protocol: tcp, port_range_min: 443, port_range_max: 443} ]
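Per the template's own description comment, this stack must exist before any cluster stack references the group IDs; the invocation given in that comment is:

    # create the shared security-group stack once per project
    openstack stack create --wait --template gc_secgroups.hot SecurityGroupStack
    # roll out later rule changes in place
    openstack stack update --wait --template gc_secgroups.hot SecurityGroupStack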
@@ -8,14 +8,8 @@ function usage {
   exit 1
 }

-if [ "$#" -ne 2 ]; then
-    echo "Illegal number of parameters expecting 2"
-    usage
-fi

 STACKNAME=$2
 if [[ "$STACKNAME" == "CICD"* ]]; then
   echo "CICD found in stackname. doing nothing"
 else
...
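The hard-coded two-argument guard is dropped because the CI jobs now pass the project name as a third argument. A minimal replacement check, assuming the wrapper's signature is now `heatcicdwrapper.sh <operation> <stackname> <project_name>` (the exact arity is not shown in this hunk):

    # hypothetical guard matching the new call sites, e.g.
    # heatcicdwrapper.sh delete_if_exists $STACKNAME ${CI_PROJECT_NAME}
    if [ "$#" -ne 3 ]; then
        echo "Illegal number of parameters: expecting 3 (operation, stackname, project name)"
        usage
    fi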
 resource_registry:
-  My::Server::MgmtNode: mgmtnode_HOT.yaml
+  My::Server::MgmtNode: ./mgmtnode_HOT.yaml
 #!/bin/bash
 function usage {
-  echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql}" INVENTORY_FILE KEY
+  echo $"Usage: $0 {all, ComputeNodes, LoginNodes, ManagementNodes, NFSNodes, sql, slurm}" INVENTORY_FILE KEY
   exit 1
 }
@@ -23,22 +23,4 @@ function run_them ()
   done
 }

-# I think I am just checking the if $1 is one of the listes strings (see usage) not proud of this at all but works
-case "$1" in
-    all)
-    ;;
-    ComputeNodes)
-    ;;
-    ManagementNodes)
-    ;;
-    NFSNodes)
-    ;;
-    SQLNodes)
-    ;;
-    LoginNodes)
-    ;;
-    *)
-    usage
-esac
 run_them $1 $2 $3
\ No newline at end of file
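The `slurm` target added to the usage string is exercised by the new CI test step shown earlier; invoked the same way as the other host groups:

    # matches the new step in the CI tests stage
    bash -e ./tests/run_tests.sh slurm "files/inventory.$STACKNAME" "../gc_key.pem"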
---
- hosts: ManagementNodes,LoginNodes,ComputeNodes
  gather_facts: false
  tasks:
  - name: add user hpctest
    user:
      name: hpctest
      shell: /bin/bash
    become: true

- hosts: ManagementNodes
  gather_facts: false
  tasks:
  - name: Create a parent account
    command: ./sacctmgr -i add account parentAccount cluster=m3 Description="Test parent account" Organization="Monash"
    args:
      chdir: '/opt/slurm-latest/bin'
    become: true
    register: result
    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
  - name: Create a project associated with a given parent
    command: ./sacctmgr -i add account testProject parent=parentAccount cluster=m3 Organization="Monash"
    args:
      chdir: '/opt/slurm-latest/bin'
    become: true
    register: result
    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
  - name: Create a user and associate them with a project
    command: ./sacctmgr -i create user hpctest cluster=m3 account=testProject partition=batch
    args:
      chdir: '/opt/slurm-latest/bin'
    become: true
    register: result
    failed_when: result.rc != 0 and result.stdout != " Nothing new added."
#sudo `which sacctmgr` modify user where name=hpctest set maxjobs=200
## 18 sudo `which sacctmgr` update account hpctest set qos=normal
# 22 sudo `which sacctmgr` update account testProject set qos=normal

- hosts: LoginNodes
  gather_facts: false
  tasks:
  - name: make sure munge is running
    service:
      name: munge
      state: started
    become: true
  - name: simple srun test
    command: ./srun --ntasks=1 --partition=batch hostname
    args:
      chdir: '/opt/slurm-latest/bin'
    become: true
    become_user: hpctest
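This playbook backs the new `slurm` test target. Run outside of run_tests.sh it would be invoked along these lines; the playbook filename here is hypothetical, and the inventory and key arguments mirror the other test invocations:

    # hypothetical direct invocation; "run_tests.sh slurm" wires in the same inventory and key
    ansible-playbook -i files/inventory.$STACKNAME --private-key ../gc_key.pem tests/slurm_test.yml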
@@ -3,7 +3,7 @@ desktopNodeList:
   - { name : 'DesktopNodes', interface : 'eth0' }
 clustername: "m3"
 projectname: "m3"
-slurm_version: 19.05.3-2
+slurm_version: 19.05.4
 munge_version: 0.5.13
 nhc_version: 1.4.2
 munge_dir: /opt/munge-{{ munge_version }}
...
@@ -9,4 +9,6 @@ NHC_EMAIL_TO="{{ nhc_emails }}"
 NHC_EMAIL_SUBJECT="{{ nhc_email_subject }}"
 NHC_LOOP_TIME="300"
 DF_FLAGS="-Tk"
+# Timeout value added temporarily by Trung - 20200409
+TIMEOUT=150
@@ -4,15 +4,29 @@
   become: true
   become_user: root

-- name: restart ntpd
+- name: restart ntpd redhat
   service: name=ntpd state=restarted
   become: true
   become_user: root
+  when: ansible_os_family == "RedHat"

-- name: ensure ntpd is enabled and started
+- name: ensure ntpd is enabled and started redhat
   service: name=ntpd state=started enabled=yes
   become: true
   become_user: root
+  when: ansible_os_family == "RedHat"
+
+- name: restart ntpd ubuntu
+  service: name=ntp state=restarted
+  become: true
+  become_user: root
+  when: ansible_os_family == "Debian"
+
+- name: ensure ntpd is enabled and started ubuntu
+  service: name=ntp state=started enabled=yes
+  become: true
+  become_user: root
+  when: ansible_os_family == "Debian"

 - name: set local timezone
   file: path=/etc/localtime state=link src={{ TIMEZONE_PATH }}
...
@@ -3,25 +3,22 @@
   with_items:
     - libcgroup
   become: True
-  become_method: sudo
   when: ansible_os_family == "RedHat"

 - name: apt install cgroup
-  apt: name={{ item }} state=installed update_cache=yes
-  with_items:
+  package:
+    state: installed
+    name:
     - libcgroup1
     - cgroupfs-mount
     - cgroup-tools
   when: ansible_os_family == "Debian"
   become: True
-  become_method: sudo

 - name: config cgroup.conf file
   template: dest={{ slurm_dir }}/etc/cgroup.conf src=cgroup.conf.j2 mode=644
   become: True
-  become_method: sudo

 - name: config cgroup_allowed_devices.conf file
   template: dest={{ slurm_dir }}/etc/cgroup_allowed_devices.conf src=cgroup_allowed_devices.conf.j2 mode=644
   become: True
-  become_method: sudo
@@ -8,22 +8,22 @@
     src: "http://consistency0/src/munge-{{ munge_version }}.tar.bz2"
     copy: no
     dest: /tmp
-    creates: /tmp/munge-munge-{{ munge_version }}/configure
+    creates: /tmp/munge-{{ munge_version }}/configure
   when: not munge_binary.stat.exists

 - name: build munge
   shell: ./configure --prefix={{ munge_dir }} && make
   args:
-    chdir: /tmp/munge-munge-{{ munge_version }}
-    creates: /tmp/munge-munge-{{ munge_version }}/src/munge/munge
+    chdir: /tmp/munge-{{ munge_version }}
+    creates: /tmp/munge-{{ munge_version }}/src/munge/munge
   when: not munge_binary.stat.exists

 - name: install munge
   shell: make install
   become: true
   args:
-    chdir: /tmp/munge-munge-{{ munge_version }}
+    chdir: /tmp/munge-{{ munge_version }}
     creates: "{{ munge_dir }}/bin/munge"
   when: not munge_binary.stat.exists
...