Commit 89ae77fd authored by Andreas Hamacher, committed by Trung Nguyen
Maintenanceplaybook

parent f87011cd
@@ -73,6 +73,8 @@ ansiblelint:
- echo "stage ansiblelint"
- cd CICD
- python3 ansiblelint/run_lint.py --targets master_playbook.yml
- python3 ansiblelint/run_lint.py --targets ../qa.yml
- python3 ansiblelint/run_lint.py --targets ../maintenance.yml
testlustre:
MPI tests.
Run a test on a single (updated or rebuilt) node, and on two nodes.
run_two_node.sh --newnode=<node> --reservation=<res> [ --partition=<partition> --testnode=<testnode> ]
e.g. run_two_node.sh --newnode=gf00 --reservation=monMaintenance
e.g. run_two_node.sh --newnode=gf00 --reservation=monMaintenance --partition=gpu --testnode=ge00
Where
<node> is the name of the updated host we wish to test
<res> is the name of the SLURM reservation
<partition> is the name of the SLURM partition (defaults to comp)
<testnode> is a second (unupdated) node used for a two-server MPI run (defaults to gf01)
This code assumes:
- the SLURM controller is up (so we have an MPI environment to use)
- it uses srun to submit the job
- a SLURM reservation is present, and you must specify its name
- it uses openmpi/1.10.7-mlx
- it uses a timer to kill the shell in case srun hangs. Note that this timer still fires even when
the script exits normally, i.e. it activates after the script has finished (see the sketch after this list).
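The timer in both scripts is a simple background watchdog. A minimal sketch of the pattern, assuming bash; the WATCHDOG_PID variable and the trap-based cancellation are illustrative additions, not part of the shipped scripts:

TIMEOUT=2m
( sleep "$TIMEOUT" && kill $$ ) &              # watchdog: kill this shell if we are still running after TIMEOUT
WATCHDOG_PID=$!                                # PID of the watchdog subshell (name chosen here for illustration)
trap 'kill "$WATCHDOG_PID" 2>/dev/null' EXIT   # cancel the watchdog on exit so the deferred kill can never fire
srun --nodes=1 hostname                        # stand-in for the real rotateMPI.exe job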
#include <iostream>
#include <exception>
#include <string>
#include <cstring>
using namespace std;
#include <stdlib.h>
#include <mpi.h>
#include <unistd.h>
//
//
// rotate exercise from NCI
// passes a number around the ranks of an MPI program
// used to test connectivity. We use non-blocking primitives just because we can
//
void do_work_non_blocking();
int mpirank=0;
const int MES_TAG=1;
#include <fstream>
#include <sstream>
#include <cstdlib>  // std::system
#include <cstdio>   // L_tmpnam, std::tmpnam, remove
// Run a shell command and return its stdout, captured via a temporary file.
// (std::tmpnam is simplistic, but adequate for this connectivity test helper.)
std::string ssystem (const char *command) {
char tmpname [L_tmpnam];
std::tmpnam ( tmpname );
std::string scommand = command;
std::string cmd = scommand + " >> " + tmpname;
std::system(cmd.c_str());
std::ifstream file(tmpname, std::ios::in );
std::string result;
char c;
while (file.get(c)) result.push_back(c); // stop at EOF without appending the EOF marker
file.close();
remove(tmpname);
return result;
}
int main( int argc, char* argv[])
{
try
{
cout <<"rotate program called on host ";
const size_t BUFFLEN = 256;
char hostBuffer[BUFFLEN];
hostBuffer[0]='\0';
gethostname(hostBuffer, BUFFLEN);
cout<<hostBuffer<<endl;
int mpierror, mpisize;
cout<<"Before MPI_Init"<<endl;
mpierror = MPI_Init(&argc,&argv);
if (mpierror != MPI_SUCCESS)
{
cerr <<"Error in mpi init "<<mpierror<<endl;
exit(mpierror);
}
cout<<"Before MPI_Comm_size"<<endl;
mpierror = MPI_Comm_size(MPI_COMM_WORLD,&mpisize);
if (mpierror != MPI_SUCCESS)
{
cerr <<"Error in mpi comm size "<<mpierror<<endl;
exit(mpierror);
}
cout<< "Mpi size is "<<mpisize<<endl;
mpierror = MPI_Comm_rank(MPI_COMM_WORLD,&mpirank);
if (mpierror != MPI_SUCCESS)
{
cerr <<"Error in mpi rank size "<<mpierror<<endl;
exit(mpierror);
}
//std::string CPU_Affinity = ssystem("cat /proc/self/status | grep -i cpus_allowed_list");
//std::string CPU_Affinity = ssystem("grep -i cpus_allowed_list /proc/self/status");
//cout<<"Hostname="<<hostBuffer<< ": Mpi rank is "<<mpirank<<" and mpusize is "<<mpisize<< " and CPU Affinity is "<<CPU_Affinity<<endl;
cout<<"Hostname="<<hostBuffer<< ": Mpi rank is "<<mpirank<<" and mpusize is "<<mpisize<< endl;
//
//a do work here
//
do_work_non_blocking();
cout <<"Before MPI_Finalize from rank "<<mpirank<<endl;
MPI_Finalize();
cout <<"Exit from rank "<<mpirank<<endl;
}
catch (exception& e)
{
cerr<<"Exception caught " << e.what()<< "from mpirank "<<mpirank << endl;
}
}//main
//------------------------------------------
void sendString(char* s, int destRank)
{
if (s==NULL)
{
cerr<<"MPI_Send. error null pointer sent!\n";
return;
}
cout<<"MPI_Send::["<<mpirank<<"=>"<<destRank<< "] string is \""<<s<<"\""<<endl;
MPI_Status status;
int error=MPI_Ssend(s,strlen(s)+1,MPI_CHAR,destRank,MES_TAG,MPI_COMM_WORLD);
if (error != MPI_SUCCESS)
{
cout<<"error MPI_Ssend: from "<<mpirank<<" to "<<destRank<<endl;
return;
}
cout<<"........MPI_Send::["<<mpirank<<"=>"<<destRank<< "] Successful send\n";
}//sendString
//------------------------------------------
void recvString(char* buffer, int MAX_BUFFER, int destRank)
{
cout<<"........MPI_Recv: rank "<<mpirank<< "<="<<destRank<<endl;
MPI_Status status;
int error=MPI_Recv(buffer,MAX_BUFFER,MPI_CHAR,destRank, MPI_ANY_TAG,MPI_COMM_WORLD,&status);
if (error != MPI_SUCCESS)
{
cout<<"error MPI_Recv: from "<<mpirank<<endl;
return;
}
int received;
MPI_Get_count(&status,MPI_CHAR,&received);
if (strlen(buffer)==0)
{
cerr<<"Error from rank "<<mpirank<<" no string found! "<<endl;
return;
}
if (strlen(buffer) < (size_t)MAX_BUFFER)
{
cout <<"MPI_Recv["<<mpirank<< "<=" <<status.MPI_SOURCE << "]: Successful receive of \""<<buffer<<"\"\n";
}
}//recvString
void do_work_non_blocking()
{
cout<< "do_work_non_blocking: "<<mpirank<<endl;
int error;
double start = MPI_Wtime();
bool finish=false;
int MPI_SIZE;
MPI_Comm_size(MPI_COMM_WORLD,&MPI_SIZE);
char my_cpu_name[BUFSIZ];
int my_name_length;
MPI_Request requestSend;
MPI_Request requestReceive;
MPI_Status status;
int sendBuffer;
int receiveBuffer;
int nextRank;
if (mpirank==MPI_SIZE-1)
{
nextRank=0;
}
else
{
nextRank=mpirank+1;
}
MPI_Get_processor_name(my_cpu_name, &my_name_length);
cout<<"Rank "<<mpirank<<" Processor name is "<<my_cpu_name<<endl;
bool finished=false;
int currentRank=mpirank;
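//
// Ring rotation: each iteration forwards the value we currently hold (initially our
// own rank) to the next rank and receives a value from any source; once our own
// rank value has travelled the full circle and comes back to us, we stop.
//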
while (!finished)
{
//send rank
sendBuffer=currentRank;
cout<<"Send from rank "<<mpirank<<"=>"<<nextRank<<" sendBuffer "<<sendBuffer<<endl;
error =MPI_Isend(&sendBuffer,1,MPI_INT,nextRank, MES_TAG,MPI_COMM_WORLD,&requestSend);
if (error != MPI_SUCCESS)
{
cout<<"error MPI_Isend(int): from "<<mpirank<<endl;
return;
}
//receive rank
error=MPI_Irecv(&receiveBuffer,1,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&requestReceive);
if (error != MPI_SUCCESS)
{
cout<<"error MPI_Irecv(int): from "<<mpirank<<endl;
return;
}
//wait
MPI_Status status;
cout<<"Before MPI_Wait for rank "<<mpirank<<endl;
MPI_Wait(&requestSend,&status);
MPI_Wait(&requestReceive,&status);
cout<<"2*waits finished for rank "<<mpirank<<" and received value is "<<receiveBuffer<<endl;
currentRank=receiveBuffer;
if (currentRank==mpirank)
{
finished=true;
cout<<"SUCCESS from "<<mpirank<<endl;
}
}//while
double end = MPI_Wtime();
cout<<"Time of work["<< mpirank << "] "<< (end-start) << " seconds "<<endl;
}
//------------------------------------------
//------------------------------------------
#!/bin/bash
#
# run single_node MPI test
# Author Simon Michnowicz
# 20 Aug 20
#
# usage: run_single_node.sh --newnode=<node> --reservation=<res> [ --partition=<partition> ]
# check that the command line includes the host to test
newnode=""
reservation=""
partition=comp
################################################
function printUsage
{
echo "$0 To test a new or updated node, it runs a simple MPI job on it "
echo "Usage: run_single_node.sh --newnode=<node> --reservation=<res> [ --partition=<partition> ] "
echo "e.g. run_single_node.sh --newnode=gf00 --reservation=monMaintenance"
echo "e.g. run_single_node.sh --newnode=gf00 --reservation=monMaintenance --partition=gpu "
echo "Where"
echo "<node> is the name of the updated host we wish to test"
echo "<res> is the name of the SLURM reservation"
echo "<partition> is the name of the SLURM partition (defaults to comp)"
echo ""
#echo "We also support short options, i.e. -n -r -P "
exit 0
}
################################################
function parseARG
{
OPTS="--long help:: --long newnode: --long reservation: --long partition:: -o nr:,hP::"
#echo "getopt $OPTS -n $0 -- $@"
TEMP=`getopt $OPTS -n $0 -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
eval set -- "$TEMP"
#echo "Output is $TEMP"
while true; do
#echo "Parameter is $1"
case "$1" in
-n|--newnode) # the node to test
newnode=$2
shift 2
#echo "Newnode is $newnode"
;;
-r|--reservation) # the SLURM reservation name
reservation=$2
shift 2
#echo "Reservation is $reservation"
;;
-P|--partition) # the SLURM partition (defaults to comp)
partition=$2
shift 2
#echo "partition is ${partition}"
;;
#
# -- means the end of the arguments; drop this, and break out of the while loop
#
--) shift; break ;;
-h|--help|*) # Display help.
#echo "In h option"
printUsage
exit 0
;;
esac
done
}
################################################
#main
################################################
#force a timeout if srun hangs
TIMEOUT=2m
echo "PID is $$"
(sleep $TIMEOUT && kill $$ ) &
#parse arguments
parseARG "$@"
echo "newnode=$newnode"
echo "reservation=$reservation"
echo "partition=$partition"
if [ -z "$newnode" ]
then
echo "You need to specify a nodename "
printUsage
exit 1
fi
if [ -z "$reservation" ]
then
echo "You need to specify a reservation "
printUsage
exit 1
fi
#######################
# compile and run mpi job
#######################
module load openmpi/1.10.7-mlx
mpic++ -o rotateMPI.exe rotate.cpp
if [ ! -e rotateMPI.exe ]
then
echo "Error trying to compile rotateMPI.exe"
echo "Exiting"
exit 1
fi
SLURM_PARAMETERS="--nodes=1 --tasks-per-node=2 --cpus-per-task=1 --partition=${partition}"
SLURM_PARAMETERS="${SLURM_PARAMETERS} --reservation=${reservation} --nodelist=${hostname}"
echo "srun ${SLURM_PARAMETERS} rotateMPI.exe"
srun ${SLURM_PARAMETERS} rotateMPI.exe
retValue=$?
echo "retValue is $retValue"
if [ $retValue -ne "0" ]
then
echo "FAILURE TO RUN rotateMPI.exe as a slurm job"
exit $retValue
fi
echo "##############################"
echo "srun test OK"
echo "##############################"
exit 0
#!/bin/bash
#
# run_two_node MPI test
# Author Simon Michnowicz
# 20 Aug 20
#
# usage: run_two_node.sh --newnode=<node> --reservation=<res> [ --partition=<partition> --testnode=<testnode> ]
# check that the command line includes the host to test
function printUsage
{
echo "$0 To test a new or updated node, it runs a simple MPI job between it and a known test node"
echo "Usage: run_two_node.sh --newnode=<node> --reservation=<res> [ --partition=<partition> --testnode=<testnode> ] "
echo "e.g. run_two_node.sh --newNode=gf00 --reservation=monMaintenance"
echo "e.g. run_two_node.sh --newNode=gf00 --reservation=monMaintenance --partition=gpu --testnode=ge00"
echo "Where"
echo "<node> is the name of the updated host we wish to test"
echo "<res> is the name of the SLURM reservation"
echo "<partition> is the name of the SLURM partition (defaults to comp)"
echo "<testnode> is a second (unupdated) node to do 2 server MPI (defaults to gf01)"
echo ""
#echo "We also support short options, i.e. -n -r -P -t"
exit 0
}
newnode=""
reservation=""
partition=comp
testnode=gf01
function parseARG
{
OPTS="--long help:: --long newnode: --long reservation: --long partition:: --long testnode:: -o nr:,hPt::"
#echo "getopt $OPTS -n $0 -- $@"
TEMP=`getopt $OPTS -n $0 -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
eval set -- "$TEMP"
#echo "Output is $TEMP"
while true; do
#echo "Parameter is $1"
case "$1" in
#-n|*newnode*) # Specify p value.
-n|--newnode) # the node to test
newnode=$2
shift 2
#echo "Newnode is $newnode"
;;
-r|--reservation) # the SLURM reservation name
reservation=$2
shift 2
#echo "Reservation is $reservation"
;;
-P|--partition) # the SLURM partition (defaults to comp)
partition=$2
shift 2
#echo "partition is ${partition}"
;;
-t|--testnode) # Specify testnode
testnode=$2
shift 2
#echo "testnode is ${testnode}"
;;
#
# -- means the end of the arguments; drop this, and break out of the while loop
#
--) shift; break ;;
-h|--help|*) # Display help.
#echo "In h option"
printUsage
exit 0
;;
esac
done
}
#######################
# main routine here
#######################
#force a timeout if srun hangs
TIMEOUT=2m
echo "PID is $$"
(sleep $TIMEOUT && kill $$ ) &
#parse arguments
parseARG "$@"
echo "newnode=$newnode"
echo "reservation=$reservation"
echo "partition=$partition"
echo "testnode=$testnode"
if [ -z "$newnode" ]
then
echo "You need to specify a nodename "
printUsage
exit 1
fi
if [ -z "$reservation" ]
then
echo "You need to specify a reservation "
printUsage
exit 1
fi
#######################
# compile and run mpi job
#######################
module load openmpi/1.10.7-mlx
mpic++ -o rotateMPI.exe rotate.cpp
if [ ! -e rotateMPI.exe ]
then
echo "Error trying to compile rotateMPI.exe"
echo "Exiting"
exit 1
fi
SLURM_PARAMETERS="--nodes=2 --tasks-per-node=2 --cpus-per-task=1 --partition=${partition} "
SLURM_PARAMETERS="${SLURM_PARAMETERS} --reservation=${reservation} --nodelist=${newnode},${testnode}"
SLURM_PARAMETERS="${SLURM_PARAMETERS} --time=00:01:00 --job-name=MPItest"
echo "srun ${SLURM_PARAMETERS} rotateMPI.exe"
srun ${SLURM_PARAMETERS} rotateMPI.exe
retValue=$?
echo "retValue is $retValue"
if [ $retValue -ne "0" ]
then
echo "FAILURE TO RUN rotateMPI.exe as a slurm job"
exit $retValue
fi
echo
echo "#####################"
echo "srun test OK"
echo "#####################"
exit 0
- hosts: 'ComputeNodes,DGXRHELNodes'
gather_facts: smart # not sure if false is clever here
tasks:
- include_vars: vars/ldapConfig.yml
- include_vars: vars/filesystems.yml
- include_vars: vars/slurm.yml
- include_vars: vars/vars.yml
- { name: set use shared state, set_fact: usesharedstatedir=False }
tags: [ always ]
# these are just templates. Note the tag never! Everything tagged never is only executed if called explicitly, i.e. ansible-playbook --tags=foo,bar OR --tags=tag_group (see the example invocation after this playbook)
- hosts: 'ComputeNodes,DGXRHELNodes'
gather_facts: false
tasks:
- { name: template_shell, shell: ls, tags: [never,tag_group,uniquetag_foo] }
- { name: template_command, command: uname chdir=/bin, tags: [never,tag_group,uniquetag_bar] }
- hosts: 'ComputeNodes,LoginNodes,DGXRHELNodes'
gather_facts: false
tasks:
- { name: kill user bash shells, shell: 'ps aux | grep -i bash | grep -v "ec2-user" | grep -v "root" | sed "s/\ \ */\ /g" | cut -f 2 -d " " | xargs -I{} kill -09 {}', become: true, become_user: root, tags: [never,kickshells]}
- hosts: 'ManagementNodes'
gather_facts: false
tasks:
- name: prep a mgmt node for shutdown (DO NOT FORGET TO LIMIT; gluster needs 2 out of 3 nodes to run)
block:
# the failover actually works, but it only takes down the primary; so if this were called from the backup, all of slurm would go down
#- { name: force a failover shell: /opt/slurm-19.05.4/bin/scontrol takeover }
- { name: stop slurmdbd service, service: name=slurmdbd state=stopped }
- { name: stop slurmctld service, service: name=slurmctld state=stopped }
- { name: stop glusterd service, service: name=glusterd state=stopped }
- { name: stop glusterfsd service, service: name=glusterfsd state=stopped }
become: true
tags: [never,prepmgmtshutdown]
- name: verify a mgmt node came up well
block:
# TODO verify vdb is mounted
- { name: start glusterd service, service: name=glusterd state=started }
- { name: start glusterfsd service, service: name=glusterfsd state=started }
- { name: start slurmctld service, service: name=slurmctld state=started }
- { name: start slurmdbd service, service: name=slurmdbd state=started }
become: true
tags: [never,verifymgmtNode16Aug]
- hosts: 'SQLNodes'
gather_facts: false
tasks:
- name: prep a sqlnode node for shutdown
block:
- { name: stop mariadb service, service: name=mariadb state=stopped }
- { name: stop glusterd service, service: name=glusterd state=stopped }
- { name: stop glusterfsd service, service: name=glusterfsd state=stopped }
become: true
tags: [never,prepsqlshutdown]
- name: verify an sql node after a restart
block:
- { name: ensure mariadb service runs, service: name=mariadb state=started }
- { name: ensure glusterd service runs, service: name=glusterd state=started }
- { name: ensure glusterfsd service runs, service: name=glusterfsd state=started }
become: true
tags: [never,sqlverify]
- hosts: 'LoginNodes'
gather_facts: false
tasks:
- name: verify Loginnodes for 16Aug maintenance
block:
- { name: make sure lustre service is stopped, service: name=lustre-client enabled=False state=stopped }
- { name: make sure nologin is still present, file: path=/etc/nologin state=file }
become: true
tags: [never,verifyLoginNode16Aug]
- hosts: 'LoginNodes,ComputeNodes,DGXRHELNodes'
gather_facts: false
tasks:
- name: stop lustre and disable service
block:
- { name: "stop and disable lustre service; some nodes will be rebooted and should not come up with a running service", service: name=lustre-client enabled=False state=stopped }
become: true
tags: [never,stopdisablelustre16Aug]
- name: start lustre and enable service
block:
- { name: start and enable lustre service, service: name=lustre-client enabled=True state=started }
become: true
tags: [never,startenablelustre16Aug]
- hosts: 'ComputeNodes,LoginNodes,DGXRHELNodes'
gather_facts: false
tasks:
- { name: kill squashfs, shell: "pkill -f squashfuse", become: true, become_user: root, tags: [never,umount_home] }
- { name: umount /home, mount: path=/home state=unmounted, become: true, become_user: root, tags: [never,umount_home] }
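Because every maintenance task above carries the never tag, nothing in this playbook runs unless its tag is named explicitly. A minimal sketch of such a run, assuming a management host named mgmt01 exists in the inventory (the host name and the dry-run step are illustrative only):

# dry-run the management-node shutdown prep against a single host
ansible-playbook maintenance.yml --limit=mgmt01 --tags=prepmgmtshutdown --check
# apply it for real once the dry run looks sane
ansible-playbook maintenance.yml --limit=mgmt01 --tags=prepmgmtshutdown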
#!/bin/sh
#
#mount | grep gvfs | while read -r line ;
#do
# read -ra line_array <<< $line
# echo "umount ${line_array[2]}"
#done
# un-stick yum (clear stale rpm lock files)
#mv /var/lib/rpm/__db* /tmp/
#mv /var/lib/rpm/.rpm.lock /tmp/
#mv /var/lib/rpm/.dbenv.lock /tmp
#yum clean all
qa.yml (new file, mode 100644)
#- hosts: 'all'
#gather_facts: false # not sure if false is clever here
#tasks:
#- include_vars: vars/ldapConfig.yml
#- include_vars: vars/filesystems.yml
#- include_vars: vars/slurm.yml
#- include_vars: vars/vars.yml
#- { name: set use shared state, set_fact: usesharedstatedir=False }
#tags: [ always ]
# this playbook is roughly sorted by
# - host group topics, e.g. ComputeNodes or ComputeNodes,LoginNodes, with VisNodes last
# - "tag_groups", each starting after a #comment (see #misc or the misc tag)
- hosts: 'ComputeNodes'
gather_facts: false
tasks:
# these are just templates.
# Note the tag never! Everything tagged never is only executed if called explicitly, i.e. ansible-playbook --tags=foo,bar OR --tags=tag_group (see the example invocation after this file)
- { name: template_shell, shell: ls, tags: [never,tag_group,uniquetag_foo] }
- { name: template_command, command: uname chdir=/bin, tags: [never,tag_group,uniquetag_bar] }
- { name: template_script, script: ./scripts/qa/test.sh, tags: [never,tag_group,uniquetag_script] }
#mpi stuff
- { name: run mpi on one computenode, command: ls, args: {chdir: "/tmp"} , failed_when: "TODO is TRUE", tags: [never,mpi,mpi_local,TODO] }
- { name: run mpi on two computenodes, command: ls, args: {chdir: "/tmp"} , failed_when: "TODO is TRUE", tags: [never,mpi,mpi_local_two,TODO] }
#- { name: run mpi via sbatch, command: cmd=ls chdir="/tmp" , failed_when: "TODO is TRUE", tags: [never,mpi,slurm_mpi,TODO] }
#- { name: mpi_pinging, command: cmd=ls chdir="/tmp" , failed_when: "TODO is TRUE", tags: [never,mpi,mpi_ping,TODO] }
#module load openmpi/3.1.6-ucx;mpirun --mca btl self --mca pml ucx -x UCX_TLS=mm -n 24 /projects/pMOSP/mpi/parallel_mandelbrot/parallel/mandelbrot
#module load openmpi/3.1.6-ucx;srun mpirun --mca btl self --mca pml ucx -x UCX_TLS=mm -n 24 /projects/pMOSP/mpi/parallel_mandelbrot/parallel/mandelbrot
#slurm
- { name: slurmd should be running, service: name=slurmd state=started, tags: [never,slurm,slurmd] }
- { name: munged should be running, service: name=munged state=started, tags: [never,slurm,munged] }
- { name: ensure connectivity to the controller, shell: scontrol ping, tags: [never,slurm,scontrol_ping] }
- { name: the most simple srun test, shell: srun --reservation=AWX hostname, tags: [never,slurm,srun_hostname] }
#nhc, manually run nhc because it contains many tests
- { name: run nhc explicitly, command: /opt/nhc-1.4.2/sbin/nhc -c /opt/nhc-1.4.2/etc/nhc/nhc.conf, become: true , tags: [never,slurm,nhc] }
# networking
- { name: ping license server, shell: ls, tags: [never,network,ping_license] }
- { name: ping something outside monash, command: ping -c 1 8.8.8.8, tags: [never,network,ping_external] }
#mounts
- hosts: 'ComputeNodes,LoginNodes'
gather_facts: false
tasks:
- { name: check mount for usr_local, shell: "mount | grep -q local", tags: [never,mountpoints,mountpoints_local] }
- { name: check mount for projects, shell: "lfs df -h", tags: [never,mountpoints_projects] }
- { name: check mount for home, shell: "mount | grep -q home", tags: [never,mountpoints,mountpoints_home] }
- { name: check mount for scratch, shell: "mount | grep -q scratch" , tags: [never,mountpoints_scratch] }
#misc
- { name: check singularity, shell: module load octave && octave --version, tags: [never,misc,singularity3] }
- { name: module test, shell: cmd="module load gcc" executable="/bin/bash", tags: [never,misc,modulecmd] }
- { name: contact ldap, shell: maybe test ldapsearch, failed_when: "TODO is TRUE", tags: [never,misc,ldap,TODO] }
#gpu
- hosts: 'VisNodes'
gather_facts: false
tasks:
- { name: run nvidia-smi to see if a gpu driver is present, command: cmd="/bin/nvidia-smi", tags: [never,gpu,smi] }
- { name: run gpu burn defaults to 30 seconds, command: cmd="/usr/local/gpu_burn/1.0/run_silent.sh", tags: [never,gpu,long,gpuburn] }
# extended time-consuming tests
# relion see https://docs.massive.org.au/communities/cryo-em/tuning/tuning.html
# linpack
#module load openmpi/1.10.7-mlx;ldd /usr/local/openmpi/1.10.7-mlx/bin/* | grep -ic found
\ No newline at end of file
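As with the maintenance playbook, every check in qa.yml is gated behind the never tag, so a QA pass is driven entirely by the tags (and optionally a host limit) chosen on the command line. A sketch of typical invocations; the tag selection below is just an example:

# run the slurm and mount-point checks on the compute nodes only
ansible-playbook qa.yml --limit=ComputeNodes --tags=slurm,mountpoints
# run a single check, e.g. the srun smoke test
ansible-playbook qa.yml --tags=srun_hostname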
#!/bin/bash
echo $HOSTNAME