Main Script

#!/bin/bash

#SBATCH --job-name SUMO   ## name that will show up in the queue
#SBATCH --exclusive
#SBATCH --output slurm-%j.out   ## output filename; %j expands to the job ID (the default is slurm-[jobID].out)
#SBATCH --partition=gpu
#SBATCH -t 7-00:00  # time limit: (D-HH:MM)
#SBATCH --comment laion
#SBATCH --nodes=2
#SBATCH --ntasks=2
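## --nodes=2 with --ntasks=2 means srun below starts exactly one copy of the child script per node.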

## Loading the modules
module load openmpi
## Internode comms
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib
export NCCL_PROTO=simple
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/aws-ofi-nccl/lib
export PATH=$PATH:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
export NCCL_DEBUG=info
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0
export OMPI_MCA_pml="^cm"
export OMPI_MCA_btl="tcp,self"
export OMPI_MCA_btl_tcp_if_exclude="lo,docker1"
export OMPI_MCA_plm_rsh_no_tree_spawn=1
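## The FI_* and NCCL_* settings above route NCCL traffic over AWS EFA (libfabric);
## NCCL_DEBUG=info makes NCCL log which transport it actually selects at startup.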

export SINGULARITY_OMPI_DIR=/opt/amazon/openmpi
export SINGULARITYENV_APPEND_PATH=/opt/amazon/openmpi/bin
export SINGULARITYENV_APPEND_LD_LIBRARY_PATH=/opt/amazon/openmpi/lib
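## The SINGULARITYENV_* variables are intended to expose the host Open MPI paths inside the container.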

mpirun --version
##==========================================================
## Variables passed on to the child script
export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
## get the IP address of the master node
export MASTER_IP=`hostname -I | cut -d' ' -f1`
export MASTER_PORT=16543
export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`

# Convert HOSTNAMES to a list for passing in as arguments
export HOSTNAMES_LIST=`echo $HOSTNAMES | sed 's/ /,/g'`

echo Number of Nodes: $COUNT_NODE
echo Name of all Hosts: $HOSTNAMES
echo Master IP: $MASTER_IP
echo PROC_ID: $SLURM_PROCID

## Run the distributed script
# Use srun to run torch_runner.sh 
#srun /fsx/awesome/torch_runner.sh -w $HOSTNAMES_LIST --comment laion
srun /fsx/awesome/torch_runner.sh --comment laion
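srun starts one copy of torch_runner.sh per task (here, one per node); each copy inherits the exported variables above plus its own SLURM task variables. As a minimal sketch of that hand-off (a hypothetical check_env.sh, not part of the original setup), running it via srun in place of torch_runner.sh would print what each node sees:

#!/bin/bash
# check_env.sh (hypothetical): print the environment each srun task receives
echo "host=$(hostname) nodeid=$SLURM_NODEID procid=$SLURM_PROCID master=$MASTER_ADDR:$MASTER_PORT nodes=$COUNT_NODE"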

Child Script - torch_runner.sh

#!/bin/bash

# HOSTNAMES, MASTER_ADDR, MASTER_PORT, and COUNT_NODE come from the main script
# This script runs the distributed training of the model on each node

## Distributed script arguments
export WANDB_API_KEY=<your_wandb_api_key>   ## replace with your own Weights & Biases API key
echo myuser=`whoami`
echo COUNT_NODE=$COUNT_NODE
echo LD_LIBRARY_PATH = $LD_LIBRARY_PATH
echo PATH = $PATH
echo which mpicc `which mpicc`
echo HOSTNAMES = $HOSTNAMES
echo hostname = `hostname`
echo MASTER_ADDR=$MASTER_ADDR
echo MASTER_PORT=$MASTER_PORT

H=`hostname`
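## THEID is this node's index in HOSTNAMES and is used below as --machine_rank.
## (For tasks launched via srun, $SLURM_NODEID should yield the same index.)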
THEID=`echo -e $HOSTNAMES  | python3 -c "import sys;[sys.stdout.write(str(i)) for i,line in enumerate(next(sys.stdin).split(' ')) if line.strip() == '$H'.strip()]"`
echo THEID=$THEID

#source ~/.bashrc
#conda activate SUMO_dist
chmod +x /home/awesome/awesome/scripts/torch_convnext.py

echo $THEID Starting
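## accelerate launch starts 8 processes per node (one per GPU), 8 * COUNT_NODE in total;
## --machine_rank identifies each node, and rank 0 hosts the rendezvous at MASTER_ADDR:MASTER_PORT.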
singularity exec --nv --cleanenv -B /fsx/awesome:/home/awesome torch_cuda_11_7.sif \
    accelerate launch \
    --num_processes $(( 8 * $COUNT_NODE )) \
    --num_machines $COUNT_NODE \
    --multi_gpu \
    --mixed_precision fp16 \
    --machine_rank $THEID \
    --main_process_ip $MASTER_ADDR \
    --main_process_port $MASTER_PORT \
    scripts/torch_convnext.py \
    --model_name='convnext_xlarge_in22k' \
    --batch_size=80 \
    --epochs=25 \
    --lr=7e-5 \
    --optimize='AdamW' \
    --weight_decay=0.002 \
    --group_name='No_Pretrained_Models_7e-5'