#!/bin/bash
#SBATCH --job-name SUMO        ## name that will show up in the queue
#SBATCH --exclusive
#SBATCH --output slurm-%j.out  ## filename of the output; the %j is equal to jobID; default is slurm-[jobID].out
#SBATCH --partition=gpu
#SBATCH -t 7-00:00             # time limit: (D-HH:MM)
#SBATCH --comment laion
#SBATCH --nodes=2
#SBATCH --ntasks=2

## Loading the modules
module load openmpi
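
## With --nodes=2 and --ntasks=2, the srun at the bottom launches exactly one
## copy of the runner script per node; --exclusive keeps other jobs off those nodes.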
## Internode comms
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib
export NCCL_PROTO=simple
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/aws-ofi-nccl/lib
export PATH=$PATH:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin
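
## The paths above expose NCCL, the aws-ofi-nccl plugin (which lets NCCL run
## over libfabric/EFA), and the EFA and Open MPI binaries to the job.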
export FI_EFA_FORK_SAFE=1                       # make the libfabric EFA provider safe across fork()
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1                 # use for p4dn
export NCCL_DEBUG=info                          # verbose NCCL logging, useful when bringing up a cluster
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0             # disable the EFA provider's shared-memory transfer
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0                    # disable NCCL's tree algorithm, forcing ring
export OMPI_MCA_pml="^cm"                       # exclude the cm PML component
export OMPI_MCA_btl="tcp,self"                  # restrict Open MPI's byte-transfer layers to TCP and self
export OMPI_MCA_btl_tcp_if_exclude="lo,docker1" # skip loopback and docker interfaces
export OMPI_MCA_plm_rsh_no_tree_spawn=1
export SINGULARITY_OMPI_DIR=/opt/amazon/openmpi
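
## Singularity copies any SINGULARITYENV_-prefixed variable into the container;
## the APPEND_* forms append to the container's PATH and LD_LIBRARY_PATH.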
export SINGULARITYENV_APPEND_PATH=/opt/amazon/openmpi/bin
export SINGULARITYENV_APPEND_LD_LIBRARY_PATH=/opt/amazon/openmpi/lib
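
## Sanity check: record the Open MPI version in the Slurm log before launching.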
mpirun --version
##==========================================================
## sent to sub script
export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
## get the IP address of the master node
export MASTER_IP=`hostname -I | cut -d' ' -f1`
export MASTER_PORT=16543
export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`

# Convert HOSTNAMES to a comma-separated list for passing in as arguments
export HOSTNAMES_LIST=`echo $HOSTNAMES | sed 's/ /,/g'`

echo Number of Nodes: $COUNT_NODE
echo Name of all Hosts: $HOSTNAMES
echo Master IP: $MASTER_IP
echo PROC_ID: $SLURM_PROCID

## Run the distributed script
# Use srun to run torch_runner.sh
#srun /fsx/awesome/torch_runner.sh -w $HOSTNAMES_LIST --comment laion
srun /fsx/awesome/torch_runner.sh --comment laion
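
## torch_runner.sh itself is not shown here. A minimal sketch of what such a
## runner could look like, assuming it wraps torchrun and that each node has
## 8 GPUs (train.py, the GPU count, and the argument handling are assumptions,
## not the actual runner):
#
#   #!/bin/bash
#   # Launched once per node by srun; all ranks rendezvous at the master
#   # address/port exported by the batch script above.
#   torchrun \
#       --nnodes="$COUNT_NODE" \
#       --nproc_per_node=8 \
#       --rdzv_backend=c10d \
#       --rdzv_endpoint="$MASTER_ADDR:$MASTER_PORT" \
#       train.py "$@"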