#!/bin/bash
# SLURM batch script: launch DDP training of a DeiT-base model on the
# ShanghaiTech Part B crowd-counting dataset, using 4 GPUs on one node.
#
# NOTE(review): in the original file every #SBATCH directive was fused onto
# the shebang line. SLURM only honors #SBATCH directives that appear as
# standalone comment lines at the top of the script, so the resource
# requests below were being silently ignored. They are now on their own
# lines, as required.
#SBATCH -N 1
#SBATCH -n 1
#SBATCH --partition=Teach-Standard
#SBATCH --gres=gpu:4
#SBATCH --mem=24000
#SBATCH --time=3-00:00:00

set -e

# CUDA / cuDNN toolchain locations for this cluster.
export CUDA_HOME=/opt/cuda-9.0.176.1/
export CUDNN_HOME=/opt/cuDNN-7.0/

export STUDENT_ID=$(whoami)

# Make the CUDA/cuDNN libraries, headers and binaries visible.
export LD_LIBRARY_PATH=${CUDNN_HOME}/lib64:${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
export LIBRARY_PATH=${CUDNN_HOME}/lib64:${LIBRARY_PATH}
export CPATH=${CUDNN_HOME}/include:$CPATH
export PATH=${CUDA_HOME}/bin:${PATH}

# NOTE(review): PYTHON_PATH is not a variable Python reads (that would be
# PYTHONPATH), and it is set to $PATH here. Kept as-is to preserve the
# original behavior, but confirm whether this line is needed at all.
export PYTHON_PATH=$PATH

# Per-user scratch space; point temp-file variables at it.
mkdir -p "/disk/scratch/${STUDENT_ID}"
export TMPDIR=/disk/scratch/${STUDENT_ID}/
export TMP=/disk/scratch/${STUDENT_ID}/

# Activate the conda environment. 'source' is a bashism, hence the
# #!/bin/bash shebang (the original used #!/bin/sh).
source "/home/${STUDENT_ID}/miniconda3/bin/activate" mlp-cuda

# Timestamped save path keeps runs from clobbering each other.
python train.py \
  --use_ddp True \
  --ddp_world_size 4 \
  --pth_tar './pretrained/deit_base_patch16_384-8de9b5d1.pth' \
  --train_dataset 'ShanghaiB' \
  --save_path "./save/DDP-ShanghaiB-base-$(date -Iminutes)" \
  --batch_size 4 \
  --gpus 0,1,2,3 \
  --print_freq 100