#!/bin/bash
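## Usage: sbatch slurm-submit.sh <config>
## $1 is forwarded to run/Train.py as --config, e.g. (hypothetical path):
##   sbatch slurm-submit.sh configs/InSPyReNet_SwinB.yaml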
## JOB NAME
#SBATCH --job-name=inspyrenet
## Name of stdout output file (%j expands to the job ID)
#SBATCH --output=train.%j.out
## Queue name or partition name (2080ti, titanxp, titanrtx)
#SBATCH --partition=A6000
#SBATCH --time=72:00:00
## Number of nodes (always 1)
#SBATCH --nodes=1
## Specifying a nodelist raises your scheduling priority
## Number of GPUs
#SBATCH --gres=gpu:8
## One task per GPU (must match the --gres count)
#SBATCH --ntasks-per-node=8
## Number of cores per task
#SBATCH --cpus-per-task=1
#SBATCH --mail-type=ALL
#SBATCH [email protected]
cd "$SLURM_SUBMIT_DIR"
echo "SLURM_SUBMIT_DIR=$SLURM_SUBMIT_DIR"
echo "CUDA_HOME=$CUDA_HOME"
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
echo "CUDA_VERSION=$CUDA_VERSION"
srun -l /bin/hostname
srun -l /bin/pwd
srun -l /bin/date
module purge # Remove all modules.
module load postech
echo "Start"
echo "source $HOME/anaconda3/etc/profile.d/conda.sh"
source $HOME/anaconda3/etc/profile.d/conda.sh
echo "conda activate inspyrenet"
conda activate inspyrenet
cd Projects/InSPyReNet
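## One training process per GPU; --standalone sets up a single-node torchrun rendezvous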
torchrun --standalone --nproc_per_node=8 run/Train.py --config "$1" --verbose --debug
date
echo "conda deactivate"
conda deactivate
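## Show this job's queue entry so its status appears at the end of the log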
squeue --job $SLURM_JOBID