#!/bin/bash
# (Use ## to comment out an #SBATCH directive.)
## Don't use spaces after commas in #SBATCH option values.
# You must specify a valid email address!
#SBATCH --mail-user=[email protected]
# Mail on NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --mail-type=FAIL,END
#SBATCH --account=ws_00000
# Job name
#SBATCH --job-name="perimetry"
# Partition
#SBATCH --partition=bdw # all, gpu, phi, long, gpu-invest
# Runtime and memory
#SBATCH --time=12:00:00 # format: days-HH:MM:SS (or HH:MM:SS)
#SBATCH --mem-per-cpu=4G # memory PER CPU, NOT total RAM! Maximum RAM is 246G in total.
# Total RAM requested = mem-per-cpu * cpus-per-task.
# Maximum cores: 20 on all, 10 on long, 24 on gpu, 64 on phi!
#SBATCH --cpus-per-task=20
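# With the values above: 4G/CPU * 20 CPUs = 80G total for this job.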
#SBATCH --nodes=1
#SBATCH --ntasks=1
##SBATCH --ntasks-per-node=1
# on gpu partition
##SBATCH --gres=gpu:rtx3090:1
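# (gres format is name[:type][:count], e.g. gpu:rtx3090:1 = one RTX 3090)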
# Set the working directory for the job.
# All relative paths used in the job script are relative to this directory.
##SBATCH --workdir= # (newer Slurm versions use --chdir instead)
# create job output file
#SBATCH --output=logs/slurm-%A_%a.out
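# (%A = master job ID, %a = array task index; use %j for non-array jobs)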
# For array jobs
# Array job with 6 tasks; run at most 2 tasks concurrently.
##SBATCH --array=1-6%2
# param_store=$HOME/oct_biomarker_classification/args.txt
# target=$(awk -v var="$SLURM_ARRAY_TASK_ID" 'NR==var {print $1}' "$param_store")
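# If the array lines above are enabled, pass the per-task parameter on to the
# script, e.g. (illustrative sketch; assumes main.py accepts a --target flag):
# srun python ./main.py --target "$target"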
# Main job commands (Python entry point) below this line
module load Workspace
module load Anaconda3
eval "$(conda shell.bash hook)"
conda activate perimetry_env
srun python ./main.py
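
# Usage sketch: submit from the directory containing main.py, with an existing
# logs/ directory (Slurm does not create missing output directories):
#   sbatch job_detect.sh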