#!/bin/bash
#SBATCH --partition fhs-fast
#SBATCH --ntasks 5
#SBATCH --nodes 1
#SBATCH --tasks-per-node 5
#SBATCH --cpus-per-task 9
#SBATCH --mem-per-cpu 4G
#SBATCH --time 120:00:00
#SBATCH --job-name fit-nav
#SBATCH --output log/fit-hh-vc1-cell1-NaIVCP80.%j.out
#SBATCH --error log/fit-hh-vc1-cell1-NaIVCP80.%j.err
##SBATCH --mail-type ALL
##SBATCH --mail-user [email protected]
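#
# Resource sketch (assuming five concurrent fits per node, matching the loop
# below): 5 tasks x 9 CPUs x 4G per CPU = 45 CPUs and 180G per node, with
# each srun step taking one 9-CPU / 36G slot.
# Note: the log/ directory used by --output/--error must already exist,
# otherwise Slurm cannot write the job logs.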
source /etc/profile
source /etc/profile.d/modules.sh
source /home/chonloklei/m # Load miniconda
ulimit -s unlimited
# Load module
module purge
# module load intel impi
# NOTE: Running multiple MPI jobs per node is currently only possible with IntelMPI and MVAPICH2 (i.e. OpenMPI does not work).
# https://scitas-data.epfl.ch/confluence/display/DOC/Running+multiple+tasks+on+one+node
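# (The MPI note above only matters if the IntelMPI module load is re-enabled;
# as written, each srun step runs plain Python and parallelises with
# multiprocessing rather than MPI.)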
# Path and Python version checks
pwd
python --version
conda activate /home/chonloklei/nav-artefact-model/env # Load miniconda venv
python --version
which python
# Set up
model="hh"
protocol="NaIVCP80"
level="1"
# We are using multiprocessing, so switch multi-threading off
# https://stackoverflow.com/a/43897781
# export OMP_NUM_THREADS=1
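# If the fits still oversubscribe cores, the BLAS/NumPy thread pools can be
# capped as well (optional; not part of the original setup):
# export OPENBLAS_NUM_THREADS=1
# export MKL_NUM_THREADS=1
# export NUMEXPR_NUM_THREADS=1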
# Run
for data in cell1 cell2
do
    # Launch five independent fit runs for this cell in parallel; the loop
    # index only counts repeats and is not passed to fit.py.
    for i in {1..5}
    do
        srun --exclusive --ntasks=1 --cpus-per-task=${SLURM_CPUS_PER_TASK} --mem=36G python -u fit.py -m $model -p $protocol -d $data -l $level &
        sleep 5
    done
    wait
done
echo "Done."