-
Notifications
You must be signed in to change notification settings - Fork 70
/
job_script_DARWIN
65 lines (48 loc) · 1.95 KB
/
job_script_DARWIN
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
#!/bin/bash
##SLURM Example for skylake system in Cambridge
# Template job script: ##NAME## tokens and "##XX: ... ##" directive comments are
# substituted/extracted by the submitting python script -- do not reformat them.
#SBATCH -p skylake
#SBATCH -A PLANCK-SL2-CPU
#SBATCH --nodes=##NUMNODES##
#SBATCH --ntasks=##NUMTASKS##
#SBATCH --time=##WALLTIME##
#SBATCH --mail-type=FAIL
#SBATCH --cpus-per-task=##OMP##
cd ##ROOTDIR##
#export PICO=/home/aml1005/git/pypico/pypico
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/aml1005/anaconda2/lib
. /etc/profile.d/modules.sh # Leave this line (enables the module command)
module purge # Removes all modules still loaded
module load rhel7/default-peta4 # REQUIRED - loads the basic environment
module load intel/bundles/complib/2019.3 #2017.4 seems to have compiler bugs
#cfitsio is only needed for WMAP
#module load cfitsio
#for PICO runs also need python (check path also set correctly)
#not needed otherwise
#module load python/2.7.14
export OMP_NUM_THREADS=##OMP##
export I_MPI_PIN=##ONERUN##
export I_MPI_HYDRA_RMK=slurm
JOBID=$SLURM_JOB_ID
echo -e "JobID: $JOBID\n======"
echo "Time: $(date)"
echo "Running on master node: $(hostname)"
echo "Current directory: $(pwd)"
if [ "$SLURM_JOB_NODELIST" ]; then
#! Create a machine file (one unique entry per allocated node):
  NODEFILE=$(generate_pbs_nodefile)
  export NODEFILE
  uniq "$NODEFILE" > "scripts/machine.file.$JOBID"
  echo -e "\nNodes allocated:\n================"
  # Unquoted substitution is deliberate: it collapses the stripped node
  # names (domain suffix removed) onto a single space-separated line.
  echo $(sed -e 's/\..*$//g' "scripts/machine.file.$JOBID")
fi
###set things to be used by the python script, which extracts text from here with ##XX: ... ##
### command to use for each run in the batch
### Can set coresPerNode to 32 as physical nodes, but system allows for multiple jobs on one node
### and using OMP_NUM_THREADS=4, 4 chains, 16 in total probably more efficient
##RUN: time srun --mpi=pmi2 ##PROGRAM## ##INI## > ./scripts/##INIBASE##.log 2>&1 ##
### defaults for this script
##DEFAULT_qsub: qsub ##
##DEFAULT_coresPerNode: 16 ##
##DEFAULT_chainsPerNode: 4 ##
##COMMAND##
#wait for background srun processes (launched by ##COMMAND##) to finish
wait