From 396c0af4c03c46f3816f81d85dec0edf21fc684d Mon Sep 17 00:00:00 2001 From: Georgios Kafanas Date: Wed, 17 Apr 2024 09:44:09 +0200 Subject: [PATCH 1/2] [BUG FIX] Remove all uses/mentions of SRUN_CPUS_PER_TASK - The SRUN_CPUS_PER_TASK variable has been deprecated since Slurm 23.11. - srun now reads the SLURM_CPUS_PER_TASK variable. - A note was added on using the `--cpus-per-task` flag of srun when a restriction within an allocation is required. --- .../performance-debugging-tools/advisor.md | 4 +--- .../performance-debugging-tools/aps.md | 2 -- .../performance-debugging-tools/arm-forge.md | 3 +-- .../performance-debugging-tools/inspector.md | 1 - .../performance-debugging-tools/itac.md | 3 +-- .../performance-debugging-tools/scalasca.md | 3 +-- .../performance-debugging-tools/vtune.md | 1 - docs/environment/conda.md | 1 - docs/jobs/gpu.md | 1 - docs/services/jupyter.md | 1 - docs/slurm/index.md | 10 +++++----- docs/slurm/launchers.md | 20 ------------------- docs/software/build.md | 4 ---- docs/software/cae/abaqus.md | 6 ------ docs/software/cae/ansys.md | 4 ---- docs/software/cae/fds.md | 2 -- docs/software/cae/fenics.md | 5 ----- docs/software/cae/meshing-tools.md | 2 -- docs/software/cae/openfoam.md | 2 -- .../electronics/ase.md | 2 -- .../electronics/crystal.md | 2 -- .../electronics/meep.md | 2 -- .../electronics/quantum-espresso.md | 2 -- .../electronics/vasp.md | 2 -- .../molecular-dynamics/cp2k.md | 2 -- .../molecular-dynamics/gromacs.md | 2 -- .../molecular-dynamics/namd.md | 2 -- .../molecular-dynamics/nwchem.md | 2 -- docs/software/maths/julia.md | 4 ---- docs/software/maths/mathematica.md | 6 ------ docs/software/optim/index.md | 6 ------ docs/software/visu/paraview.md | 2 -- 32 files changed, 9 insertions(+), 102 deletions(-) diff --git a/docs/development/performance-debugging-tools/advisor.md b/docs/development/performance-debugging-tools/advisor.md index 9f0813ff..a560ac0c 100644 --- a/docs/development/performance-debugging-tools/advisor.md +++ b/docs/development/performance-debugging-tools/advisor.md @@ -70,10 +70,8 @@ module load toolchain/intel/2019a module load perf/Advisor/2019_update4 module load vis/GTK+/3.24.8-GCCcore-8.2.0 -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK export OMP_NUM_THREADS=16 advixe-cl -collect survey -project-dir my_result -- ./a.out - ``` @@ -112,4 +110,4 @@ The below figure shows the hybrid(MPI+OpenMP) programming analysis results: !!! tip If you find some issues with the instructions above, - please report it to us using [support ticket](https://hpc.uni.lu/support). \ No newline at end of file + please report it to us using [support ticket](https://hpc.uni.lu/support). 
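To make the change described in the commit message concrete, here is a minimal launcher sketch (illustrative only, not part of the patch) of what the updated documentation implies for Slurm 23.11 and later: `srun` picks up `SLURM_CPUS_PER_TASK` from the job environment on its own, and `--cpus-per-task` is only passed explicitly when a job step should use fewer CPUs than the allocation provides. The `./a.out` binary and the CPU counts are placeholders.

```bash
#!/bin/bash -l
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16

# Slurm >= 23.11: no 'export SRUN_CPUS_PER_TASK=...' is needed;
# srun reads the SLURM_CPUS_PER_TASK value set by sbatch/salloc.
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}
srun ./a.out

# To restrict a step to part of the allocation, pass --cpus-per-task
# (and adjust OMP_NUM_THREADS accordingly) on the srun command line.
OMP_NUM_THREADS=4 srun --cpus-per-task=4 ./a.out
```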
diff --git a/docs/development/performance-debugging-tools/aps.md b/docs/development/performance-debugging-tools/aps.md index 20b0b69b..b2b4e6ef 100644 --- a/docs/development/performance-debugging-tools/aps.md +++ b/docs/development/performance-debugging-tools/aps.md @@ -91,7 +91,6 @@ module load swenv/default-env/v1.2-20191021-production module load tools/VTune/2019_update4 module load toolchain/intel/2019a -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK export OMP_NUM_THREADS=16 aps --collection-mode=all -r report_output ./a.out ``` @@ -116,7 +115,6 @@ module load swenv/default-env/v1.2-20191021-production module load tools/VTune/2019_update4 module load toolchain/intel/2019a -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # To collect all the results export MPS_STAT_LEVEL=${SLURM_CPUS_PER_TASK:-1} # An option for the OpenMP+MPI application diff --git a/docs/development/performance-debugging-tools/arm-forge.md b/docs/development/performance-debugging-tools/arm-forge.md index 0da233de..b899f58c 100644 --- a/docs/development/performance-debugging-tools/arm-forge.md +++ b/docs/development/performance-debugging-tools/arm-forge.md @@ -54,7 +54,6 @@ module load toolchain/intel/2019a module load tools/ArmForge/19.1 module load tools/ArmReports/19.1 -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} # for debugging @@ -99,4 +98,4 @@ To see the result !!! tip If you find some issues with the instructions above, - please report it to us using [support ticket](https://hpc.uni.lu/support). \ No newline at end of file + please report it to us using [support ticket](https://hpc.uni.lu/support). diff --git a/docs/development/performance-debugging-tools/inspector.md b/docs/development/performance-debugging-tools/inspector.md index 3b4e5c8a..a1da4797 100644 --- a/docs/development/performance-debugging-tools/inspector.md +++ b/docs/development/performance-debugging-tools/inspector.md @@ -92,7 +92,6 @@ module load toolchain/intel/2019a module load tools/Inspector/2019_update4 module load vis/GTK+/3.24.8-GCCcore-8.2.0 -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK inspxe-cl -collect mi1 -result-dir mi1 -- ./a.out` ``` To see the result: diff --git a/docs/development/performance-debugging-tools/itac.md b/docs/development/performance-debugging-tools/itac.md index ec623578..ef8d6cb9 100644 --- a/docs/development/performance-debugging-tools/itac.md +++ b/docs/development/performance-debugging-tools/itac.md @@ -53,7 +53,6 @@ module load toolchain/intel/2019a module load tools/itac/2019.4.036 module load vis/GTK+/3.24.8-GCCcore-8.2.0 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK $ export OMP_NUM_THREADS=16 $ -trace-collective ./a.out ``` @@ -102,4 +101,4 @@ $ stftool tracefile.stf --print-statistics !!! tip If you find some issues with the instructions above, - please report it to us using [support ticket](https://hpc.uni.lu/support). \ No newline at end of file + please report it to us using [support ticket](https://hpc.uni.lu/support). 
diff --git a/docs/development/performance-debugging-tools/scalasca.md b/docs/development/performance-debugging-tools/scalasca.md index 8c8bc3ad..381c2d85 100644 --- a/docs/development/performance-debugging-tools/scalasca.md +++ b/docs/development/performance-debugging-tools/scalasca.md @@ -52,7 +52,6 @@ module load toolchain/foss/2018a module load perf/Scalasca/2.3.1-foss-2018a module load perf/Score-P/3.1-foss-2018a -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK export OMP_NUM_THREADS=16 # analyze @@ -100,4 +99,4 @@ $ scalasca -examine result_folder ``` !!! tip If you find some issues with the instructions above, - please report it to us using [support ticket](https://hpc.uni.lu/support). \ No newline at end of file + please report it to us using [support ticket](https://hpc.uni.lu/support). diff --git a/docs/development/performance-debugging-tools/vtune.md b/docs/development/performance-debugging-tools/vtune.md index 66f9c1af..27b8e0e2 100644 --- a/docs/development/performance-debugging-tools/vtune.md +++ b/docs/development/performance-debugging-tools/vtune.md @@ -48,7 +48,6 @@ module load toolchain/intel/2019a module load tools/VTune/2019_update4 module load vis/GTK+/3.24.8-GCCcore-8.2.0 -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK export OMP_NUM_THREADS=16 amplxe-cl -collect hotspots-r my_result ./a.out ``` diff --git a/docs/environment/conda.md b/docs/environment/conda.md index f0ab75e3..a3d7d869 100644 --- a/docs/environment/conda.md +++ b/docs/environment/conda.md @@ -148,7 +148,6 @@ echo "Numb. of cores: ${SLURM_CPUS_PER_TASK}" micromamba activate R-project -export SRUN_CPUS_PER_TASK="${SLURM_CPUS_PER_TASK}" export OMP_NUM_THREADS=1 srun Rscript --no-save --no-restore script.R diff --git a/docs/jobs/gpu.md b/docs/jobs/gpu.md index 3bb1474a..ebd4a197 100644 --- a/docs/jobs/gpu.md +++ b/docs/jobs/gpu.md @@ -28,7 +28,6 @@ print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" module load numlib/cuDNN # Example with cuDNN -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # Propagate Slurm 'cpus-per-task' to srun [...] ``` diff --git a/docs/services/jupyter.md b/docs/services/jupyter.md index fcf2ce09..2deee0ab 100644 --- a/docs/services/jupyter.md +++ b/docs/services/jupyter.md @@ -59,7 +59,6 @@ The following script is an example how to proceed: print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # Python 3.X by default (also on system) module load lang/Python diff --git a/docs/slurm/index.md b/docs/slurm/index.md index b590a27e..7cdebf4f 100644 --- a/docs/slurm/index.md +++ b/docs/slurm/index.md @@ -102,12 +102,12 @@ Within a job, you aim at running a certain number of **tasks**, and Slurm allow The total number of tasks defined in a given job is stored in the `$SLURM_NTASKS` environment variable. -!!! note "--cpus-per-task in srun since Slurm 22.05" - Beginning with Slurm 22.05, srun will not inherit the --cpus-per-task value requested by salloc or sbatch. It must be requested again with the call to srun or set with the SRUN_CPUS_PER_TASK environment variable if desired for the task(s). +!!! note "--cpus-per-task in `srun` in Slurm 23.11 and later" + In the latest versions of Slurm `srun` inherits the --cpus-per-task value requested by salloc or sbatch by reading the value of `SLURM_CPUS_PER_TASK`. 
-This is very convenient to abstract from the job context to run MPI tasks/processes in parallel using for instance: +If you would like to launch multiple programs in a single allocation/batch script, divide the resources accordingly by requesting the appropriate share of resources with each `srun` invocation, for instance: ```bash -srun -c ${SLURM_CPUS_PER_TASK} -n ${SLURM_NTASKS} [...] +srun --cpus-per-task <cpus> --ntasks <tasks> [...] ``` We encourage you to **always** explicitly specify upon resource allocation the number of tasks you want _per_ node/socket (`--ntasks-per-node --ntasks-per-socket `), to easily scale on multiple nodes with `-N `. Adapt the number of threads and the settings to match the physical NUMA characteristics of the nodes @@ -344,5 +344,5 @@ submitted. | `-N ` | `SLURM_JOB_NUM_NODES` or
`SLURM_NNODES` | | | `--ntasks-per-node=` | `SLURM_NTASKS_PER_NODE` | | | `--ntasks-per-socket=` | `SLURM_NTASKS_PER_SOCKET` | | -| `-c ` | `SLURM_CPUS_PER_TASK` | `OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}` and `SRUN_CPUS_PER_TASK=${SLURM_CPUS_PER_TASK}` | +| `-c ` | `SLURM_CPUS_PER_TASK` | `OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}` | | | `SLURM_NTASKS`
Total number of tasks | `srun -n $SLURM_NTASKS [...]` | diff --git a/docs/slurm/launchers.md b/docs/slurm/launchers.md index 4b4528ce..9b28bf0e 100644 --- a/docs/slurm/launchers.md +++ b/docs/slurm/launchers.md @@ -103,8 +103,6 @@ When setting your default `#SBATCH` directive, always keep in mind your expected module purge || print_error_and_exit "No 'module' command" # List modules required for execution of the task module load <...> - # Propagate Slurm "-c" option to srun - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # [...] ``` @@ -121,7 +119,6 @@ When setting your default `#SBATCH` directive, always keep in mind your expected print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load <...> # [...] ``` @@ -139,7 +136,6 @@ When setting your default `#SBATCH` directive, always keep in mind your expected print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load <...> # [...] ``` @@ -193,7 +189,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # C/C++: module load toolchain/intel # OR: module load toolchain/foss # Java: module load lang/Java/1.8 # Ruby/Perl/Rust...: module load lang/{Ruby,Perl,Rust...} @@ -216,7 +211,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # Python 3.X by default (also on system) module load lang/Python # module load lang/SciPy-bundle @@ -240,7 +234,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load lang/R export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} OPTS=$* @@ -262,7 +255,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load math/MATLAB matlab -nodisplay -nosplash < INPUTFILE.m > OUTPUTFILE.out @@ -287,7 +279,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load <...> # [...] ``` @@ -305,7 +296,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load <...> # USE apps compiled against the {foss,intel}cuda toolchain ! 
# Ex: # module load numlib/cuDNN @@ -339,7 +329,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/foss export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} @@ -361,7 +350,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/foss export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} @@ -391,7 +379,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/intel OPTS=$* @@ -413,7 +400,6 @@ Luckily, we have prepared a [generic GNU Parallel launcher](https://github.com/U print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/intel OPTS=$* @@ -442,7 +428,6 @@ You may want to use [PMIx](https://pmix.github.io/standard) as MPI initiator -- print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/foss module load mpi/OpenMPI OPTS=$* @@ -464,7 +449,6 @@ You may want to use [PMIx](https://pmix.github.io/standard) as MPI initiator -- print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/foss module load mpi/OpenMPI OPTS=$* @@ -489,7 +473,6 @@ You may want to use [PMIx](https://pmix.github.io/standard) as MPI initiator -- print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/intel export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} OPTS=$* @@ -512,7 +495,6 @@ You may want to use [PMIx](https://pmix.github.io/standard) as MPI initiator -- print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/intel export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} OPTS=$* @@ -537,7 +519,6 @@ You may want to use [PMIx](https://pmix.github.io/standard) as MPI initiator -- print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/foss module load mpi/OpenMPI export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} @@ -561,7 +542,6 @@ You may want to use [PMIx](https://pmix.github.io/standard) as MPI initiator -- print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load toolchain/foss module load mpi/OpenMPI export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1} diff --git a/docs/software/build.md b/docs/software/build.md index 0a0a9a0f..a0728c72 100644 --- a/docs/software/build.md +++ 
b/docs/software/build.md @@ -173,8 +173,6 @@ From that point, the compiled software and associated module is available in you #SBATCH -c print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } -# Propagate Slurm "cpus-per-task" to srun -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # Safeguard for NOT running this launcher on access/login nodes module purge || print_error_and_exit "No 'module' command" @@ -211,8 +209,6 @@ From that point, the compiled software and associated module is available in the #SBATCH -c print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } -# Propagate Slurm "cpus-per-task" to srun -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # Safeguard for NOT running this launcher on access/login nodes module purge || print_error_and_exit "No 'module' command" diff --git a/docs/software/cae/abaqus.md b/docs/software/cae/abaqus.md index a1ad760c..41f52700 100644 --- a/docs/software/cae/abaqus.md +++ b/docs/software/cae/abaqus.md @@ -87,9 +87,6 @@ $ si --x11 -c 8 # Abaqus mp_mode=threads test # OR $ si --x11 --ntask-per-node 8 # abaqus mp_mode=mpi test -# Propagate Slurm "cpus-per-task / -c" to srun -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - # Load the module ABAQUS and needed environment (node)$ module purge (node)$ module load cae/ABAQUS @@ -161,7 +158,6 @@ abaqus job= resume print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load cae/ABAQUS # export LM_LICENSE_FILE=[...] unset SLURM_GTIDS @@ -185,7 +181,6 @@ abaqus job= resume print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load cae/ABAQUS # export LM_LICENSE_FILE=[...] unset SLURM_GTIDS @@ -211,7 +206,6 @@ abaqus job= resume print_error_and_exit() { echo "***ERROR*** $*"; exit 1; } module purge || print_error_and_exit "No 'module' command" - export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK module load cae/ABAQUS # export LM_LICENSE_FILE=[...] 
unset SLURM_GTIDS diff --git a/docs/software/cae/ansys.md b/docs/software/cae/ansys.md index 54ce4b34..ae6ee3f5 100644 --- a/docs/software/cae/ansys.md +++ b/docs/software/cae/ansys.md @@ -25,9 +25,6 @@ $ ssh -X iris-cluster # Reserve the node for interactive computation $ salloc -p interactive --time=00:30:00 --ntasks 1 -c 4 --x11 -# Propagate Slurm "cpus-per-task / -c" to srun -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - # Load the required version of ANSYS and needed environment $ module purge $ module load toolchain/intel/2019a @@ -70,7 +67,6 @@ module load tools/ANSYS/19.4 # The Input file defFile=Benchmark.def -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK MYHOSTLIST=$(srun hostname | sort | uniq -c | awk '{print $2 "*" $1}' | paste -sd, -) echo $MYHOSTLIST cfx5solve -double -def $defFile -start-method "Platform MPI Distributed Parallel" -par-dist $MYHOSTLIST diff --git a/docs/software/cae/fds.md b/docs/software/cae/fds.md index 9c74b3de..ac53fab6 100644 --- a/docs/software/cae/fds.md +++ b/docs/software/cae/fds.md @@ -24,7 +24,6 @@ $ salloc -p interactive --time=00:30:00 --ntasks 1 -c 4 --x11 $ module purge $ module load swenv/default-env/devel $ module load phys/FDS/6.7.3-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK # Example in fds $ fds example.fds @@ -61,7 +60,6 @@ module purge module load swenv/default-env/devel module load phys/FDS/6.7.3-intel-2019a -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK srun fds example.fds ``` diff --git a/docs/software/cae/fenics.md b/docs/software/cae/fenics.md index 2a8d127e..40fabfbb 100644 --- a/docs/software/cae/fenics.md +++ b/docs/software/cae/fenics.md @@ -19,8 +19,6 @@ $ ssh -X iris-cluster # OR ssh -Y iris-cluster on Mac $ si --x11 --ntasks-per-node 1 -c 4 # salloc -p interactive --qos debug -C batch --x11 --ntasks-per-node 1 -c 4 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - # Go to scratch directory $ cds @@ -63,8 +61,6 @@ $ ssh -X iris-cluster # or ssh -Y iris-cluster on Mac $ si --ntasks-per-node 1 -c 4 --x11 # salloc -p interactive --qos debug -C batch --x11 --ntasks-per-node 1 -c 4 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - # Activate anaconda $ source /${SCRATCH}/Anaconda3/bin/activate @@ -98,7 +94,6 @@ source ${SCRATCH}/Anaconda3/bin/activate conda activate fenicsproject # execute the poisson.py through python -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK srun python3 Poisson.py ``` diff --git a/docs/software/cae/meshing-tools.md b/docs/software/cae/meshing-tools.md index a8d875b0..721995f7 100644 --- a/docs/software/cae/meshing-tools.md +++ b/docs/software/cae/meshing-tools.md @@ -38,7 +38,6 @@ $ module purge $ module load swenv/default-env/v1.2-20191021-production $ module load cae/gmsh/4.4.0-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK $ gmsh example.geo ``` ## Salome @@ -74,7 +73,6 @@ $ module purge $ module load swenv/default-env/v1.2-20191021-production $ module load cae/Salome/8.5.0-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK $ salome start ``` diff --git a/docs/software/cae/openfoam.md b/docs/software/cae/openfoam.md index 0a849911..a3713cc5 100644 --- a/docs/software/cae/openfoam.md +++ b/docs/software/cae/openfoam.md @@ -29,7 +29,6 @@ $ module load swenv/default-env/v1.1-20180716-production $ module load cae/OpenFOAM/v1712-intel-2018a # Load the OpenFOAM environment -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK $ source $FOAM_BASH $ mkdir OpenFOAM @@ -91,7 +90,6 @@ module load swenv/default-env/v1.1-20180716-production module load 
cae/OpenFOAM/v1712-intel-2018a # Load the OpenFOAM environment -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK source $FOAM_BASH srun interFoam -parallel diff --git a/docs/software/computational-chemistry/electronics/ase.md b/docs/software/computational-chemistry/electronics/ase.md index 91966ad4..4442719e 100644 --- a/docs/software/computational-chemistry/electronics/ase.md +++ b/docs/software/computational-chemistry/electronics/ase.md @@ -33,8 +33,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load chem/ASE/3.17.0-intel-2019a-Python-3.7.2 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ python3 example.py ``` diff --git a/docs/software/computational-chemistry/electronics/crystal.md b/docs/software/computational-chemistry/electronics/crystal.md index a57aee85..6dc2ba52 100644 --- a/docs/software/computational-chemistry/electronics/crystal.md +++ b/docs/software/computational-chemistry/electronics/crystal.md @@ -36,8 +36,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load chem/CRYSTAL/17-intel-2019a-1.0.2 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ Pcrystal >& log.out ``` diff --git a/docs/software/computational-chemistry/electronics/meep.md b/docs/software/computational-chemistry/electronics/meep.md index 55af1436..073592e8 100644 --- a/docs/software/computational-chemistry/electronics/meep.md +++ b/docs/software/computational-chemistry/electronics/meep.md @@ -29,8 +29,6 @@ $ module load swenv/default-env/devel # Eventually (only relevant on 2019a softw $ module load toolchain/intel/2019a $ module load phys/Meep/1.4.3-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ meep example.ctl > result_output ``` diff --git a/docs/software/computational-chemistry/electronics/quantum-espresso.md b/docs/software/computational-chemistry/electronics/quantum-espresso.md index 723d9b50..550a3273 100644 --- a/docs/software/computational-chemistry/electronics/quantum-espresso.md +++ b/docs/software/computational-chemistry/electronics/quantum-espresso.md @@ -41,8 +41,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load chem/QuantumESPRESSO/6.4.1-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ pw.x -input example.in ``` diff --git a/docs/software/computational-chemistry/electronics/vasp.md b/docs/software/computational-chemistry/electronics/vasp.md index 7536ee23..cf2b1c18 100644 --- a/docs/software/computational-chemistry/electronics/vasp.md +++ b/docs/software/computational-chemistry/electronics/vasp.md @@ -29,8 +29,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load phys/VASP/5.4.4-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ vasp_[std/gam/ncl] ``` diff --git a/docs/software/computational-chemistry/molecular-dynamics/cp2k.md b/docs/software/computational-chemistry/molecular-dynamics/cp2k.md index 484ce9db..f117d433 100644 --- a/docs/software/computational-chemistry/molecular-dynamics/cp2k.md +++ b/docs/software/computational-chemistry/molecular-dynamics/cp2k.md @@ -34,8 +34,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load chem/CP2K/6.1-intel-2018a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ cp2k.popt -i example.inp ``` diff --git 
a/docs/software/computational-chemistry/molecular-dynamics/gromacs.md b/docs/software/computational-chemistry/molecular-dynamics/gromacs.md index 5488a9db..54692b29 100644 --- a/docs/software/computational-chemistry/molecular-dynamics/gromacs.md +++ b/docs/software/computational-chemistry/molecular-dynamics/gromacs.md @@ -34,8 +34,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load bio/GROMACS/2019.2-intel-2019a -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ gmx_mpi mdrun ``` diff --git a/docs/software/computational-chemistry/molecular-dynamics/namd.md b/docs/software/computational-chemistry/molecular-dynamics/namd.md index ae206eac..a5cae323 100644 --- a/docs/software/computational-chemistry/molecular-dynamics/namd.md +++ b/docs/software/computational-chemistry/molecular-dynamics/namd.md @@ -33,8 +33,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load chem/NAMD/2.12-intel-2018a-mpi -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ namd2 +setcpuaffinity +p4 config_file > output_file ``` diff --git a/docs/software/computational-chemistry/molecular-dynamics/nwchem.md b/docs/software/computational-chemistry/molecular-dynamics/nwchem.md index 56831466..349ee1d9 100644 --- a/docs/software/computational-chemistry/molecular-dynamics/nwchem.md +++ b/docs/software/computational-chemistry/molecular-dynamics/nwchem.md @@ -29,8 +29,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load chem/NWChem/6.8.revision47-intel-2019a-Python-2.7.15 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ nwchem example ``` diff --git a/docs/software/maths/julia.md b/docs/software/maths/julia.md index 16b753d3..62a8cab0 100644 --- a/docs/software/maths/julia.md +++ b/docs/software/maths/julia.md @@ -24,8 +24,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load lang/Julia/1.3.0 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ julia ``` @@ -46,8 +44,6 @@ module purge module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) module load lang/Julia/1.3.0 -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - julia {example}.jl ``` diff --git a/docs/software/maths/mathematica.md b/docs/software/maths/mathematica.md index 9738ed79..991ea5db 100644 --- a/docs/software/maths/mathematica.md +++ b/docs/software/maths/mathematica.md @@ -30,8 +30,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load math/Mathematica/12.0.0 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ math ``` @@ -52,8 +50,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load math/Mathematica/12.0.0 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ srun -n ${SLURM_NTASKS} math -run < {mathematica-script-file}.m ``` @@ -73,8 +69,6 @@ $ module purge $ module load swenv/default-env/devel # Eventually (only relevant on 2019a software environment) $ module load math/Mathematica/12.0.0 -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ srun -n ${SLURM_NTASKS} math -run < {mathematica-script-file}.m ``` diff --git a/docs/software/optim/index.md b/docs/software/optim/index.md index 3a99bd93..1b883a3d 100644 --- a/docs/software/optim/index.md +++ 
b/docs/software/optim/index.md @@ -105,8 +105,6 @@ The below launcher is an example showing how to reserve ressources on multiple n #SBATCH --qos=normal module load math/CPLEX -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - # Some variables MPS_FILE=$1 RES_FILE=$2 @@ -152,8 +150,6 @@ The script below allows you to start multi-threaded MIP optimization with Gurobi # Load Gurobi module load math/Gurobi -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - # Some variable MPS_FILE=$1 RES_FILE=$2 @@ -184,8 +180,6 @@ mu # Load gurobi module load math/Gurobi -export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - export MASTER_PORT=61000 export SLAVE_PORT=61000 export MPS_FILE=$1 diff --git a/docs/software/visu/paraview.md b/docs/software/visu/paraview.md index cf8a443a..7a1f8279 100644 --- a/docs/software/visu/paraview.md +++ b/docs/software/visu/paraview.md @@ -44,8 +44,6 @@ $ module purge $ module load swenv/default-env/latest $ module load vis/ParaView/5.6.2-intel-2019a-mpi -$ export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK - $ paraview & ``` From b7ba14a7ad036f81aad215213659195292d16709 Mon Sep 17 00:00:00 2001 From: Georgios Kafanas Date: Wed, 17 Apr 2024 09:53:25 +0200 Subject: [PATCH 2/2] [REFACTORING] Explain special mentions of --cpus-per-task for srun --- docs/slurm/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/slurm/index.md b/docs/slurm/index.md index 7cdebf4f..06f4ab9b 100644 --- a/docs/slurm/index.md +++ b/docs/slurm/index.md @@ -102,8 +102,8 @@ Within a job, you aim at running a certain number of **tasks**, and Slurm allow The total number of tasks defined in a given job is stored in the `$SLURM_NTASKS` environment variable. -!!! note "--cpus-per-task in `srun` in Slurm 23.11 and later" - In the latest versions of Slurm `srun` inherits the --cpus-per-task value requested by salloc or sbatch by reading the value of `SLURM_CPUS_PER_TASK`. +!!! note "The --cpus-per-task option of srun in Slurm 23.11 and later" + Since Slurm 23.11, `srun` inherits the `--cpus-per-task` value requested by `salloc` or `sbatch` by reading the value of `SLURM_CPUS_PER_TASK`, as it does for any other option. _This behavior differs from some older versions (starting with Slurm 22.05), where special handling was required to propagate the `--cpus-per-task` option to `srun`._ If you would like to launch multiple programs in a single allocation/batch script, divide the resources accordingly by requesting the appropriate share of resources with each `srun` invocation, for instance: ```bash
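As a companion to the "multiple programs in a single allocation/batch script" guidance referenced in the docs/slurm/index.md hunk above, the following is a hedged sketch (illustrative only, not part of the patch; `./program_a` and `./program_b` are hypothetical binaries, and the task/CPU counts are placeholders) of dividing an allocation between two concurrent job steps by stating each step's share explicitly on the `srun` command line:

```bash
#!/bin/bash -l
#SBATCH --nodes=1
#SBATCH --ntasks=4
#SBATCH --cpus-per-task=4

# Run two programs side by side inside one allocation; each srun states
# its own share of tasks/CPUs, and --exact keeps the steps from overlapping.
srun --ntasks=2 --cpus-per-task=4 --exact ./program_a &  # hypothetical binary
srun --ntasks=2 --cpus-per-task=4 --exact ./program_b &  # hypothetical binary
wait  # wait for both job steps to finish
```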