Commit 9f31a145 authored by Muck, Katrin

corrected cpus-per-task arguments (cpus means logical cpus in our slurm configuration)

parent c66b5861
Merge request !2: corrected cpus-per-task arguments (cpus means logical cpus in our slurm configuration)
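
In this Slurm configuration, --cpus-per-task counts logical CPUs (hardware threads), not physical cores, so each value in the diffs below is doubled to keep the intended number of physical cores. A minimal sketch of the arithmetic, assuming 2 hardware threads per physical core (an assumption inferred from the doubled values on the skylake_0096 nodes):

# sketch only: threads_per_core=2 is an assumption inferred from the doubling below
physical_cores_wanted=4
threads_per_core=2
cpus_per_task=$(( physical_cores_wanted * threads_per_core ))
echo "use --cpus-per-task=${cpus_per_task} for ${physical_cores_wanted} physical cores"  # -> 8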
@@ -22,10 +22,10 @@
 #SBATCH --job-name="full node; single program"
 #SBATCH --nodes=1
 #SBATCH --ntasks=4
-#SBATCH --cpus-per-task=12
+#SBATCH --cpus-per-task=24
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
 ../util/print_job_info.sh
-srun --cpus-per-task=12 ../util/print_task_info.sh
+srun --cpus-per-task=24 ../util/print_task_info.sh
@@ -24,7 +24,7 @@
 #SBATCH --job-name="task based; 4 tasks; same program; 4 cores / task; 8 GB"
 #SBATCH --ntasks=4
-#SBATCH --cpus-per-task=4
+#SBATCH --cpus-per-task=8
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
@@ -33,4 +33,4 @@
 # from sbatch docs:
 # Beginning with 22.05, srun will not inherit the --cpus-per-task value
-srun --cpus-per-task=4 ../util/print_task_info.sh
+srun --cpus-per-task=8 ../util/print_task_info.sh
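
As the in-script comment notes, since Slurm 22.05 srun no longer inherits --cpus-per-task from the sbatch allocation, which is why the flag is repeated on the srun line above. An alternative sketch using the SRUN_CPUS_PER_TASK input environment variable, which srun does read:

# alternative (sketch): export the value once instead of repeating the flag on every srun line
export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK
srun ../util/print_task_info.sh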
@@ -24,7 +24,7 @@
 #SBATCH --job-name="task based; 4 tasks; different program; 4 cores / task; 8 GB"
 #SBATCH --ntasks=4
-#SBATCH --cpus-per-task=4
+#SBATCH --cpus-per-task=8
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
@@ -33,4 +33,4 @@
 # from sbatch docs:
 # Beginning with 22.05, srun will not inherit the --cpus-per-task value
-srun --cpus-per-task=4 --multi-prog "04-task-based-multiple-programs.conf"
+srun --cpus-per-task=8 --multi-prog "04-task-based-multiple-programs.conf"
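
The contents of 04-task-based-multiple-programs.conf are not part of this diff. For illustration, a hypothetical file in srun's --multi-prog format, which maps task ranks to commands (%t expands to the task rank):

# hypothetical 04-task-based-multiple-programs.conf (names made up for illustration)
0      ./program_a
1-3    ./program_b %t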
@@ -31,7 +31,7 @@
 #SBATCH --qos=skylake_0096
 #SBATCH hetjob
 #SBATCH --ntasks=2
-#SBATCH --cpus-per-task=4
+#SBATCH --cpus-per-task=8
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
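
Only the second component of the heterogeneous job is visible in this hunk; the #SBATCH hetjob line separates it from the first component further up in the script. A sketch of how a job step could address both components, using srun's documented --het-group option:

# sketch: run the step on both hetjob components
# (group 0 = first component, group 1 = the 2-task component above)
srun --het-group=0,1 ../util/print_task_info.sh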
@@ -22,11 +22,11 @@
 #SBATCH --job-name="job array; 2 jobs; same program; 1 task; 16 physical cores; 4 GB"
 #SBATCH --array=0-1
 #SBATCH --ntasks=1
-#SBATCH --cpus-per-task=16
+#SBATCH --cpus-per-task=32
 #SBATCH --mem=4G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
 ../util/print_job_info.sh
-srun --cpus-per-task=16 ../util/print_task_info.sh
+srun --cpus-per-task=32 ../util/print_task_info.sh
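
Each array element receives its index in SLURM_ARRAY_TASK_ID, which is the usual way to vary the input per job. A hypothetical sketch (input path and program name are made up for illustration):

# hypothetical: select a different input per array element
input_file="data/input_${SLURM_ARRAY_TASK_ID}.txt"
srun --cpus-per-task=32 ./process "$input_file"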
@@ -22,10 +22,10 @@
 #SBATCH --array=0-1
 #SBATCH --nodes=1
 #SBATCH --ntasks=4
-#SBATCH --cpus-per-task=12
+#SBATCH --cpus-per-task=24
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
 ../util/print_job_info.sh
-srun --cpus-per-task=12 ../util/print_task_info.sh
+srun --cpus-per-task=24 ../util/print_task_info.sh
@@ -42,8 +42,8 @@ echo "nodes_num: $nodes_num"
 tasks_per_node=$SLURM_NTASKS_PER_NODE
 echo "tasks_per_node: $tasks_per_node"
-# (physical) cpus per node (slurm cpus on node gives us logical cores)
-cpus_per_node=$(( SLURM_CPUS_ON_NODE / 2 ))
+# logical cpus per node (slurm cpus on node gives us logical cores)
+cpus_per_node=$(( SLURM_CPUS_ON_NODE ))
 echo "cpus_per_node: $cpus_per_node"
 # cpus per task: ray itself should do the work scheduling and hardware management
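
Later in the same script (outside this diff) cpus_per_node is presumably handed to Ray, which then does its own work scheduling. A hypothetical sketch of that hand-off, so Ray's scheduler sees all logical CPUs of the node:

# hypothetical continuation (not part of the diff): start the Ray head
# with the logical CPU count computed above
srun --ntasks=1 ray start --head --num-cpus="$cpus_per_node" &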