diff --git a/02-skylake_0096-single-node/01-single-node-single-prog.sh b/02-skylake_0096-single-node/01-single-node-single-prog.sh
index f3fca7f10596d0c6e9996d7dfc42f6cbfce4f001..def37d63e97ff9be43ac1be309fc6540eced1f9b 100644
--- a/02-skylake_0096-single-node/01-single-node-single-prog.sh
+++ b/02-skylake_0096-single-node/01-single-node-single-prog.sh
@@ -24,6 +24,7 @@
 #SBATCH --nodes=1
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/02-skylake_0096-single-node/02-single-node-multi-prog.sh b/02-skylake_0096-single-node/02-single-node-multi-prog.sh
index 3a76a908565e23b55ade2465ec0bc117ed9f08a1..8f39850eac650374b5e17637539d24b02be15d73 100644
--- a/02-skylake_0096-single-node/02-single-node-multi-prog.sh
+++ b/02-skylake_0096-single-node/02-single-node-multi-prog.sh
@@ -25,6 +25,7 @@
 #SBATCH --nodes=1
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/02-skylake_0096-single-node/03-single-node-multiple-tasks.sh b/02-skylake_0096-single-node/03-single-node-multiple-tasks.sh
index df20d95dd69cd77e1205f9a9e58969aee1fdb980..ba666a1951c24c6b5186271e2dd7fc1ebb3cbc5b 100644
--- a/02-skylake_0096-single-node/03-single-node-multiple-tasks.sh
+++ b/02-skylake_0096-single-node/03-single-node-multiple-tasks.sh
@@ -25,6 +25,7 @@
 #SBATCH --cpus-per-task=24
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/03-skylake_0096-partial-node/01a-core-based-single-prog.sh b/03-skylake_0096-partial-node/01a-core-based-single-prog.sh
index 0cf35cbd6da6c180fae3b30470389ef6bf07f828..48d9d787f04f50f4bce7096732a5ebebb0200a59 100644
--- a/03-skylake_0096-partial-node/01a-core-based-single-prog.sh
+++ b/03-skylake_0096-partial-node/01a-core-based-single-prog.sh
@@ -25,5 +25,6 @@
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
diff --git a/03-skylake_0096-partial-node/01b-core-based-multi-prog.sh b/03-skylake_0096-partial-node/01b-core-based-multi-prog.sh
index 2a549902dabc0d763bab6da00670074d830a73d7..ad0667a5e8092635e817bc3b1b5155c41cfc7ad2 100644
--- a/03-skylake_0096-partial-node/01b-core-based-multi-prog.sh
+++ b/03-skylake_0096-partial-node/01b-core-based-multi-prog.sh
@@ -26,6 +26,7 @@
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/03-skylake_0096-partial-node/02-task-based-default.sh b/03-skylake_0096-partial-node/02-task-based-default.sh
index 33d329d488e3c60bc0107fc8963dc9cc8f546704..99d52be242bd113c959007fd4c2864c2db8648cf 100644
--- a/03-skylake_0096-partial-node/02-task-based-default.sh
+++ b/03-skylake_0096-partial-node/02-task-based-default.sh
@@ -25,6 +25,7 @@
 #SBATCH --mem=4G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/03-skylake_0096-partial-node/03-task-based-multiple-cpus.sh b/03-skylake_0096-partial-node/03-task-based-multiple-cpus.sh
index e3efa3f292cb0980936551166b200e79a16e2416..5f516ed274a2d5f38ecd46688999fef71ffcf1ad 100644
--- a/03-skylake_0096-partial-node/03-task-based-multiple-cpus.sh
+++ b/03-skylake_0096-partial-node/03-task-based-multiple-cpus.sh
@@ -28,6 +28,7 @@
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/03-skylake_0096-partial-node/04-task-based-multiple-programs.sh b/03-skylake_0096-partial-node/04-task-based-multiple-programs.sh
index 331ee8e8b0f154ec8e806c27c7c8a7e5385f0dc4..fbb08020868236b48aef6c61ff8bebc361d38b1d 100644
--- a/03-skylake_0096-partial-node/04-task-based-multiple-programs.sh
+++ b/03-skylake_0096-partial-node/04-task-based-multiple-programs.sh
@@ -28,6 +28,7 @@
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/03-skylake_0096-partial-node/05-task-based-hetjob.sh b/03-skylake_0096-partial-node/05-task-based-hetjob.sh
index 0d030863f612bcec2ab7f51a099f229ac48eb6ff..8d1bf6f6feff3352d1b6f7fa9cb8b5b42411da92 100644
--- a/03-skylake_0096-partial-node/05-task-based-hetjob.sh
+++ b/03-skylake_0096-partial-node/05-task-based-hetjob.sh
@@ -35,6 +35,7 @@
 #SBATCH --mem=8G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/04-skylake_0096-multi-node/01-multi-node-full-node.sh b/04-skylake_0096-multi-node/01-multi-node-full-node.sh
index 34cbb1d6804401637bcc62ff58124feabbfbc43a..8fceefa71c7adb5fec346fbb7894cf24872dcbd6 100644
--- a/04-skylake_0096-multi-node/01-multi-node-full-node.sh
+++ b/04-skylake_0096-multi-node/01-multi-node-full-node.sh
@@ -26,6 +26,7 @@
 #SBATCH --cpus-per-task=48
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/05-skylake_0096-job-array/01-core-based-job-array.sh b/05-skylake_0096-job-array/01-core-based-job-array.sh
index b54a93ebb4b12f470b16d5ca5cb6bfbc05522dd4..2e33fff61ee2f4cc57cdd440f674de656bd52bc8 100644
--- a/05-skylake_0096-job-array/01-core-based-job-array.sh
+++ b/05-skylake_0096-job-array/01-core-based-job-array.sh
@@ -26,6 +26,7 @@
 #SBATCH --mem=4G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
  
diff --git a/05-skylake_0096-job-array/02-task-based-job-array.sh b/05-skylake_0096-job-array/02-task-based-job-array.sh
index db43815fd8ce67fe99c8bb1945365970561fe1c3..949c16522cf5fd866dee2c0437d1bd4ef223a8de 100644
--- a/05-skylake_0096-job-array/02-task-based-job-array.sh
+++ b/05-skylake_0096-job-array/02-task-based-job-array.sh
@@ -26,6 +26,7 @@
 #SBATCH --mem=4G
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/05-skylake_0096-job-array/03-node-based-job-array.sh b/05-skylake_0096-job-array/03-node-based-job-array.sh
index 87d5cb6920b7530d61ac8ec604cac3d8c3eb8ecb..5aef036107a8e7cdd32271bf429b1aa9c0a8a228 100644
--- a/05-skylake_0096-job-array/03-node-based-job-array.sh
+++ b/05-skylake_0096-job-array/03-node-based-job-array.sh
@@ -20,6 +20,7 @@
 #SBATCH --nodes=1
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/05-skylake_0096-job-array/04-node-based-job-array-throttled.sh b/05-skylake_0096-job-array/04-node-based-job-array-throttled.sh
index 2d1d1e6754b73ab018ea1c741acdf490ca990faa..96a364761b4273b2250f19ebaef162abdd1f8237 100644
--- a/05-skylake_0096-job-array/04-node-based-job-array-throttled.sh
+++ b/05-skylake_0096-job-array/04-node-based-job-array-throttled.sh
@@ -21,6 +21,7 @@
 #SBATCH --nodes=1
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/05-skylake_0096-job-array/05-node-based-multiple-tasks-job-array.sh b/05-skylake_0096-job-array/05-node-based-multiple-tasks-job-array.sh
index 38bc1800b5ab233faf8c567c39665b9917fe9463..caba7b442a9d375bbbebb1bec9ad0fc4402085d8 100644
--- a/05-skylake_0096-job-array/05-node-based-multiple-tasks-job-array.sh
+++ b/05-skylake_0096-job-array/05-node-based-multiple-tasks-job-array.sh
@@ -25,6 +25,7 @@
 #SBATCH --cpus-per-task=24
 #SBATCH --partition=skylake_0096
 #SBATCH --qos=skylake_0096
+#SBATCH --time=00:05:00                 # set low time limit for testing
 
 ../util/print_job_info.sh
 
diff --git a/06-zen3_0512_a100x2-gpu-based/01-half-node.sh b/06-zen3_0512_a100x2-gpu-based/01-half-node.sh
index e67ee30cadac06a6ce3985d1b13d47b71e05949a..b046670b3e5b3096d4cdbdbd09aff5067231ba57 100644
--- a/06-zen3_0512_a100x2-gpu-based/01-half-node.sh
+++ b/06-zen3_0512_a100x2-gpu-based/01-half-node.sh
@@ -24,5 +24,11 @@
 #SBATCH --partition=zen3_0512_a100x2
 #SBATCH --qos=zen3_0512_a100x2
 #SBATCH --gres=gpu:1
+#SBATCH --time=00:05:00                 # set low time limit for testing
+
+# purge all previously loaded modules
+module purge
+# enable the cuda-zen tree so that the gpu software packages are available
+spackup cuda-zen
 
 ../util/print_job_info.sh
diff --git a/06-zen3_0512_a100x2-gpu-based/02-full-node.sh b/06-zen3_0512_a100x2-gpu-based/02-full-node.sh
index 49a66bef2a1256d6cfe64b7e08f5ca9f8bcc7e61..c721018f5633f6eaf1b5da7c4b65acc00a65c0c4 100644
--- a/06-zen3_0512_a100x2-gpu-based/02-full-node.sh
+++ b/06-zen3_0512_a100x2-gpu-based/02-full-node.sh
@@ -23,5 +23,11 @@
 #SBATCH --partition=zen3_0512_a100x2
 #SBATCH --qos=zen3_0512_a100x2
 #SBATCH --gres=gpu:2
+#SBATCH --time=00:05:00                 # set low time limit for testing
+
+# purge all previously loaded modules
+module purge
+# enable the cuda-zen tree so that the gpu software packages are available
+spackup cuda-zen
 
 ../util/print_job_info.sh
diff --git a/07-zen2_0256_a40x2-gpu-based/01-half-node.sh b/07-zen2_0256_a40x2-gpu-based/01-half-node.sh
new file mode 100644
index 0000000000000000000000000000000000000000..88c9d901dbe8ba8d8e772f2c833f6d9e3205dbe0
--- /dev/null
+++ b/07-zen2_0256_a40x2-gpu-based/01-half-node.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+##############################################################################
+# User Request:
+#   - run in qos 'zen2_0256_a40x2'
+#   - get access to 1 gpu
+#
+# Provided Allocation:
+#   - non-exclusive (shared not set)
+#   - 8 physical cores / 16 logical cores
+#   - 128 GB memory
+#   - (implicitly): 8 tasks on 1 node
+#   - (implicitly): 1 physical core bound to each task
+#
+# VSC policy:
+#   - 'SingleCore' feature set -> only gets scheduled on `SingleCore` nodes
+#   - '--ntasks-per-node' & '--ntasks' implicitly set to 8
+#   - '--mem' (per node) implicitly set to 128 GB
+#   
+# Accounting:
+#   - 8 core hours / hour
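+#     (e.g. the 5 min test run configured below costs 8 * 5/60 ≈ 0.67 core hours)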
+##############################################################################
+
+#SBATCH --partition=zen2_0256_a40x2
+#SBATCH --qos=zen2_0256_a40x2
+#SBATCH --gres=gpu:1
+#SBATCH --time=00:05:00                 # set low time limit for testing
+
+# purge all previously loaded modules
+module purge
+# enable the cuda-zen tree so that the gpu software packages are available
+spackup cuda-zen
+
+../util/print_job_info.sh
diff --git a/07-zen2_0256_a40x2-gpu-based/02-full-node.sh b/07-zen2_0256_a40x2-gpu-based/02-full-node.sh
new file mode 100644
index 0000000000000000000000000000000000000000..991fef75e7e72c9804730b43ab6efe2b51d4060b
--- /dev/null
+++ b/07-zen2_0256_a40x2-gpu-based/02-full-node.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+##############################################################################
+# User Request:
+#   - run in qos 'zen2_0256_a40x2'
+#   - get access to 2 gpus
+#
+# Provided Allocation:
+#   - exclusive access
+#   - 16 physical cores / 32 logical cores
+#   - 256 GB memory
+#   - (implicitly): 16 tasks on 1 node
+#   - (implicitly): 1 physical core bound to each task
+#
+# VSC policy:
+#   - '--ntasks-per-node' & '--ntasks' implicitly set to 16
+#   - '--mem' (per node) implicitly set to 256 GB
+#   
+# Accounting:
+#   - 16 core hours / hour
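+#     (e.g. the 5 min test run configured below costs 16 * 5/60 ≈ 1.33 core hours)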
+##############################################################################
+
+#SBATCH --partition=zen2_0256_a40x2
+#SBATCH --qos=zen2_0256_a40x2
+#SBATCH --gres=gpu:2
+#SBATCH --time=00:05:00                 # set low time limit for testing
+
+# purge all previously loaded modules
+module purge
+# enable the cuda-zen tree so that the gpu software packages are available
+spackup cuda-zen
+
+../util/print_job_info.sh
diff --git a/96-frameworks-pytorch-cuda/01-pytorch-python-single-gpu.sh b/80-frameworks-pytorch-cuda/01-pytorch-python-single-gpu.sh
similarity index 82%
rename from 96-frameworks-pytorch-cuda/01-pytorch-python-single-gpu.sh
rename to 80-frameworks-pytorch-cuda/01-pytorch-python-single-gpu.sh
index b26f823f62988162c4e1943328a3d6656012d574..ca6f7dd65621a97a8c6b7ac85060a2c57c3d7b4f 100644
--- a/96-frameworks-pytorch-cuda/01-pytorch-python-single-gpu.sh
+++ b/80-frameworks-pytorch-cuda/01-pytorch-python-single-gpu.sh
@@ -21,9 +21,13 @@
 #SBATCH --partition=zen2_0256_a40x2
 #SBATCH --qos=zen2_0256_a40x2
 #SBATCH --gres=gpu:1
+#SBATCH --time=00:10:00                 # set low time limit for testing
+
+# purge all previously loaded modules
+module purge
 
 # optionally activate a conda or python environment
-module load miniconda3
+module load miniconda3/latest
 eval "$(conda shell.bash hook)"
 conda activate pytorch-cuda
 
diff --git a/96-frameworks-pytorch-cuda/README.md b/80-frameworks-pytorch-cuda/README.md
similarity index 96%
rename from 96-frameworks-pytorch-cuda/README.md
rename to 80-frameworks-pytorch-cuda/README.md
index 1438a340dba29664fdc2e535cc41cd8dfc3e690b..1385f81ecae683a6a81e9ef3d7a47a8db1c03d95 100644
--- a/96-frameworks-pytorch-cuda/README.md
+++ b/80-frameworks-pytorch-cuda/README.md
@@ -3,7 +3,9 @@
 To install the environment simply use conda
 
 ```bash
-module load miniconda3
+module load miniconda3/latest
 eval "$(conda shell.bash hook)"
 conda env create -f environment.yaml
 ```
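+
+Afterwards the environment can be activated from a job script with `conda activate pytorch-cuda`, as done in `01-pytorch-python-single-gpu.sh`.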
diff --git a/96-frameworks-pytorch-cuda/environment.yaml b/80-frameworks-pytorch-cuda/environment.yaml
similarity index 100%
rename from 96-frameworks-pytorch-cuda/environment.yaml
rename to 80-frameworks-pytorch-cuda/environment.yaml
diff --git a/96-frameworks-pytorch-cuda/pytorch-test.py b/80-frameworks-pytorch-cuda/pytorch-test.py
similarity index 100%
rename from 96-frameworks-pytorch-cuda/pytorch-test.py
rename to 80-frameworks-pytorch-cuda/pytorch-test.py
diff --git a/97-frameworks-tensorflow-cuda/01-tensorflow-python-single-node.sh b/81-frameworks-tensorflow-cuda/01-tensorflow-python-single-node.sh
similarity index 84%
rename from 97-frameworks-tensorflow-cuda/01-tensorflow-python-single-node.sh
rename to 81-frameworks-tensorflow-cuda/01-tensorflow-python-single-node.sh
index 1ae0624c23efae49d4b0e0712c82bceeff5272ce..7e6b48c211ac34ec1ef7777089e8fb5b72789a72 100644
--- a/97-frameworks-tensorflow-cuda/01-tensorflow-python-single-node.sh
+++ b/81-frameworks-tensorflow-cuda/01-tensorflow-python-single-node.sh
@@ -21,9 +21,13 @@
 #SBATCH --partition=zen2_0256_a40x2
 #SBATCH --qos=zen2_0256_a40x2
 #SBATCH --gres=gpu:2
+#SBATCH --time=00:10:00                 # set low time limit for testing
+
+# purge all previously loaded modules
+module purge
 
 # optionally activate a conda or python environment
-module load miniconda3
+module load miniconda3/latest
 eval "$(conda shell.bash hook)"
 conda activate tensorflow-cuda
 
diff --git a/97-frameworks-tensorflow-cuda/environment.yaml b/81-frameworks-tensorflow-cuda/environment.yaml
similarity index 100%
rename from 97-frameworks-tensorflow-cuda/environment.yaml
rename to 81-frameworks-tensorflow-cuda/environment.yaml
diff --git a/97-frameworks-tensorflow-cuda/tensorflow-test.py b/81-frameworks-tensorflow-cuda/tensorflow-test.py
similarity index 100%
rename from 97-frameworks-tensorflow-cuda/tensorflow-test.py
rename to 81-frameworks-tensorflow-cuda/tensorflow-test.py
diff --git a/98-frameworks-ray/01-ray-python-multi-node.sh b/82-frameworks-ray/01-ray-python-multi-node.sh
similarity index 90%
rename from 98-frameworks-ray/01-ray-python-multi-node.sh
rename to 82-frameworks-ray/01-ray-python-multi-node.sh
index bee3518a1e1e6935865691d92f3f56b711ede1f1..6d07b83e0d32f26f6695132ba38727440b7062fc 100755
--- a/98-frameworks-ray/01-ray-python-multi-node.sh
+++ b/82-frameworks-ray/01-ray-python-multi-node.sh
@@ -22,15 +22,18 @@
 ##############################################################################
 
 #SBATCH --job-name=ray-test
-#SBATCH --qos=zen3_0512				# select zen3_0512 default qos
-#SBATCH --partition=zen3_0512		# select zen3_0512 hardware
-#SBATCH --nodes=3 					# tell VSC slurm to allocate 3 exclusive nodes
-#SBATCH --time=00:10:00      		# set time limit of 5 min for testing
+#SBATCH --qos=zen3_0512             # select zen3_0512 default qos
+#SBATCH --partition=zen3_0512       # select zen3_0512 hardware
+#SBATCH --nodes=3                   # tell VSC slurm to allocate 3 exclusive nodes
+#SBATCH --time=00:10:00             # set low time limit for testing
 #SBATCH --tasks-per-node=1          # 1 task per node (1 head + 2 workers)
 
+# purge all previously loaded modules
+module purge
+
 # optionally activate a conda or python environment
 module load openmpi/4.1.6-gcc-12.2.0-exh7lqk
-module load miniconda3
+module load miniconda3/latest
 eval "$(conda shell.bash hook)"
 conda activate ray
 
diff --git a/98-frameworks-ray/02-ray-python-multi-node-gpu.sh b/82-frameworks-ray/02-ray-python-multi-node-gpu.sh
similarity index 93%
rename from 98-frameworks-ray/02-ray-python-multi-node-gpu.sh
rename to 82-frameworks-ray/02-ray-python-multi-node-gpu.sh
index d37a4731d4c4a936ca7c59f356a8b9150f1a8138..afdabb8b566157faf2c1137be1e705eea9c954d0 100755
--- a/98-frameworks-ray/02-ray-python-multi-node-gpu.sh
+++ b/82-frameworks-ray/02-ray-python-multi-node-gpu.sh
@@ -24,17 +24,20 @@
 
 #SBATCH --job-name=ray-test
 #SBATCH --qos=zen3_0512_a100x2_devel    # select zen3_0512_a100x2_devel devel qos for testing
-#SBATCH --partition=zen3_0512_a100x2	# select zen3_0512 hardware
-#SBATCH --time=00:10:00      		    # set time limit of 10 min for testing
-#SBATCH --nodes=2 					    # tell VSC slurm to allocate 2 exclusive nodes
+#SBATCH --partition=zen3_0512_a100x2    # select zen3_0512 hardware
+#SBATCH --time=00:10:00                 # set low time limit for testing
+#SBATCH --nodes=2                       # tell VSC slurm to allocate 2 exclusive nodes
 #SBATCH --gres=gpu:2                    # furthermore allocate 2 gpus per node (=full node)
 #SBATCH --tasks-per-node=1              # 1 task per node (1 head + 1 worker)
 #SBATCH --cpus-per-task=128             # 128 (logical) cpus per task
 #SBATCH --gpus-per-task=2               # 2 gpus per task
 #SBATCH --hint=nomultithread            # specify this to get 1 thread per physical core
 
+# purge all previously loaded modules
+module purge
+
 # optionally load packages and/or activate a conda environment...
-module load miniconda3
+module load miniconda3/latest
 eval "$(conda shell.bash hook)"
 conda activate ray
 
diff --git a/98-frameworks-ray/environment.yaml b/82-frameworks-ray/environment.yaml
similarity index 100%
rename from 98-frameworks-ray/environment.yaml
rename to 82-frameworks-ray/environment.yaml
diff --git a/98-frameworks-ray/ray-test.py b/82-frameworks-ray/ray-test.py
similarity index 100%
rename from 98-frameworks-ray/ray-test.py
rename to 82-frameworks-ray/ray-test.py
diff --git a/README.md b/README.md
index d87115d259994425156fc6f6106c2bfa5edcf507..297cdf63f7500f595d0927ea929bde271fd8c915 100644
--- a/README.md
+++ b/README.md
@@ -2,10 +2,22 @@
 
 The purpose of this repository is to have a set of slurm job scripts with expected results.
 
-This way we have example we can give to users as a starting point as well as have something to test our lua implementation against.
+This way we have examples we can give to users as a starting point, as well as something to test our slurm lua implementation against.
 
+## How to run examples
 
-# Explanations
+To run an example, `cd` into its folder and submit the job script with `sbatch`:
+
+```bash
+cd 01-basics
+sbatch 01-node-simple.sh
+```
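+
+Once submitted, the job can be monitored with standard slurm commands, for example:
+
+```bash
+squeue -u $USER
+```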
 
 ## Exclusive/Shared Nodes (OverSubscribe)
 
diff --git a/util/print_job_info.sh b/util/print_job_info.sh
index 00ccc3596b969077a097cde755814d75649ffb94..9f64862d2c527afc5cf8f1984d5d6c9138dcfd9e 100755
--- a/util/print_job_info.sh
+++ b/util/print_job_info.sh
@@ -21,3 +21,5 @@ $SCRIPT_DIR/computed_available_resources.sh
 $SCRIPT_DIR/cgroup_resources.sh "/slurm/uid_${SLURM_JOB_UID}/job_${SLURM_JOB_ID}"
 
 $SCRIPT_DIR/slurm_vars.sh
+
+$SCRIPT_DIR/spack_info.sh
diff --git a/util/spack_info.sh b/util/spack_info.sh
new file mode 100755
index 0000000000000000000000000000000000000000..03061b50a63cdd744589bdc198bc72e8db9fa5af
--- /dev/null
+++ b/util/spack_info.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+echo ""
+echo "# spack env"
+
+if [[ -z "$SPACK_TREE" ]]; then
+    echo " <no spack env info found or incomplete>"
+else
+    echo " - name: $SPACK_TREE"
+    echo " - root: $SPACK_ROOT"
+fi
diff --git a/util/unload_jupyter_env.sh b/util/unload_jupyter_env.sh
new file mode 100644
index 0000000000000000000000000000000000000000..595845804355016fd90e7d42ccff291046395895
--- /dev/null
+++ b/util/unload_jupyter_env.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+##
+# source this script from a jupyter terminal or notebook cell
+# to unset all jupyter-related env variables and functions
+#  
+# in terminal:
+#   $ source ./unload_jupyter_env.sh
+#   $ sbatch myjob.sh
+#
+# in jupyter notebook:
+#   ! source ./unload_jupyter_env.sh && sbatch myjob.sh
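+#
+# to verify that the unload worked, e.g. count the remaining SLURM_*
+# variables (prints 0 once this script has been sourced):
+#   $ env | grep -c '^SLURM_'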
+##
+
+DEBUG="$1"
+
+function debug_print() {
+    if [ -z "$DEBUG" ]; then
+        return
+    fi
+    echo "$@"
+}
+
+
+if conda -V >/dev/null 2>&1; then
+    eval "$(conda shell.bash hook)"
+
+    # CONDA_SHLVL tracks how many conda environments are currently nested
+    for _ in $(seq "${CONDA_SHLVL:-0}"); do
+        conda deactivate
+    done
+
+    debug_print "Deactivated all conda envs ..."
+else
+    debug_print "No conda found."
+fi
+
+PREVIOUS_IFS="$IFS"
+IFS=$'\n'
+SLURM_VARS=$( env | sort | grep -E "^SLURM_.*=" | sed "s/=.*//g" )
+for var in $SLURM_VARS; do
+    unset "$var"
+done
+debug_print "Unset all SLURM_* env variables ..."
+IFS="$PREVIOUS_IFS"
+
+spack unload
+debug_print "Unloaded all spack packages ..."
+
+module purge
+debug_print "Unloaded all modules ..."
+
+# sanitize LD_LIBRARY_PATH by removing all paths from spack base
+spack_base=$( readlink -f "$( dirname "$( which spack )" )/.." )
+library_path=${LD_LIBRARY_PATH//:/ }
+new_library_path=
+for path in $library_path; do
+    # drop any entry that points into the spack installation
+    if [[ $path == "$spack_base"* ]]; then
+        continue
+    fi
+    # drop duplicates (exact entry match instead of substring match)
+    if [[ ":$new_library_path:" == *":$path:"* ]]; then
+        continue
+    fi
+    if [ -z "$new_library_path" ]; then
+        new_library_path="$path"
+    else
+        new_library_path="$new_library_path:$path"
+    fi
+done
+export LD_LIBRARY_PATH="$new_library_path"
+export LIBRARY_PATH=
+debug_print "Removed all spack library paths ..."
+
+echo "Jupyter env (conda, slurm & spack) unloaded."