
Commit ce898a49 authored by Pfister, Martin

Update llama3.1-70b run_leonardo.slurm (single GPU)

parent 717d0af1
run_leonardo.slurm:

 #!/bin/bash
 #SBATCH --partition=boost_usr_prod
-# #SBATCH --qos=boost_qos_dbg
+#SBATCH --qos=boost_qos_dbg
 ## Specify resources:
 ## Leonardo Booster: 32 CPU cores and 4 GPUs per node => 8 cores per GPU
 #SBATCH --nodes=1
+#SBATCH --gpus-per-task=1 # up to 4 on Leonardo
 #SBATCH --ntasks-per-node=1
-#SBATCH --gpus-per-task=1
-#SBATCH --mem-per-gpu=25GB
-#SBATCH --cpus-per-task=8
-#SBATCH --time=1:00:00
+#SBATCH --mem-per-gpu=120GB
+#SBATCH --cpus-per-task=8 # should be 8 * gpus-per-task on Leonardo
+#SBATCH --time=0:30:00 # up to 0:30:00 for boost_qos_dbg
 # Set conda environment:
 CONDA_ENV=finetuning
@@ -34,5 +34,4 @@ nvidia-smi
 # Run AI scripts:
 time conda run -n $CONDA_ENV --no-capture-output python llama3.1-70b_train.py
-time conda run -n $CONDA_ENV --no-capture-output python llama3.1-70b_test.py
-# time conda run -n $CONDA_ENV --no-capture-output torchrun --nproc_per_node 4 llama3.1-70b_train.py
+# time conda run -n $CONDA_ENV --no-capture-output python llama3.1-70b_test.py
\ No newline at end of file
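
For comparison, below is a minimal sketch of the 4-GPU (full-node) variant that the script's own comments and the removed torchrun line point to. The memory request and wall time are assumptions for illustration, not values taken from this commit.

#!/bin/bash
#SBATCH --partition=boost_usr_prod    # no boost_qos_dbg here: the debug QOS is capped at 0:30:00
## Leonardo Booster: 32 CPU cores and 4 GPUs per node => 8 cores per GPU
#SBATCH --nodes=1
#SBATCH --gpus-per-task=4             # up to 4 on Leonardo
#SBATCH --ntasks-per-node=1
#SBATCH --mem-per-gpu=120GB           # assumption: same per-GPU memory as the single-GPU run
#SBATCH --cpus-per-task=32            # 8 * gpus-per-task on Leonardo
#SBATCH --time=1:00:00                # assumption: adjust to the job

# Set conda environment:
CONDA_ENV=finetuning

# One worker per GPU via torchrun, as in the line removed by this commit:
time conda run -n $CONDA_ENV --no-capture-output torchrun --nproc_per_node 4 llama3.1-70b_train.py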