Skip to content

Commit

Permalink
change path to the dataset
Browse files — browse the repository at this point in the history
  • Loading branch information
sab148 committed Dec 6, 2023
1 parent 5e084ef commit af48c41
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion code/parallelize/ddp_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
nnodes = os.getenv("SLURM_NNODES")

# 1. Organize the data
datamodule = ImageNetDataModule("/p/scratch/training2324/data/", 128, \
datamodule = ImageNetDataModule("/p/scratch/training2338/data/", 128, \
int(os.getenv('SLURM_CPUS_PER_TASK')), transform)

# 2. Build the model using desired Task
Expand Down
2 changes: 1 addition & 1 deletion code/parallelize/gpu_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
])

# 1. Organize the data
datamodule = ImageNetDataModule("/p/scratch/training2324/data/", 256, \
datamodule = ImageNetDataModule("/p/scratch/training2338/data/", 256, \
int(os.getenv('SLURM_CPUS_PER_TASK')), transform)

# 2. Build the model using desired Task
Expand Down
4 changes: 2 additions & 2 deletions code/parallelize/run_ddp.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@
#SBATCH --cpus-per-task=24 # Divide the number of cpus (96) by the number of GPUs (4)
#SBATCH --time=00:30:00
#SBATCH --partition=booster
#SBATCH --account=training2324
#SBATCH --account=training2338
#SBATCH --output=%j.out
#SBATCH --error=%j.err
#SBATCH --reservation=ai_on_sc_day2
#SBATCH --reservation=training2338-day2

export CUDA_VISIBLE_DEVICES=0,1,2,3 # Very important to make the GPUs visible
export SRUN_CPUS_PER_TASK="$SLURM_CPUS_PER_TASK"
Expand Down
4 changes: 2 additions & 2 deletions code/parallelize/run_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@
#SBATCH --cpus-per-task=24 # Divide the number of cpus (96) by the number of GPUs (4)
#SBATCH --time=02:00:00
#SBATCH --partition=booster
#SBATCH --account=training2324
#SBATCH --account=training2338
#SBATCH --output=%j.out
#SBATCH --error=%j.err
#SBATCH --reservation=ai_on_sc_day2
#SBATCH --reservation=training2338-day2

export CUDA_VISIBLE_DEVICES=0,1,2,3 # Very important to make the GPUs visible
export SRUN_CPUS_PER_TASK="$SLURM_CPUS_PER_TASK"
Expand Down
4 changes: 2 additions & 2 deletions code/parallelize/run_one_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,11 @@
#SBATCH --cpus-per-task=96
#SBATCH --time=06:00:00
#SBATCH --partition=booster
#SBATCH --account=training2324
#SBATCH --account=training2338
#SBATCH --output=%j.out
#SBATCH --error=%j.err

#SBATCH --reservation=ai_on_sc_day2
#SBATCH --reservation=training2338-day2

# To get number of cpu per task
export SRUN_CPUS_PER_TASK="$SLURM_CPUS_PER_TASK"
Expand Down

0 comments on commit af48c41

Please sign in to comment.