
Commit c8842a0

Author: pytorchbot
Commit message: 2023-12-15 nightly release (c35d385)
Parent: 58e6546

18 files changed: 29 additions & 26 deletions

references/classification/train.py

Lines changed: 5 additions & 3 deletions
@@ -127,7 +127,8 @@ def load_data(traindir, valdir, args):
     if args.cache_dataset and os.path.exists(cache_path):
         # Attention, as the transforms are also cached!
         print(f"Loading dataset_train from {cache_path}")
-        dataset, _ = torch.load(cache_path)
+        # TODO: this could probably be weights_only=True
+        dataset, _ = torch.load(cache_path, weights_only=False)
     else:
         # We need a default value for the variables below because args may come
         # from train_quantization.py which doesn't define them.
@@ -159,7 +160,8 @@ def load_data(traindir, valdir, args):
     if args.cache_dataset and os.path.exists(cache_path):
         # Attention, as the transforms are also cached!
         print(f"Loading dataset_test from {cache_path}")
-        dataset_test, _ = torch.load(cache_path)
+        # TODO: this could probably be weights_only=True
+        dataset_test, _ = torch.load(cache_path, weights_only=False)
     else:
         if args.weights and args.test_only:
             weights = torchvision.models.get_weight(args.weights)
@@ -337,7 +339,7 @@ def collate_fn(batch):
         model_ema = utils.ExponentialMovingAverage(model_without_ddp, device=device, decay=1.0 - alpha)

     if args.resume:
-        checkpoint = torch.load(args.resume, map_location="cpu")
+        checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
         model_without_ddp.load_state_dict(checkpoint["model"])
         if not args.test_only:
             optimizer.load_state_dict(checkpoint["optimizer"])
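
For context, a minimal sketch of the two torch.load patterns this commit converges on (paths are illustrative, not from the repository). The cached-dataset load stays on weights_only=False because the cache stores a pickled dataset object that the restricted weights-only unpickler cannot reconstruct (hence the TODO), while checkpoint resumes move to weights_only=True since those files hold only tensors and plain containers:

import torch

cache_path = "cache/train_dataset.pt"     # hypothetical path
resume_path = "checkpoints/model_10.pth"  # hypothetical path

# Cached dataset: an arbitrary pickled Python object, so the safe
# weights-only unpickler has to stay disabled for now.
dataset, _ = torch.load(cache_path, weights_only=False)

# Training checkpoint: a dict of tensors and primitives, so it can be
# loaded with the restricted unpickler and remapped onto CPU.
checkpoint = torch.load(resume_path, map_location="cpu", weights_only=True)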

references/classification/train_quantization.py

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ def main(args):
         model_without_ddp = model.module

     if args.resume:
-        checkpoint = torch.load(args.resume, map_location="cpu")
+        checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
         model_without_ddp.load_state_dict(checkpoint["model"])
         optimizer.load_state_dict(checkpoint["optimizer"])
         lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])

references/classification/utils.py

Lines changed: 2 additions & 3 deletions
@@ -287,8 +287,7 @@ def average_checkpoints(inputs):
     for fpath in inputs:
         with open(fpath, "rb") as f:
             state = torch.load(
-                f,
-                map_location=(lambda s, _: torch.serialization.default_restore_location(s, "cpu")),
+                f, map_location=(lambda s, _: torch.serialization.default_restore_location(s, "cpu")), weights_only=True
             )
         # Copies over the settings from the first checkpoint
         if new_state is None:
@@ -367,7 +366,7 @@ def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=T

     # Deep copy to avoid side effects on the model object.
     model = copy.deepcopy(model)
-    checkpoint = torch.load(checkpoint_path, map_location="cpu")
+    checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True)

     # Load the weights to the model to validate that everything works
     # and remove unnecessary weights (such as auxiliaries, etc.)
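
A side note on the map_location lambda kept in average_checkpoints: torch.serialization.default_restore_location(storage, "cpu") remaps every storage onto CPU, so the lambda is effectively the long-hand spelling of the map_location="cpu" shorthand used by the other scripts in this commit. A minimal sketch, with a hypothetical checkpoint path:

import torch

ckpt = "checkpoints/epoch_3.pth"  # hypothetical path

# Long-hand form, mirroring average_checkpoints: route each storage to CPU.
state_a = torch.load(
    ckpt,
    map_location=lambda storage, _loc: torch.serialization.default_restore_location(storage, "cpu"),
    weights_only=True,
)

# Equivalent shorthand used elsewhere in this commit.
state_b = torch.load(ckpt, map_location="cpu", weights_only=True)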

references/depth/stereo/cascade_evaluation.py

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ def load_checkpoint(args):
     utils.setup_ddp(args)

     if not args.weights:
-        checkpoint = torch.load(args.checkpoint, map_location=torch.device("cpu"))
+        checkpoint = torch.load(args.checkpoint, map_location=torch.device("cpu"), weights_only=True)
         if "model" in checkpoint:
             experiment_args = checkpoint["args"]
             model = torchvision.prototype.models.depth.stereo.__dict__[experiment_args.model](weights=None)

references/depth/stereo/train.py

Lines changed: 1 addition & 1 deletion
@@ -498,7 +498,7 @@ def main(args):
     # load them from checkpoint if needed
     args.start_step = 0
     if args.resume_path is not None:
-        checkpoint = torch.load(args.resume_path, map_location="cpu")
+        checkpoint = torch.load(args.resume_path, map_location="cpu", weights_only=True)
         if "model" in checkpoint:
             # this means the user requested to resume from a training checkpoint
             model_without_ddp.load_state_dict(checkpoint["model"])

references/detection/train.py

Lines changed: 1 addition & 1 deletion
@@ -288,7 +288,7 @@ def main(args):
         )

     if args.resume:
-        checkpoint = torch.load(args.resume, map_location="cpu")
+        checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
         model_without_ddp.load_state_dict(checkpoint["model"])
         optimizer.load_state_dict(checkpoint["optimizer"])
         lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])

references/optical_flow/train.py

Lines changed: 1 addition & 1 deletion
@@ -226,7 +226,7 @@ def main(args):
         model_without_ddp = model

     if args.resume is not None:
-        checkpoint = torch.load(args.resume, map_location="cpu")
+        checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
         model_without_ddp.load_state_dict(checkpoint["model"])

     if args.test_only:

references/segmentation/train.py

Lines changed: 1 addition & 1 deletion
@@ -223,7 +223,7 @@ def main(args):
         lr_scheduler = main_lr_scheduler

     if args.resume:
-        checkpoint = torch.load(args.resume, map_location="cpu")
+        checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
         model_without_ddp.load_state_dict(checkpoint["model"], strict=not args.test_only)
         if not args.test_only:
             optimizer.load_state_dict(checkpoint["optimizer"])

references/similarity/train.py

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ def main(args):

     model = EmbeddingNet()
     if args.resume:
-        model.load_state_dict(torch.load(args.resume))
+        model.load_state_dict(torch.load(args.resume, weights_only=True))

     model.to(device)
references/video_classification/train.py

Lines changed: 3 additions & 3 deletions
@@ -164,7 +164,7 @@ def main(args):

     if args.cache_dataset and os.path.exists(cache_path):
         print(f"Loading dataset_train from {cache_path}")
-        dataset, _ = torch.load(cache_path)
+        dataset, _ = torch.load(cache_path, weights_only=True)
         dataset.transform = transform_train
     else:
         if args.distributed:
@@ -201,7 +201,7 @@ def main(args):

     if args.cache_dataset and os.path.exists(cache_path):
         print(f"Loading dataset_test from {cache_path}")
-        dataset_test, _ = torch.load(cache_path)
+        dataset_test, _ = torch.load(cache_path, weights_only=True)
         dataset_test.transform = transform_test
     else:
         if args.distributed:
@@ -295,7 +295,7 @@ def main(args):
         model_without_ddp = model.module

     if args.resume:
-        checkpoint = torch.load(args.resume, map_location="cpu")
+        checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
         model_without_ddp.load_state_dict(checkpoint["model"])
         optimizer.load_state_dict(checkpoint["optimizer"])
         lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
