
Commit fc26020

auto fixes from pre-commit.com hooks (Project-MONAI#1198)
### Checks
<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->
- [x] Avoid including large-size files in the PR.
- [x] Clean up long text outputs from code cells in the notebook.
- [x] For security purposes, please check the contents and remove any sensitive info such as user names and private keys.
- [x] Ensure (1) hyperlinks and markdown anchors are working, (2) relative paths are used for tutorial repo files, and (3) figures and graphs are placed in the `./figure` folder.
- [ ] Notebook runs automatically `./runner.sh -t <path to .ipynb file>`

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 9e4ac86 commit fc26020

File tree

171 files changed: +3512 −4299 lines


.github/contributing_templates/notebook/example_class.py

+2-2
@@ -17,15 +17,15 @@
 from PIL import Image
 
 
-class ExampleImageGenerator():
+class ExampleImageGenerator:
     def __init__(self, num_image=40, image_size=(128, 128)):
         self.num_image = num_image
         self.image_size = image_size
 
     def generate(self, tempdir):
         for i in range(self.num_image):
             im, seg = create_test_image_2d(
-                self.image_size[0], self.image_size[1], num_seg_classes=1,random_state=np.random.RandomState(42)
+                self.image_size[0], self.image_size[1], num_seg_classes=1, random_state=np.random.RandomState(42)
             )
             Image.fromarray((im * 255).astype("uint8")).save(os.path.join(tempdir, f"img{i:d}.png"))
             Image.fromarray((seg * 255).astype("uint8")).save(os.path.join(tempdir, f"seg{i:d}.png"))
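For reference, the formatted generator runs as a self-contained script. This is a sketch: the imports (`os`, `numpy`, `monai.data.create_test_image_2d`) are assumptions, since the hunk starts below the file's import block.

```python
# Self-contained sketch of the formatted template class; the imports are
# assumed (the diff hunk does not show the file's import block).
import os
import tempfile

import numpy as np
from PIL import Image
from monai.data import create_test_image_2d


class ExampleImageGenerator:
    def __init__(self, num_image=40, image_size=(128, 128)):
        self.num_image = num_image
        self.image_size = image_size

    def generate(self, tempdir):
        for i in range(self.num_image):
            # deterministic synthetic image/segmentation pairs
            im, seg = create_test_image_2d(
                self.image_size[0], self.image_size[1], num_seg_classes=1, random_state=np.random.RandomState(42)
            )
            Image.fromarray((im * 255).astype("uint8")).save(os.path.join(tempdir, f"img{i:d}.png"))
            Image.fromarray((seg * 255).astype("uint8")).save(os.path.join(tempdir, f"seg{i:d}.png"))


with tempfile.TemporaryDirectory() as tmp:
    ExampleImageGenerator(num_image=4).generate(tmp)
    print(sorted(os.listdir(tmp)))  # ['img0.png', ..., 'seg3.png']
```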

.pre-commit-config.yaml

+5-24
@@ -22,30 +22,11 @@ repos:
         args: ['--maxkb=1024']
       - id: detect-private-key
 
-  #- repo: https://github.com/asottile/pyupgrade
-  #  rev: v2.23.2
-  #  hooks:
-  #    - id: pyupgrade
-  #      args: [--py36-plus]
-  #      name: Upgrade code
-
-  #- repo: https://github.com/asottile/yesqa
-  #  rev: v1.2.3
-  #  hooks:
-  #    - id: yesqa
-  #      name: Unused noqa
-
-  #- repo: https://github.com/PyCQA/isort
-  #  rev: 5.9.3
-  #  hooks:
-  #    - id: isort
-  #      name: Format imports
-
-  # - repo: https://github.com/psf/black
-  #   rev: 21.7b0
-  #   hooks:
-  #     - id: black
-  #       name: Format code
+  - repo: https://github.com/psf/black
+    rev: "22.12.0"
+    hooks:
+      - id: black
+      - id: black-jupyter
 
   #- repo: https://github.com/executablebooks/mdformat
   #  rev: 0.7.8
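The newly enabled `black` and `black-jupyter` hooks are what produce the mechanical rewrites in the rest of this commit. A minimal sketch of the same rewrite via black's Python API; the `line_length=120` is an assumption inferred from the joined lines in these diffs, not something this hunk shows:

```python
# Sketch: reproduce the kind of rewrite black applies across this commit.
# line_length=120 is an assumed project setting, not shown in this hunk.
import black

src = """class ExampleImageGenerator():
    def generate(self):
        plt.axis('off')
"""
# black drops the empty parens on the class head and normalizes quotes
print(black.format_str(src, mode=black.Mode(line_length=120)))
```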

2d_classification/mednist_tutorial.ipynb

+13-31
@@ -237,14 +237,10 @@
     }
    ],
    "source": [
-    "class_names = sorted(x for x in os.listdir(data_dir)\n",
-    "                     if os.path.isdir(os.path.join(data_dir, x)))\n",
+    "class_names = sorted(x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x)))\n",
     "num_class = len(class_names)\n",
     "image_files = [\n",
-    "    [\n",
-    "        os.path.join(data_dir, class_names[i], x)\n",
-    "        for x in os.listdir(os.path.join(data_dir, class_names[i]))\n",
-    "    ]\n",
+    "    [os.path.join(data_dir, class_names[i], x) for x in os.listdir(os.path.join(data_dir, class_names[i]))]\n",
     "    for i in range(num_class)\n",
     "]\n",
     "num_each = [len(image_files[i]) for i in range(num_class)]\n",
@@ -341,9 +337,7 @@
     "test_x = [image_files_list[i] for i in test_indices]\n",
     "test_y = [image_class[i] for i in test_indices]\n",
     "\n",
-    "print(\n",
-    "    f\"Training count: {len(train_x)}, Validation count: \"\n",
-    "    f\"{len(val_x)}, Test count: {len(test_x)}\")"
+    "print(f\"Training count: {len(train_x)}, Validation count: \" f\"{len(val_x)}, Test count: {len(test_x)}\")"
    ]
   },
   {
@@ -370,8 +364,7 @@
     "    ]\n",
     ")\n",
     "\n",
-    "val_transforms = Compose(\n",
-    "    [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity()])\n",
+    "val_transforms = Compose([LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity()])\n",
     "\n",
     "y_pred_trans = Compose([Activations(softmax=True)])\n",
     "y_trans = Compose([AsDiscrete(to_onehot=num_class)])"
@@ -397,16 +390,13 @@
     "\n",
     "\n",
     "train_ds = MedNISTDataset(train_x, train_y, train_transforms)\n",
-    "train_loader = DataLoader(\n",
-    "    train_ds, batch_size=300, shuffle=True, num_workers=10)\n",
+    "train_loader = DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=10)\n",
     "\n",
     "val_ds = MedNISTDataset(val_x, val_y, val_transforms)\n",
-    "val_loader = DataLoader(\n",
-    "    val_ds, batch_size=300, num_workers=10)\n",
+    "val_loader = DataLoader(val_ds, batch_size=300, num_workers=10)\n",
     "\n",
     "test_ds = MedNISTDataset(test_x, test_y, val_transforms)\n",
-    "test_loader = DataLoader(\n",
-    "    test_ds, batch_size=300, num_workers=10)"
+    "test_loader = DataLoader(test_ds, batch_size=300, num_workers=10)"
    ]
   },
   {
@@ -430,8 +420,7 @@
    "outputs": [],
    "source": [
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
-    "model = DenseNet121(spatial_dims=2, in_channels=1,\n",
-    "                    out_channels=num_class).to(device)\n",
+    "model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=num_class).to(device)\n",
     "loss_function = torch.nn.CrossEntropyLoss()\n",
     "optimizer = torch.optim.Adam(model.parameters(), 1e-5)\n",
     "max_epochs = 4\n",
@@ -477,9 +466,7 @@
     "        loss.backward()\n",
     "        optimizer.step()\n",
     "        epoch_loss += loss.item()\n",
-    "        print(\n",
-    "            f\"{step}/{len(train_ds) // train_loader.batch_size}, \"\n",
-    "            f\"train_loss: {loss.item():.4f}\")\n",
+    "        print(f\"{step}/{len(train_ds) // train_loader.batch_size}, \" f\"train_loss: {loss.item():.4f}\")\n",
     "        epoch_len = len(train_ds) // train_loader.batch_size\n",
     "    epoch_loss /= step\n",
     "    epoch_loss_values.append(epoch_loss)\n",
@@ -509,8 +496,7 @@
     "        if result > best_metric:\n",
     "            best_metric = result\n",
     "            best_metric_epoch = epoch + 1\n",
-    "            torch.save(model.state_dict(), os.path.join(\n",
-    "                root_dir, \"best_metric_model.pth\"))\n",
+    "            torch.save(model.state_dict(), os.path.join(root_dir, \"best_metric_model.pth\"))\n",
     "            print(\"saved new best metric model\")\n",
     "        print(\n",
     "            f\"current epoch: {epoch + 1} current AUC: {result:.4f}\"\n",
@@ -519,9 +505,7 @@
     "            f\" at epoch: {best_metric_epoch}\"\n",
     "        )\n",
     "\n",
-    "print(\n",
-    "    f\"train completed, best_metric: {best_metric:.4f} \"\n",
-    "    f\"at epoch: {best_metric_epoch}\")"
+    "print(f\"train completed, best_metric: {best_metric:.4f} \" f\"at epoch: {best_metric_epoch}\")"
    ]
   },
   {
@@ -581,8 +565,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model.load_state_dict(torch.load(\n",
-    "    os.path.join(root_dir, \"best_metric_model.pth\")))\n",
+    "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\")))\n",
     "model.eval()\n",
     "y_true = []\n",
     "y_pred = []\n",
@@ -626,8 +609,7 @@
     }
    ],
    "source": [
-    "print(classification_report(\n",
-    "    y_true, y_pred, target_names=class_names, digits=4))"
+    "print(classification_report(y_true, y_pred, target_names=class_names, digits=4))"
    ]
   },
   {
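A pattern worth noting in these hunks: when black joins a wrapped `print`, it leaves two adjacent f-string literals separated only by a space. That is implicit string concatenation, so behavior is unchanged. A quick self-contained check:

```python
# Adjacent f-string literals concatenate implicitly, so the joined form black
# emits is equivalent to the original multi-line print.
train_x, val_x, test_x = range(10), range(2), range(3)
a = f"Training count: {len(train_x)}, Validation count: " f"{len(val_x)}, Test count: {len(test_x)}"
b = (
    f"Training count: {len(train_x)}, Validation count: "
    f"{len(val_x)}, Test count: {len(test_x)}"
)
assert a == b
print(a)  # Training count: 10, Validation count: 2, Test count: 3
```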

2d_registration/registration_mednist.ipynb

+19-17
@@ -230,7 +230,8 @@
     "train_data = MedNISTDataset(root_dir=root_dir, section=\"training\", download=True, transform=None)\n",
     "training_datadict = [\n",
     "    {\"fixed_hand\": item[\"image\"], \"moving_hand\": item[\"image\"]}\n",
-    "    for item in train_data.data if item[\"label\"] == 4  # label 4 is for xray hands\n",
+    "    for item in train_data.data\n",
+    "    if item[\"label\"] == 4  # label 4 is for xray hands\n",
     "]\n",
     "print(\"\\n first training items: \", training_datadict[:3])"
    ]
@@ -247,9 +248,15 @@
     "    [\n",
     "        LoadImageD(keys=[\"fixed_hand\", \"moving_hand\"]),\n",
     "        EnsureChannelFirstD(keys=[\"fixed_hand\", \"moving_hand\"]),\n",
-    "        ScaleIntensityRanged(keys=[\"fixed_hand\", \"moving_hand\"],\n",
-    "                             a_min=0., a_max=255., b_min=0.0, b_max=1.0, clip=True,),\n",
-    "        RandRotateD(keys=[\"moving_hand\"], range_x=np.pi/4, prob=1.0, keep_size=True, mode=\"bicubic\"),\n",
+    "        ScaleIntensityRanged(\n",
+    "            keys=[\"fixed_hand\", \"moving_hand\"],\n",
+    "            a_min=0.0,\n",
+    "            a_max=255.0,\n",
+    "            b_min=0.0,\n",
+    "            b_max=1.0,\n",
+    "            clip=True,\n",
+    "        ),\n",
+    "        RandRotateD(keys=[\"moving_hand\"], range_x=np.pi / 4, prob=1.0, keep_size=True, mode=\"bicubic\"),\n",
     "        RandZoomD(keys=[\"moving_hand\"], min_zoom=0.9, max_zoom=1.1, prob=1.0, mode=\"bicubic\", align_corners=False),\n",
     "    ]\n",
     ")"
@@ -347,8 +354,7 @@
     }
    ],
    "source": [
-    "train_ds = CacheDataset(data=training_datadict[:1000], transform=train_transforms,\n",
-    "                        cache_rate=1.0, num_workers=4)\n",
+    "train_ds = CacheDataset(data=training_datadict[:1000], transform=train_transforms, cache_rate=1.0, num_workers=4)\n",
     "train_loader = DataLoader(train_ds, batch_size=16, shuffle=True, num_workers=2)"
    ]
   },
@@ -370,11 +376,8 @@
    "source": [
     "device = torch.device(\"cuda:0\")\n",
     "model = GlobalNet(\n",
-    "    image_size=(64, 64),\n",
-    "    spatial_dims=2,\n",
-    "    in_channels=2,  # moving and fixed\n",
-    "    num_channel_initial=16,\n",
-    "    depth=3).to(device)\n",
+    "    image_size=(64, 64), spatial_dims=2, in_channels=2, num_channel_initial=16, depth=3  # moving and fixed\n",
+    ").to(device)\n",
     "image_loss = MSELoss()\n",
     "if USE_COMPILED:\n",
     "    warp_layer = Warp(3, \"border\").to(device)\n",
@@ -498,8 +501,7 @@
     }
    ],
    "source": [
-    "val_ds = CacheDataset(data=training_datadict[2000:2500], transform=train_transforms,\n",
-    "                      cache_rate=1.0, num_workers=0)\n",
+    "val_ds = CacheDataset(data=training_datadict[2000:2500], transform=train_transforms, cache_rate=1.0, num_workers=0)\n",
     "val_loader = DataLoader(val_ds, batch_size=16, num_workers=0)\n",
     "for batch_data in val_loader:\n",
     "    moving = batch_data[\"moving_hand\"].to(device)\n",
@@ -543,20 +545,20 @@
     "for b in range(batch_size):\n",
     "    # moving image\n",
     "    plt.subplot(batch_size, 3, b * 3 + 1)\n",
-    "    plt.axis('off')\n",
+    "    plt.axis(\"off\")\n",
     "    plt.title(\"moving image\")\n",
     "    plt.imshow(moving_image[b], cmap=\"gray\")\n",
     "    # fixed image\n",
     "    plt.subplot(batch_size, 3, b * 3 + 2)\n",
-    "    plt.axis('off')\n",
+    "    plt.axis(\"off\")\n",
     "    plt.title(\"fixed image\")\n",
     "    plt.imshow(fixed_image[b], cmap=\"gray\")\n",
     "    # warped moving\n",
     "    plt.subplot(batch_size, 3, b * 3 + 3)\n",
-    "    plt.axis('off')\n",
+    "    plt.axis(\"off\")\n",
     "    plt.title(\"predicted image\")\n",
     "    plt.imshow(pred_image[b], cmap=\"gray\")\n",
-    "plt.axis('off')\n",
+    "plt.axis(\"off\")\n",
     "plt.show()"
    ]
   }
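One readability casualty worth flagging in the GlobalNet hunk: black moves the `# moving and fixed` comment to the end of the joined argument line, where it no longer sits next to `in_channels=2`. For context, a sketch of the resulting construction; the module paths (`monai.networks.nets.GlobalNet`, `monai.networks.blocks.Warp`) are assumptions since the notebook's import cell is not part of this diff, and the CPU fallback replaces the tutorial's hard-coded `cuda:0`:

```python
# Sketch of the reformatted model setup under the assumptions named above.
import torch
from monai.networks.blocks import Warp
from monai.networks.nets import GlobalNet

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = GlobalNet(
    image_size=(64, 64), spatial_dims=2, in_channels=2, num_channel_initial=16, depth=3  # moving and fixed
).to(device)
warp_layer = Warp("bilinear", "border").to(device)  # the tutorial's non-compiled branch

moving = torch.rand(2, 1, 64, 64, device=device)
fixed = torch.rand(2, 1, 64, 64, device=device)
ddf = model(torch.cat((moving, fixed), dim=1))  # dense displacement field
warped = warp_layer(moving, ddf)
print(ddf.shape, warped.shape)
```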

2d_segmentation/torch/unet_evaluation_dict.py

+9-1
@@ -23,7 +23,15 @@
 from monai.inferers import sliding_window_inference
 from monai.metrics import DiceMetric
 from monai.networks.nets import UNet
-from monai.transforms import Activations, EnsureChannelFirstd, AsDiscrete, Compose, LoadImaged, SaveImage, ScaleIntensityd
+from monai.transforms import (
+    Activations,
+    EnsureChannelFirstd,
+    AsDiscrete,
+    Compose,
+    LoadImaged,
+    SaveImage,
+    ScaleIntensityd,
+)
 
 
 def main(tempdir):
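The trailing comma inside the parenthesized import is black's "magic trailing comma": it keeps the import exploded one name per line even when it would fit within the line limit. For context, a minimal sketch of the sliding-window evaluation these imports feed; the UNet hyperparameters here are illustrative placeholders, not the script's actual values:

```python
# Illustrative sketch only; the network hyperparameters are placeholders.
import torch
from monai.inferers import sliding_window_inference
from monai.networks.nets import UNet

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = UNet(spatial_dims=2, in_channels=1, out_channels=1, channels=(16, 32, 64), strides=(2, 2)).to(device)
net.eval()

val_images = torch.rand(1, 1, 96, 96, device=device)
with torch.no_grad():
    # run 64x64 windows over the input, 4 windows per forward pass
    val_outputs = sliding_window_inference(val_images, roi_size=(64, 64), sw_batch_size=4, predictor=net)
print(val_outputs.shape)  # torch.Size([1, 1, 96, 96])
```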

3d_classification/densenet_training_array.ipynb

+2-2
@@ -38,7 +38,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!python -c \"import monai\" || pip install -q \"monai-weekly[nibabel, tqdm]\"\n"
+    "!python -c \"import monai\" || pip install -q \"monai-weekly[nibabel, tqdm]\""
    ]
   },
   {
@@ -463,7 +463,7 @@
     "# For the other dimensions (channel, width, height), use\n",
     "# -1 to use 0 and img.shape[x]-1 for min and max, respectively\n",
     "depth_slice = img.shape[2] // 2\n",
-    "occ_sens_b_box = [depth_slice-1, depth_slice, -1, -1, -1, -1]\n",
+    "occ_sens_b_box = [depth_slice - 1, depth_slice, -1, -1, -1, -1]\n",
     "\n",
     "occ_result, _ = occ_sens(x=img, b_box=occ_sens_b_box)\n",
     "occ_result = occ_result[0, label.argmax().item()][None]\n",

3d_classification/ignite/densenet_training_dict.py

+11-1
@@ -125,7 +125,17 @@ def prepare_batch(batch, device=None, non_blocking=False):
     post_pred = Compose([Activations(softmax=True)])
     # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
     # user can add output_transform to return other values
-    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch, output_transform=lambda x, y, y_pred: ([post_pred(i) for i in decollate_batch(y_pred)], [post_label(i) for i in decollate_batch(y, detach=False)]))
+    evaluator = create_supervised_evaluator(
+        net,
+        val_metrics,
+        device,
+        True,
+        prepare_batch=prepare_batch,
+        output_transform=lambda x, y, y_pred: (
+            [post_pred(i) for i in decollate_batch(y_pred)],
+            [post_label(i) for i in decollate_batch(y, detach=False)],
+        ),
+    )
 
     # add stats event handler to print validation stats via evaluator
     val_stats_handler = StatsHandler(
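The exploded call makes it easier to see what the `output_transform` actually does: `decollate_batch` splits each batch into per-item tensors so the post-transforms apply item-wise, and the evaluator then receives `(y_pred, y)` as lists. A standalone sketch of that pattern, with a hypothetical 2-class setup standing in for the script's real data:

```python
# Sketch of the output_transform's decollate + post-transform pattern; the
# 2-class shapes here are hypothetical, not the script's actual data.
import torch
from monai.data import decollate_batch
from monai.transforms import Activations, AsDiscrete, Compose

post_pred = Compose([Activations(softmax=True)])
post_label = Compose([AsDiscrete(to_onehot=2)])

y_pred = torch.rand(4, 2)  # batch of 4 raw two-class outputs
y = torch.tensor([[0], [1], [1], [0]])  # batch of labels with a channel dim

# decollate_batch turns (4, 2) into a list of 4 tensors of shape (2,), so each
# post-transform sees one unbatched, channel-first item
outputs = [post_pred(i) for i in decollate_batch(y_pred)]
labels = [post_label(i) for i in decollate_batch(y, detach=False)]
print(len(outputs), outputs[0].shape, labels[0].shape)  # 4 torch.Size([2]) torch.Size([2])
```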
