
Commit d1a3c55

framework-code-initial-commit
1 parent be5607b commit d1a3c55

28 files changed (+2373 -1 lines)

config.py (+113 lines)
@@ -0,0 +1,113 @@
from tensorflow.keras.layers import Input
from easydict import EasyDict as edict
import tensorflow as tf

config = edict()

# original height and width of the image
config.ORIGINAL_HEIGHT = 2400
config.ORIGINAL_WIDTH = 1935

# height and width to resize the image to
config.HEIGHT = 320
config.WIDTH = 256

# input cephalogram image to the base network
config.IMAGE_INPUT = Input(shape=(config.HEIGHT, config.WIDTH, 3), name="cephalogram")

# landmark region proposals (LRPs) input to the landmark detection network
config.PROPOSALS_INPUT = Input(shape=(None, 4), name="landmark_region_proposals")

# image resolution (mm/pixel)
config.IMAGE_RESOLUTION = 0.1

# cephalometric landmarks
config.ANATOMICAL_LANDMARKS = {
    "0": "Sella",
    "1": "Nasion",
    "2": "Orbitale",
    "3": "Porion",
    "4": "A-point",
    "5": "B-point",
    "6": "Pogonion",
    "7": "Menton",
    "8": "Gnathion",
    "9": "Gonion",
    "10": "Lower Incisal Incision",
    "11": "Upper Incisal Incision",
    "12": "Upper Lip",
    "13": "Lower Lip",
    "14": "Subnasale",
    "15": "Soft Tissue Pogonion",
    "16": "Posterior Nasal Spine",
    "17": "Anterior Nasal Spine",
    "18": "Articulare",
}

# number of cephalometric landmarks
config.NUM_LANDMARKS = 19

# layer names at which each supported backbone exposes its feature levels (C1-C6)
config.BACKBONE_BLOCKS_INFO = {
    "vgg16": {
        "C1": "block1_conv2",
        "C2": "block2_conv2",
        "C3": "block3_conv3",
        "C4": "block4_conv3",
        "C5": "block5_conv3"
    },
    "vgg19": {
        "C1": "block1_conv2",
        "C2": "block2_conv2",
        "C3": "block3_conv4",
        "C4": "block4_conv4",
        "C5": "block5_conv4"
    },
    "darknet19": {
        "C1": "block1_conv1",
        "C2": "block2_conv1",
        "C3": "block3_conv3",
        "C4": "block4_conv3",
        "C5": "block5_conv5",
        "C6": "block6_conv5",
    },
    "darknet53": {
        "C1": "block1.1_out",
        "C2": "block2.2_out",
        "C3": "block3.8_out",
        "C4": "block4.8_out",
        "C5": "block5.4_out"
    },
    "resnet18": {
        "C2": "block2.2_out",
        "C3": "block3.2_out",
        "C4": "block4.2_out",
        "C5": "block5.2_out"
    },
    "resnet34": {
        "C2": "block2.3_out",
        "C3": "block3.4_out",
        "C4": "block4.6_out",
        "C5": "block5.3_out"
    },
    "resnet50": {
        "C2": "conv2_block3_out",
        "C3": "conv3_block4_out",
        "C4": "conv4_block6_out",
        "C5": "conv5_block3_out"
    }
}

# region of interest pool size
config.ROI_POOL_SIZE = (5, 5)

# margin (in pixels) at each side of the lateral skull face
config.BOX_MARGIN = 32

config.TRAIN = edict()
# number of epochs
config.TRAIN.EPOCHS = 10
# optimizer
config.TRAIN.OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=0.0001)

cfg = config
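
Because the config keeps both the original and the resized image geometry, a landmark error measured in resized-image pixels can be mapped back to millimetres. A minimal sketch of that conversion, assuming IMAGE_RESOLUTION applies to the original 2400x1935 pixel grid as the comments above suggest; the error_in_mm helper is illustrative and not part of this commit:

import numpy as np
from config import cfg

def error_in_mm(predicted: np.ndarray, target: np.ndarray) -> np.ndarray:
    # hypothetical helper: scale (x, y) offsets from the resized grid back to the
    # original pixel grid, then to millimetres via the mm/pixel resolution
    ratio_width = cfg.ORIGINAL_WIDTH / cfg.WIDTH      # ~7.56 original pixels per resized pixel
    ratio_height = cfg.ORIGINAL_HEIGHT / cfg.HEIGHT   # 7.5
    dx = (predicted[..., 0] - target[..., 0]) * ratio_width
    dy = (predicted[..., 1] - target[..., 1]) * ratio_height
    return np.sqrt(dx ** 2 + dy ** 2) * cfg.IMAGE_RESOLUTION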

data/__init__.py (+20 lines)
@@ -0,0 +1,20 @@
from data.isbi_dataset import ISBIDataset
from data.pku_dataset import PKUDataset
from data.dataset import Dataset
from config import cfg

if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import os

    datasets_root_path = "../datasets"
    isbi_dataset_root = os.path.join(datasets_root_path, "ISBI Dataset")

    # name and mode must be lowercase to match the checks in Dataset and ISBIDataset
    data = Dataset(name="isbi", mode="train", batch_size=4, shuffle=False)

    images, landmarks = data[0]

    index = 0
    plt.imshow(images[index])
    plt.scatter(landmarks[index, :, 0], landmarks[index, :, 1], color="green", s=[3] * cfg.NUM_LANDMARKS)
    plt.show()

data/dataset.py (+74 lines)
@@ -0,0 +1,74 @@
from data import ISBIDataset, PKUDataset
from preprocessing import Augmentation
from paths import Paths
import tensorflow as tf
from config import cfg
import numpy as np
import cv2


def resize(image: np.ndarray, landmarks: np.ndarray):
    """Resize an image to (cfg.HEIGHT, cfg.WIDTH) and rescale its landmarks by the same ratios."""
    image_height, image_width = image.shape[0:2]
    ratio_height, ratio_width = (image_height / cfg.HEIGHT), (image_width / cfg.WIDTH)

    image = cv2.resize(np.array(image), dsize=(cfg.WIDTH, cfg.HEIGHT), interpolation=cv2.INTER_CUBIC)
    landmarks = np.vstack([
        landmarks[:, 0] / ratio_width,
        landmarks[:, 1] / ratio_height
    ]).T

    return image, landmarks


class Dataset(tf.keras.utils.Sequence):

    def __init__(
        self,
        name: str,
        mode: str,
        batch_size: int = 1,
        augmentation: Augmentation = None,
        shuffle: bool = False,
    ):
        if name == "isbi":
            self.dataset = ISBIDataset(Paths.dataset_root_path(name), mode)
        elif name == "pku":
            self.dataset = PKUDataset(Paths.dataset_root_path(name), mode)
        else:
            raise ValueError("no dataset named '{}' exists in your datasets repository.".format(name))

        self.batch_size = batch_size
        self.shuffle = shuffle

        if self.shuffle:
            self.dataset.shuffle()

        self.augmentation = augmentation

    def on_epoch_end(self):
        if self.shuffle:
            self.dataset.shuffle()

    def __getitem__(self, index: int):
        start_index = index * self.batch_size
        end_index = min((index + 1) * self.batch_size, len(self.dataset))

        images = []
        labels = []

        # use a separate loop variable so the batch index is not shadowed
        for sample_index in range(start_index, end_index):
            image, landmarks = self.dataset[sample_index]

            if self.augmentation is not None:
                image, landmarks = self.augmentation.apply(image, landmarks)

            image, landmarks = resize(image, landmarks)
            images.append(image)
            labels.append(landmarks)

        return tf.stack(images), tf.stack(labels)

    def __len__(self):
        # floor division: a final partial batch is dropped
        return len(self.dataset) // self.batch_size
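
dataset.py imports Paths from a paths module that is not among the files shown in this diff, so its exact contents are unknown here. A minimal stand-in consistent with how it is called above and with the "../datasets" root used in data/__init__.py; the "PKU Dataset" folder name is a guess:

import os

class Paths:
    # hypothetical stand-in for the paths module this commit depends on
    DATASETS_ROOT = "../datasets"  # matches datasets_root_path in data/__init__.py

    # assumed name-to-folder mapping; only "ISBI Dataset" appears in this commit
    _FOLDERS = {"isbi": "ISBI Dataset", "pku": "PKU Dataset"}

    @staticmethod
    def dataset_root_path(name: str) -> str:
        return os.path.join(Paths.DATASETS_ROOT, Paths._FOLDERS[name])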

data/isbi_dataset.py (+71 lines)
@@ -0,0 +1,71 @@
from config import cfg
import numpy as np
import random
import cv2
import os


class ISBIDataset(object):

    def __init__(
        self,
        dataset_folder_path: str,
        mode: str
    ) -> None:
        if mode in ["train", "valid", "test"]:
            self.mode = mode
        else:
            raise ValueError("mode must be one of 'train', 'valid' or 'test'")

        self.images_root_path = os.path.join(dataset_folder_path, self.mode, "images")

        self.annotations_root_path = os.path.join(dataset_folder_path, self.mode, "annotations")
        self.senior_annotations_root = os.path.join(self.annotations_root_path, "senior-orthodontist")
        self.junior_annotations_root = os.path.join(self.annotations_root_path, "junior-orthodontist")

        self.images_list = list(sorted(os.listdir(self.images_root_path)))

    def __getitem__(self, index: int):
        image_file_name = self.images_list[index]
        label_file_name = self.images_list[index].split(".")[0] + ".txt"

        image = self.get_image(image_file_name)
        label = self.get_label(label_file_name)

        return image, label

    def get_image(self, file_name: str):
        file_path = os.path.join(self.images_root_path, file_name)

        image = cv2.imread(file_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        return image

    def get_label(self, file_name: str) -> np.ndarray:
        file_path = os.path.join(self.senior_annotations_root, file_name)
        with open(file_path) as file:
            senior_annotations = [landmark.rstrip() for landmark in file]

        senior_annotations = [[float(landmark.split(",")[0]), float(landmark.split(",")[1])] for landmark in senior_annotations[:cfg.NUM_LANDMARKS]]
        senior_annotations = np.array(senior_annotations, dtype=np.float32)

        file_path = os.path.join(self.junior_annotations_root, file_name)
        with open(file_path) as file:
            junior_annotations = [landmark.rstrip() for landmark in file]

        junior_annotations = [[float(landmark.split(",")[0]), float(landmark.split(",")[1])] for landmark in junior_annotations[:cfg.NUM_LANDMARKS]]
        junior_annotations = np.array(junior_annotations, dtype=np.float32)

        # ground truth is the (rounded-up) average of the senior and junior annotations
        landmarks = np.zeros(shape=(cfg.NUM_LANDMARKS, 2), dtype=np.int32)
        landmarks[:, 0] = np.ceil(0.5 * (junior_annotations[:, 0] + senior_annotations[:, 0]))
        landmarks[:, 1] = np.ceil(0.5 * (junior_annotations[:, 1] + senior_annotations[:, 1]))

        return np.array(landmarks, dtype=np.float32)

    def shuffle(self):
        random.shuffle(self.images_list)

    def __len__(self):
        return len(self.images_list)
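
get_label assumes plain-text annotation files with one "x,y" coordinate pair per line and reads only the first cfg.NUM_LANDMARKS lines. A toy round-trip with made-up coordinates, just to make the assumed layout concrete:

# made-up contents of a two-landmark annotation file in the assumed "x,y" per-line format
sample = "1439.0,1312.0\n1653.0,618.0\n"

points = [[float(value) for value in line.split(",")[:2]] for line in sample.splitlines()]
print(points)  # [[1439.0, 1312.0], [1653.0, 618.0]]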

data/pku_dataset.py (+65 lines)
@@ -0,0 +1,65 @@
from config import cfg
import numpy as np
import random
import cv2
import os


class PKUDataset(object):

    def __init__(
        self,
        dataset_folder_path: str,
        mode: str = None
    ):
        self.images_root_path = os.path.join(dataset_folder_path, "images")
        self.labels_root_path = os.path.join(dataset_folder_path, "annotations")

        self.doctor1_annotations_root = os.path.join(self.labels_root_path, "orthodontist-1")
        self.doctor2_annotations_root = os.path.join(self.labels_root_path, "orthodontist-2")

        # sort for a deterministic ordering, as in ISBIDataset
        self.images_list = list(sorted(os.listdir(self.images_root_path)))

    def __getitem__(self, index):
        image_file_name = self.images_list[index]
        label_file_name = self.images_list[index].split(".")[0] + ".txt"

        image = self.get_image(image_file_name)
        landmarks = self.get_label(label_file_name)

        return image, landmarks

    def get_image(self, file_name: str):
        file_path = os.path.join(self.images_root_path, file_name)

        image = cv2.imread(file_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        return np.array(image, dtype=np.uint8)

    def get_label(self, file_name: str):
        file_path = os.path.join(self.doctor1_annotations_root, file_name)
        with open(file_path) as file:
            doctor1_annotations = [landmark.rstrip() for landmark in file]

        doctor1_annotations = [[float(landmark.split(",")[0]), float(landmark.split(",")[1])] for landmark in doctor1_annotations[:cfg.NUM_LANDMARKS]]
        doctor1_annotations = np.array(doctor1_annotations, dtype=np.float32)

        file_path = os.path.join(self.doctor2_annotations_root, file_name)
        with open(file_path) as file:
            doctor2_annotations = [landmark.rstrip() for landmark in file]

        doctor2_annotations = [[float(landmark.split(",")[0]), float(landmark.split(",")[1])] for landmark in doctor2_annotations[:cfg.NUM_LANDMARKS]]
        doctor2_annotations = np.array(doctor2_annotations, dtype=np.float32)

        # ground truth is the (rounded-up) average of the two orthodontists' annotations
        landmarks = np.zeros(shape=(cfg.NUM_LANDMARKS, 2), dtype=np.int32)
        landmarks[:, 0] = np.ceil(0.5 * (doctor1_annotations[:, 0] + doctor2_annotations[:, 0]))
        landmarks[:, 1] = np.ceil(0.5 * (doctor1_annotations[:, 1] + doctor2_annotations[:, 1]))

        return np.array(landmarks, dtype=np.float32)

    def shuffle(self):
        random.shuffle(self.images_list)

    def __len__(self):
        return len(self.images_list)

docs/example.txt (-1 line)

This file was deleted.

logs/statistics/train_stats.npy (464 bytes)

Binary file not shown.

models/__init__.py (+1 line)
@@ -0,0 +1 @@
from models.backbone import Backbone
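
models/backbone.py is added by this commit but its diff is not shown above, so the Backbone interface is unknown here. A rough sketch of how the data pipeline and the training settings from config.py might be wired together, with a throwaway placeholder network standing in for the real model:

import tensorflow as tf
from data import Dataset
from config import cfg

# placeholder network for illustration only; the real model would be built from models/backbone.py
features = tf.keras.layers.Conv2D(64, 3, padding="same", activation="relu")(cfg.IMAGE_INPUT)
features = tf.keras.layers.GlobalAveragePooling2D()(features)
coordinates = tf.keras.layers.Dense(cfg.NUM_LANDMARKS * 2)(features)
outputs = tf.keras.layers.Reshape((cfg.NUM_LANDMARKS, 2))(coordinates)
model = tf.keras.Model(cfg.IMAGE_INPUT, outputs)

model.compile(optimizer=cfg.TRAIN.OPTIMIZER, loss="mse")
train_data = Dataset(name="isbi", mode="train", batch_size=4, shuffle=True)
model.fit(train_data, epochs=cfg.TRAIN.EPOCHS)  # images are fed unnormalized, exactly as Dataset yields them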
