Commit

Add files via upload

Vaibhav-ML authored Jul 26, 2023
1 parent 6b85a74 commit 71af626
Showing 7 changed files with 1,351 additions and 0 deletions.
187 changes: 187 additions & 0 deletions artifact_classifier.py
@@ -0,0 +1,187 @@
### importing required packages
import pathlib

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from skimage import io
from sklearn.metrics import (classification_report, confusion_matrix,
                             precision_score, recall_score)

data_dir_train= pathlib.Path('C:/Vaibhav/Classification_Small/train')
data_dir_val= pathlib.Path('C:/Vaibhav/Classification_Small/val')
data_dir_test= pathlib.Path('C:/Vaibhav/Classification_Small/test')

image_count_train = len(list(data_dir_train.glob('*/*.PNG')))
image_count_val = len(list(data_dir_val.glob('*/*.PNG')))
image_count_test = len(list(data_dir_test.glob('*/*.PNG')))
print(image_count_train)
print(image_count_val)
print(image_count_test)

### setting the batch size to 2 and the image height and width
batch_size = 2
img_height = 256
img_width = 256

train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255,
                                                                horizontal_flip=True,
                                                                shear_range=0.2,
                                                                rotation_range=30)

val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
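# Note: the validation generator applies only rescaling (no augmentation), so
# validation metrics reflect the unmodified images.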

train_generator = train_datagen.flow_from_directory(data_dir_train,
                                                    target_size=(img_height, img_width),
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    color_mode='grayscale')
val_generator = val_datagen.flow_from_directory(data_dir_val,
                                                target_size=(img_height, img_width),
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                color_mode='grayscale')

num_classes = 2
classes = ['Artifact','Nuclei'] #Classification classes
epochs = 10


#A simple CNN followed by dense layers
model = tf.keras.Sequential(
    [
        tf.keras.layers.Conv2D(256, (5, 5), input_shape=(256, 256, 1), padding='valid', activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(num_classes, activation='softmax')
    ]
)
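# Optional sanity check (not in the original upload): print layer output shapes
# to confirm the 256x256x1 input survives the five pooling stages.
model.summary()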


class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Stop once validation accuracy reaches 99%.
        if logs.get('val_accuracy', 0) >= 0.99:
            print("Stopped training early!")
            self.model.stop_training = True

callback = myCallback()

checkpoint_filepath = './artifact_classifier_best.h5'  # a concrete .h5 path is assumed here; './' alone is not a usable model file path
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=False,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)

model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
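# categorical_crossentropy pairs with the one-hot labels that
# class_mode='categorical' produces in the generators above.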


test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)


test_generator = test_datagen.flow_from_directory(data_dir_test,
                                                  target_size=(img_height, img_width),
                                                  batch_size=1,
                                                  class_mode='categorical',
                                                  color_mode='grayscale',
                                                  shuffle=False)  # keep order so labels align with predictions

# Generator over a separate 'predict' folder, defined for ad-hoc inference;
# it is not used by the training loop below.
data_dir_pred = 'C:/Vaibhav/Classification_Small/predict'
pred_generator = test_datagen.flow_from_directory(data_dir_pred,
                                                  target_size=(img_height, img_width),
                                                  batch_size=1,
                                                  class_mode='categorical',
                                                  color_mode='grayscale')



history = model.fit(train_generator,
                    steps_per_epoch=len(train_generator),  # len() already counts batches; no further division needed
                    epochs=epochs,
                    validation_data=val_generator,
                    callbacks=[callback, model_checkpoint_callback],  # the callbacks defined above must be passed in to take effect
                    verbose=1)

model.save('artifact_classifier.h5')
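# The HDF5 file stores the architecture, weights, and optimizer state together,
# so tf.keras.models.load_model can restore the model wholesale (as done below).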

#Plotting the results

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss= history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(len(acc))  # early stopping can end the run before `epochs`

plt.figure(figsize=(7,7))

plt.subplot(1,2,1)
plt.plot(epochs_range,acc,label='Training Accuracy')
plt.plot(epochs_range,val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1,2,2)
plt.plot(epochs_range,loss,label='Training Loss')
plt.plot(epochs_range,val_loss, label='Validation Loss')
plt.legend(loc='lower right')
plt.title('Training and Validation Loss')

plt.savefig('plot.PNG')
image_format = 'svg'  # e.g. .png, .svg, etc.
image_name = 'myplot.svg'

plt.savefig(image_name, format=image_format, dpi=300)



# load model and make predictions

savedModel = tf.keras.models.load_model('artifact_classifier.h5')
img = io.imread("D:/DeDustProject/data/Classification_Small/test/Artifact/1778.PNG")
img = np.asarray(img,dtype='float32')
img /= 255
x = np.expand_dims(img,axis = 0)
x = np.expand_dims(x,axis = 3)
pred = savedModel.predict(x)
MaxPosition = np.argmax(pred)
prediction_label = classes[MaxPosition]
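# Report the predicted class along with its softmax score (a small addition for readability).
print(f'Predicted class: {prediction_label} (score {pred[0][MaxPosition]:.3f})')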


score = savedModel.evaluate(test_generator, steps=len(test_generator))
print('Test loss and accuracy:', score)

# With shuffle=False the test generator yields samples in a fixed order, so the
# ground-truth labels can be read straight from the generator instead of
# re-iterating it by hand.
test_generator.reset()
y_pred = savedModel.predict(test_generator, steps=len(test_generator))
y_pred = np.argmax(y_pred, axis=1)
y_test = test_generator.classes

#calculating precision and recall
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
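# The imported classification_report and confusion_matrix summarize both classes
# at once (a small addition; not in the original script).
print(classification_report(y_test, y_pred, target_names=classes))
print(confusion_matrix(y_test, y_pred))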

136 changes: 136 additions & 0 deletions bioligical_artifact_generator.py
@@ -0,0 +1,136 @@
import math
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage import io, img_as_ubyte, transform as t
from skimage.draw import bezier_curve
from skimage.filters import threshold_otsu
from skimage.morphology import skeletonize

from data_augmentation import min_max_255


def single_artifact_generator(path_to_artifact_image):
    '''
    Generates a biologically inspired artifact from a given image containing artifacts.
    :param path_to_artifact_image: Path to the source image.
    '''

    source_image = cv2.imread(path_to_artifact_image)
    imgray = cv2.cvtColor(source_image, cv2.COLOR_BGR2GRAY)
    threshold = threshold_otsu(imgray)  # Otsu thresholding
    binary = imgray > threshold  # Boolean foreground mask from the Otsu threshold.
    binary = img_as_ubyte(binary)
    # apply connected component analysis to the thresholded image
    output = cv2.connectedComponentsWithStats(binary, 8, cv2.CV_32S)
    (numLabels, labels, stats, centroids) = output
    mask = np.zeros(imgray.shape, dtype="uint8")

    # loop over the number of unique connected component labels, skipping
    # over the first label (as label zero is the background)
    for i in range(1, numLabels):
        # extract the connected component statistics for the current label
        x = stats[i, cv2.CC_STAT_LEFT]
        y = stats[i, cv2.CC_STAT_TOP]
        w = stats[i, cv2.CC_STAT_WIDTH]
        h = stats[i, cv2.CC_STAT_HEIGHT]
        area = stats[i, cv2.CC_STAT_AREA]

        if area > 2000:
            # Keep only sizeable components; the crop of the last such
            # component becomes the artifact template.
            componentMask = (labels == i).astype("uint8")
            mask = cv2.bitwise_or(mask, componentMask)
            artifact_image = mask[y:y + h, x:x + w]


    skeleton = skeletonize(artifact_image)

    # np.nonzero returns a tuple of (nonzero_row_index, nonzero_col_index),
    # e.g. (array([0, 0, 1, 1, 2]), array([0, 2, 1, 2, 0]))
    nonzero = np.nonzero(skeleton)
    nonzero_row = nonzero[0]
    nonzero_col = nonzero[1]

    # Drop duplicate coordinates while preserving order.
    nonzero_row = list(dict.fromkeys(nonzero_row))
    nonzero_col = list(dict.fromkeys(nonzero_col))

    # Differentiate along the x- and y-axis of the artifact skeleton to compute
    # control points for Bezier curve fitting.
    if len(nonzero_row) <= len(nonzero_col):
        dx = np.diff(nonzero_row)
        dy = np.diff(nonzero_col[:len(nonzero_row)])
    else:
        dx = np.diff(nonzero_row[:len(nonzero_col)])
        dy = np.diff(nonzero_col)


    # Slope of the skeleton. numpy turns division by zero into inf/nan with a
    # warning rather than raising ZeroDivisionError, so a try/except would
    # never fire here.
    with np.errstate(divide='ignore', invalid='ignore'):
        d = np.abs(dy / dx)
    d = np.nan_to_num(d, nan=0.0)  # 0/0 steps carry no slope information
    max_index = int(np.argmax(d))  # first index of the steepest step
    length_row = len(nonzero_row)
    length_col = len(nonzero_col)
    # Three control points for the quadratic Bezier: the first skeleton point,
    # the point of steepest slope, and the last skeleton point.
    x0 = nonzero_row[0]
    y0 = nonzero_col[0]
    x1 = nonzero_row[max_index]
    y1 = nonzero_col[max_index]
    x2 = nonzero_row[length_row - 1]
    y2 = nonzero_col[length_col - 1]



    # skimage's bezier_curve rasterizes a weighted quadratic Bezier through the
    # three control points; weight > 1 pulls the curve toward the middle point.
    rr, cc = bezier_curve(x0, y0, x1, y1, x2, y2, weight=3)
    img = np.zeros((2160, 2160), dtype='uint8')
    img[rr, cc] = 255

    nonzero_img = np.nonzero(img)
    nonzero_row = nonzero_img[0]
    nonzero_col = nonzero_img[1]

    img1 = np.zeros((artifact_image.shape[0], artifact_image.shape[1]), dtype='uint8')

    for i, j in zip(nonzero_row, nonzero_col):
        # Introduce random thickness into the mimicked artifact.
        thickness_x = np.random.randint(0, 10)
        thickness_y = np.random.randint(0, 10)

        offset_x_1 = 0
        offset_x_2 = 0
        offset_y_1 = 0
        offset_y_2 = 0
        img1[i - thickness_x + offset_x_1:i + thickness_x + offset_x_2,
             j - thickness_y + offset_y_1:j + thickness_y + offset_y_2] = 255  # Final artifact

    #io.imsave('artificial_artifact.PNG',img1)
    return img1  # return the generated artifact mask so callers can save or compose it
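# A minimal usage sketch (hypothetical path, not part of the original upload):
if __name__ == '__main__':
    artifact = single_artifact_generator('C:/Vaibhav/artifact_source.PNG')
    io.imsave('artificial_artifact.PNG', artifact)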
