From 21099e396aaad0693f47947842e223d8b0935088 Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Tue, 30 May 2023 22:53:33 +0530
Subject: [PATCH 1/8] Update README.md

---
 README.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 81463bfd7..d733c19fa 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,10 @@
 # intel-oneAPI
 
-#### Team Name -
+#### Team Name - BrainX
 #### Problem Statement -
+Object detection for autonomous vehicles
 #### Team Leader Email -
+koratdishant536631@gmail.com
 
 ## A Brief of the Prototype:
   This section must include UML Diagrams and prototype description

From 3e435b46307d6ba7843ed12f90ce8ac4c7fc2be9 Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Sun, 4 Jun 2023 23:25:38 +0530
Subject: [PATCH 2/8] Update README.md

---
 README.md | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index d733c19fa..8ee33cec5 100644
--- a/README.md
+++ b/README.md
@@ -7,11 +7,26 @@ Object detection for autonomous vehicles
 koratdishant536631@gmail.com
 
 ## A Brief of the Prototype:
-  This section must include UML Diagrams and prototype description
+
+In the domain of autonomous cars, object detection for self-driving vehicles is a major challenge. A vehicle must be able to precisely detect and identify objects in its surroundings, such as pedestrians, other cars, traffic signs, and barriers, in order to operate safely and effectively.
+
+By utilizing Intel oneAPI, the main objective of this project is to develop a reliable, accurate, and effective object detection system that will support the development of the next generation of autonomous vehicles.
+
+To achieve this goal we are using the state-of-the-art Mask R-CNN model, which is well suited to instance segmentation. We are training this model on a small subset of the Cityscapes dataset. Once the model is trained we will optimize it using oneAPI, record the results for both scenarios, and make this data available in this repository.
+
+Architecture of Mask R-CNN:
+
+![Screenshot 2023-06-04 231513](https://github.com/Korat-Dishant/intel-oneAPI/assets/86142546/49d9e6e1-0b4c-4c06-be1f-5498fd6c13e7)
+
+Real-time detection can also be performed, as shown in this diagram:
+![20230507_200057](https://github.com/Korat-Dishant/intel-oneAPI/assets/86142546/9f587a4f-cc47-4ce8-aebd-ed9265747b3f)
+
+
 ## Tech Stack:
    List down all technologies used to build the prototype **Clearly mentioning Intel® AI Analytics Toolkits, its libraries and the SYCL/DPC++ Libraries used**
+
 ## Step-by-Step Code Execution Instructions:
   This section must contain the set of instructions required to clone and run the prototype, so that it can be tested and deeply analysed

From 5573850b45c237493965be6f0525c8cece031527 Mon Sep 17 00:00:00 2001
From: Dishant
Date: Fri, 9 Jun 2023 16:50:46 +0530
Subject: [PATCH 3/8] code

---
 onednn.txt | 180 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 180 insertions(+)
 create mode 100644 onednn.txt

diff --git a/onednn.txt b/onednn.txt
new file mode 100644
index 000000000..e04abd5e5
--- /dev/null
+++ b/onednn.txt
@@ -0,0 +1,180 @@
+# oneDNN acceleration comes from the intel-tensorflow build of TensorFlow,
+# whose CPU kernels are implemented with oneDNN primitives. The original
+# draft installed and imported hypothetical `ngraph`/`onednn` Python
+# bindings; those calls are replaced throughout this file with standard
+# Keras layers that run on the same oneDNN-backed kernels.
+!pip install intel-tensorflow
+
+import os
+
+# Ask for the oneDNN-optimized kernels before TensorFlow is imported
+# (recent TensorFlow builds enable them by default).
+os.environ["TF_ENABLE_ONEDNN_OPTS"] = "1"
+
+import numpy as np
+import cv2
+import tensorflow as tf
+import matplotlib.pyplot as plt
+import seaborn as sns
+from PIL import Image
+from tensorflow.keras.layers import Conv2D, MaxPool2D, UpSampling2D, Concatenate, Input
+from tensorflow.keras import Model
+from tensorflow.keras.callbacks import ModelCheckpoint
+
+EPOCHS = 7
+BATCH_SIZE = 10
+HEIGHT = 256
+WIDTH = 256
+N_CLASSES = 13
+
+# Each Cityscapes-Paris file is a 512x256 image: the photo on the left
+# half, the colour-coded segmentation mask on the right half.
+def LoadImage(name, path):
+    img = Image.open(os.path.join(path, name))
+    img = np.array(img)
+
+    image = img[:, :256]
+    mask = img[:, 256:]
+
+    return image, mask
+
+# Quantize the mask's grey levels into 13 discrete class ids.
+def bin_image(mask):
+    bins = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240])
+    new_mask = np.digitize(mask, bins)
+    return new_mask
+
+# One-hot encode a class-id mask into a (height, width, classes) volume.
+def getSegmentationArr(image, classes, width=WIDTH, height=HEIGHT):
+    seg_labels = np.zeros((height, width, classes))
+    img = image[:, :, 0]
+
+    for c in range(classes):
+        seg_labels[:, :, c] = (img == c).astype(int)
+    return seg_labels
+
+# Map each class id to a distinct colour for visualisation.
+def give_color_to_seg_img(seg, n_classes=N_CLASSES):
+    seg_img = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
+    colors = sns.color_palette("hls", n_classes)
+
+    for c in range(n_classes):
+        segc = (seg == c)
+        seg_img[:, :, 0] += (segc * (colors[c][0]))
+        seg_img[:, :, 1] += (segc * (colors[c][1]))
+        seg_img[:, :, 2] += (segc * (colors[c][2]))
+
+    return seg_img
+
+train_folder = "/content/drive/MyDrive/Intel OneAPI Hackathon Project/Dataset/cityscapes_paris/train"
+valid_folder = "/content/drive/MyDrive/Intel OneAPI Hackathon Project/Dataset/cityscapes_paris/val"
+
+num_of_training_samples = len(os.listdir(train_folder))
+num_of_valid_samples = len(os.listdir(valid_folder))
+
+# Infinite generator yielding (images, one-hot labels) batches.
+def DataGenerator(path, batch_size=BATCH_SIZE, classes=N_CLASSES):
+    files = os.listdir(path)
+    while True:
+        for i in range(0, len(files), batch_size):
+            batch_files = files[i: i + batch_size]
+            imgs = []
+            segs = []
+            for file in batch_files:
+                image, mask = LoadImage(file, path)
+                mask_binned = bin_image(mask)
+                labels = getSegmentationArr(mask_binned, classes)
+
+                imgs.append(image)
+                segs.append(labels)
+
+            yield np.array(imgs), np.array(segs)
+
+train_gen = DataGenerator(train_folder, batch_size=BATCH_SIZE)
+val_gen = DataGenerator(valid_folder, batch_size=BATCH_SIZE)
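+
+# Optional sanity check (a sketch; _pywrap_util_port is an internal
+# TensorFlow module whose location varies between releases): confirm
+# that this build was compiled with oneDNN/MKL support.
+from tensorflow.python.util import _pywrap_util_port
+print("oneDNN enabled:", _pywrap_util_port.IsMklEnabled())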
+imgs, segs = next(train_gen)
+print(imgs.shape, segs.shape)  # (BATCH_SIZE, 256, 256, 3) (BATCH_SIZE, 256, 256, 13)
+
+# Overlay the colourised ground-truth mask on one training image.
+image = imgs[0]
+mask = give_color_to_seg_img(np.argmax(segs[0], axis=-1))
+masked_image = cv2.addWeighted(image/255, 0.5, mask, 0.5, 0)
+
+fig, axs = plt.subplots(1, 3, figsize=(20, 20))
+axs[0].imshow(image)
+axs[0].set_title('Original Image')
+axs[1].imshow(mask)
+axs[1].set_title('Segmentation Mask')
+axs[2].imshow(masked_image)
+axs[2].set_title('Masked Image')
+plt.show()
+
+# The original draft re-implemented these two layers with hand-written
+# ngraph/oneDNN calls that operated on concrete numpy arrays, so they could
+# not be composed with the symbolic Keras tensors used below (and they also
+# treated the integer `filters` argument as a weight array). The standard
+# Keras layers are used instead; with intel-tensorflow they execute on
+# oneDNN-optimized kernels anyway. ReLU is added because without a
+# nonlinearity the stacked convolutions would collapse to a linear map.
+def conv2d_layer(x, filters, kernel_size=(3, 3), padding="same", strides=1):
+    return Conv2D(filters, kernel_size, padding=padding, strides=strides,
+                  activation="relu")(x)
+
+def maxpool2d_layer(x, pool_size=(2, 2), strides=(2, 2)):
+    return MaxPool2D(pool_size=pool_size, strides=strides)(x)
+
+# Encoder step: two convolutions, then downsample; the pre-pool tensor is
+# kept for the skip connection.
+def down_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
+    c = conv2d_layer(x, filters, kernel_size, padding, strides)
+    c = conv2d_layer(c, filters, kernel_size, padding, strides)
+    p = maxpool2d_layer(c, pool_size=(2, 2), strides=(2, 2))
+    return c, p
+
+# Decoder step: upsample, concatenate the matching skip tensor, convolve.
+def up_block(x, skip, filters, kernel_size=(3, 3), padding="same", strides=1):
+    us = UpSampling2D()(x)
+    concat = Concatenate()([us, skip])
+    c = conv2d_layer(concat, filters, kernel_size, padding, strides)
+    c = conv2d_layer(c, filters, kernel_size, padding, strides)
+    return c
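+
+# Shape walk-through for a 256x256 input (follows from the blocks above
+# and the filter counts used in build_unet below):
+#   encoder:    256 -> 128 -> 64 -> 32 -> 16   (16, 32, 64, 128 filters)
+#   bottleneck: 16 x 16 x 256
+#   decoder:    16 -> 32 -> 64 -> 128 -> 256   (128, 64, 32, 16 filters)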
+def build_unet(input_shape, num_classes):
+    inputs = Input(input_shape)
+
+    c1, p1 = down_block(inputs, 16, kernel_size=(3, 3), padding="same")
+    c2, p2 = down_block(p1, 32, kernel_size=(3, 3), padding="same")
+    c3, p3 = down_block(p2, 64, kernel_size=(3, 3), padding="same")
+    c4, p4 = down_block(p3, 128, kernel_size=(3, 3), padding="same")
+
+    c5 = conv2d_layer(p4, 256, kernel_size=(3, 3), padding="same", strides=1)
+
+    u6 = up_block(c5, c4, 128, kernel_size=(3, 3), padding="same", strides=1)
+    u7 = up_block(u6, c3, 64, kernel_size=(3, 3), padding="same", strides=1)
+    u8 = up_block(u7, c2, 32, kernel_size=(3, 3), padding="same", strides=1)
+    u9 = up_block(u8, c1, 16, kernel_size=(3, 3), padding="same", strides=1)
+
+    # Per-pixel class probabilities over the N_CLASSES classes.
+    outputs = Conv2D(num_classes, (1, 1), padding="same", activation="softmax")(u9)
+    model = Model(inputs, outputs)
+    return model
+
+input_shape = (HEIGHT, WIDTH, 3)
+model = build_unet(input_shape, N_CLASSES)
+model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
+              loss='categorical_crossentropy', metrics=['accuracy'])
+
+checkpoint = ModelCheckpoint("model_weights.h5", save_weights_only=True,
+                             save_best_only=True, verbose=1)
+model.fit(train_gen, validation_data=val_gen,
+          steps_per_epoch=num_of_training_samples//BATCH_SIZE,
+          validation_steps=num_of_valid_samples//BATCH_SIZE,
+          epochs=EPOCHS, callbacks=[checkpoint])
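+
+# A minimal inference sketch (assumes the training run above has finished):
+# colourise the predicted mask for one validation image using the helpers
+# defined earlier, and overlay it on the input.
+val_imgs, _ = next(val_gen)
+preds = model.predict(val_imgs)
+pred_mask = give_color_to_seg_img(np.argmax(preds[0], axis=-1))
+plt.imshow(cv2.addWeighted(val_imgs[0]/255, 0.5, pred_mask, 0.5, 0))
+plt.show()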

From 8573aa760839e49c53528d7394ca11f5675e1942 Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Fri, 9 Jun 2023 23:07:02 +0530
Subject: [PATCH 4/8] Update README.md

updated
---
 README.md | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 8ee33cec5..d92e99fbf 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,12 @@ In the domain of autonomous cars, object detection for self-driving vehicles is
 
 By utilizing Intel oneAPI, the main objective of this project is to develop a reliable, accurate, and effective object detection system that will support the development of the next generation of autonomous vehicles.
 
-To achieve this goal we are using the state-of-the-art Mask R-CNN model, which is well suited to instance segmentation. We are training this model on a small subset of the Cityscapes dataset. Once the model is trained we will optimize it using oneAPI, record the results for both scenarios, and make this data available in this repository.
+To achieve this goal we are using the state-of-the-art U-Net, which is well suited to semantic segmentation. We are training this model on a small subset of the Cityscapes dataset. Once the model is trained we will optimize it using oneAPI, record the results for both scenarios, and make this data available in this repository.
 
-Architecture of Mask R-CNN:
+Architecture of U-Net:
+
+![unet_architecture](https://github.com/Korat-Dishant/intel-oneAPI/assets/86142546/3f62646f-c25e-42da-b16a-810046fb654e)
 
-![Screenshot 2023-06-04 231513](https://github.com/Korat-Dishant/intel-oneAPI/assets/86142546/49d9e6e1-0b4c-4c06-be1f-5498fd6c13e7)
 
 Real-time detection can also be performed, as shown in this diagram:
@@ -24,11 +25,17 @@ Real-time detection can also be performed, as shown in this diagram:
 ![20230507_200057](https://github.com/Korat-Dishant/intel-oneAPI/assets/86142546/9f587a4f-cc47-4ce8-aebd-ed9265747b3f)
 
 
 ## Tech Stack:
-   List down all technologies used to build the prototype **Clearly mentioning Intel® AI Analytics Toolkits, its libraries and the SYCL/DPC++ Libraries used**
-
+Intel oneAPI AI Analytics Toolkit,
+oneDNN,
+Python 3,
+OpenCV,
+Pillow,
+Keras,
+intel-tensorflow,
+Intel DevCloud
 ## Step-by-Step Code Execution Instructions:
   This section must contain the set of instructions required to clone and run the prototype, so that it can be tested and deeply analysed
 
 ## What I Learned:
-  Write about the biggest learning you had while developing the prototype
+I learnt how to work with Intel oneAPI and DevCloud. Intel oneAPI has also given me the chance to explore various libraries and toolkits that have helped us optimize the performance of our model.

From 8df3a3559b70eae7d56cfce35fb578ecbeb5d1c2 Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Fri, 9 Jun 2023 23:11:19 +0530
Subject: [PATCH 5/8] Update README.md

updated
---
 README.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index d92e99fbf..8ca254b13 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,8 @@ intel-tensorflow,
 Intel DevCloud
 
 ## Step-by-Step Code Execution Instructions:
-  This section must contain the set of instructions required to clone and run the prototype, so that it can be tested and deeply analysed
-
+Step 1: clone this repository
+Step 2: run train.py
+
 ## What I Learned:
 I learnt how to work with Intel oneAPI and DevCloud. Intel oneAPI has also given me the chance to explore various libraries and toolkits that have helped us optimize the performance of our model.

From 0b1ebd9993bed8046405a2ca10cee9fb988db350 Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Fri, 9 Jun 2023 23:12:57 +0530
Subject: [PATCH 6/8] Rename onednn.txt to train.py

name changes
---
 onednn.txt => train.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename onednn.txt => train.py (100%)

diff --git a/onednn.txt b/train.py
similarity index 100%
rename from onednn.txt
rename to train.py

From d9e472e92db8731fcfa0e730776f5e886999ba2e Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Fri, 9 Jun 2023 23:39:09 +0530
Subject: [PATCH 7/8] Update README.md

drive
---
 README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/README.md b/README.md
index 8ca254b13..830577d4a 100644
--- a/README.md
+++ b/README.md
@@ -38,5 +38,10 @@ Intel DevCloud
 Step 1: clone this repository
 Step 2: run train.py
 
+For more information, please visit our [Medium page](https://medium.com/@sidd6758harth/optimizing-object-detection-models-for-autonomous-vehicle-using-intel-oneapi-6dc674e40c71).
+
+## Output video
+[drive link](https://drive.google.com/file/d/17MbKJJEGVEm68UgP8DH8fiSOFjdJtLLe/view?usp=sharing)
+
 ## What I Learned:
 I learnt how to work with Intel oneAPI and DevCloud. Intel oneAPI has also given me the chance to explore various libraries and toolkits that have helped us optimize the performance of our model.

From 41167b6c5d03b27a2881959ba5072324c9aa5a45 Mon Sep 17 00:00:00 2001
From: Korat-Dishant <86142546+Korat-Dishant@users.noreply.github.com>
Date: Tue, 20 Jun 2023 13:40:41 +0530
Subject: [PATCH 8/8] Update README.md

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 830577d4a..4208b7a1c 100644
--- a/README.md
+++ b/README.md
@@ -41,7 +41,8 @@ Step 2: run train.py
 For more information, please visit our [Medium page](https://medium.com/@sidd6758harth/optimizing-object-detection-models-for-autonomous-vehicle-using-intel-oneapi-6dc674e40c71).
 
 ## Output video
-[drive link](https://drive.google.com/file/d/17MbKJJEGVEm68UgP8DH8fiSOFjdJtLLe/view?usp=sharing)
+[drive link](https://drive.google.com/file/d/1uvay32qpHSnBQr3oANSAu9PCXqtFR5HY/view?usp=drive_link)
+
 ## What I Learned:
 I learnt how to work with Intel oneAPI and DevCloud. Intel oneAPI has also given me the chance to explore various libraries and toolkits that have helped us optimize the performance of our model.