diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d6e795b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+
+Pipfile.lock
+
+yolov8x.pt
+
+.vscode/
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..bf26d6d
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,24 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(road_segmentation)
+
+find_package(
+ catkin REQUIRED
+ COMPONENTS rospy
+ std_msgs
+ sensor_msgs
+ cv_bridge
+ jsk_recognition_msgs
+)
+
+catkin_python_setup()
+
+catkin_package(
+ CATKIN_DEPENDS
+ rospy
+ std_msgs
+ sensor_msgs
+ cv_bridge
+ jsk_recognition_msgs
+)
+
+include_directories(${catkin_INCLUDE_DIRS})
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Pipfile b/Pipfile
new file mode 100644
index 0000000..e473071
--- /dev/null
+++ b/Pipfile
@@ -0,0 +1,30 @@
+[[source]]
+name = "pypi"
+url = "https://pypi.org/simple"
+verify_ssl = true
+
+[[source]]
+name = "pytorch_cuda"
+url = "https://download.pytorch.org/whl/cu118"
+verify_ssl = false
+
+[dev-packages]
+
+[packages]
+networkx = "==2.8.8"
+torch = {index = "pytorch_cuda",version = "==2.0.0+cu118"}
+torchvision = {index = "pytorch_cuda",version = "==0.15.1+cu118"}
+tensorflow = "==2.4.0"
+opencv-python = "*"
+numpy = "*"
+pandas = "*"
+matplotlib = "*"
+keras = "*"
+requests = "*"
+pyyaml = "*"
+rospkg = "*"
+ipython = "*"
+scikit-learn = "*"
+
+[requires]
+python_version = "3.8"
diff --git a/README.md b/README.md
index 89109d6..8a900ea 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,21 @@
* ### Development environment
- ubuntu 18.04, tensorflow 2.0.0, opencv-python 4.2.0.32, numpy 1.18.2
+ ubuntu 20.04, tensorflow 2.4.0, opencv-python 4.2.0.32, numpy 1.18.2
+
+* ### Setup
+- Install CUDA 11.8
+- Install cuDNN 8.9.7 for CUDA 11.8
+- Add the following to `~/.bashrc`:
+```bash
+## CUDA and cuDNN paths
+export PATH=/usr/local/cuda-11.8/bin:${PATH}
+export LD_LIBRARY_PATH=/usr/local/cuda-11.8/lib64:${LD_LIBRARY_PATH}
+export LD_LIBRARY_PATH=/usr/local/cuda-11.8/targets/x86_64-linux/lib:${LD_LIBRARY_PATH}
+```
+- Run `sudo ln -s /usr/local/cuda-11.8/targets/x86_64-linux/lib/libcusolver.so.11 /usr/local/cuda-11.8/targets/x86_64-linux/lib/libcusolver.so.10`
+ - Reference: [Setting up a TensorFlow environment with WSL2 + Ubuntu 20.04 + CUDA 11.4](https://zenn.dev/ylabo0717/articles/48796b7f3470c7)
+- Run `pipenv install` in this package
* ### model
diff --git a/config/class.yaml b/config/class.yaml
new file mode 100644
index 0000000..960ae85
--- /dev/null
+++ b/config/class.yaml
@@ -0,0 +1,8 @@
+class:
+ Background: 0
+ Bike_lane: 1
+ Caution_zone: 2
+ Crosswalk: 3
+ braille_guide_blocks: 4
+ Roadway: 5
+ Sidewalk: 6
diff --git a/data_loader/split_train_test.py b/data_loader/split_train_test.py
deleted file mode 100644
index 0915a37..0000000
--- a/data_loader/split_train_test.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from data_loader import train_images, mask_images
-import shutil
-
-train_images = train_images()
-mask_images = mask_images()
-
-train_num = int(len(train_images)*0.75)
-
-test_img = train_images[train_num:]
-test_label = mask_images[train_num:]
-
-test_img_path = ['../dataset/test_img/' +test_img[i].split("/")[-1] for i in range(len(test_img))]
-test_label_path = ['../dataset/test_label/' +test_label[i].split("/")[-1] for i in range(len(test_label))]
-
-for i in range(len(test_img)):
- shutil.move(test_img[i], test_img_path[i])
-
-for i in range(len(test_label)):
- shutil.move(test_label[i], test_label_path[i])
\ No newline at end of file
diff --git a/demo.py b/demo.py
deleted file mode 100644
index 98146c7..0000000
--- a/demo.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import cv2
-import time
-import tensorflow as tf
-from model.pspunet import pspunet
-from data_loader.display import create_mask
-import numpy as np
-gpus = tf.config.experimental.list_physical_devices('GPU')
-
-if gpus:
- try:
- tf.config.experimental.set_virtual_device_configuration(
- gpus[0],
- [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=500)])
- except RuntimeError as e:
- print(e)
-
-cap= cv2.VideoCapture(YOUR_VIDEO_PATH)
-
-IMG_WIDTH = 480
-IMG_HEIGHT = 272
-n_classes = 7
-
-model = pspunet((IMG_HEIGHT, IMG_WIDTH ,3), n_classes)
-model.load_weights("pspunet_weight.h5")
-
-while True:
- start= time.time()
- try:
- _,frame = cap.read()
- frame = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- frame = frame[tf.newaxis, ...]
- frame = frame/255
- except:
- cv2.destroyAllWindows()
- cap.release()
- break
-
- pre = model.predict(frame)
- pre = create_mask(pre).numpy()
-
- frame2 = frame/2
- frame2[0][(pre==1).all(axis=2)] += [0, 0, 0] #""bike_lane_normal", "sidewalk_asphalt", "sidewalk_urethane""
- frame2[0][(pre==2).all(axis=2)] += [0.5, 0.5,0] # "caution_zone_stairs", "caution_zone_manhole", "caution_zone_tree_zone", "caution_zone_grating", "caution_zone_repair_zone"]
- frame2[0][(pre==3).all(axis=2)] += [0.2, 0.7, 0.5] #"alley_crosswalk","roadway_crosswalk"
- frame2[0][(pre==4).all(axis=2)] += [0, 0.5, 0.5] #"braille_guide_blocks_normal", "braille_guide_blocks_damaged"
- frame2[0][(pre==5).all(axis=2)] += [0, 0, 0.5] #"roadway_normal","alley_normal","alley_speed_bump", "alley_damaged""
- frame2[0][(pre==6).all(axis=2)] += [0.5, 0, 0] #"sidewalk_blocks","sidewalk_cement" , "sidewalk_soil_stone", "sidewalk_damaged","sidewalk_other"
- video = np.uint8(frame2)
-
- print(1/(time.time()-start))
- cv2.waitKey(1)
-
diff --git a/launch/road_segmentation.launch b/launch/road_segmentation.launch
new file mode 100644
index 0000000..36d6370
--- /dev/null
+++ b/launch/road_segmentation.launch
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
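The XML body of `launch/road_segmentation.launch` was lost above (only blank added lines remain). As a hedged sketch only, based on the parameters the node reads (`~camera_topic`, `~camera_info_topic`, `~debug`, the `class` table from `config/class.yaml`, and a `~model_path` that already defaults to `models/pspunet_weight.h5`), the launch file likely resembles the following; the actual argument names and remappings in the repository may differ:

```xml
<launch>
  <!-- Assumed argument names; the real launch file may differ -->
  <arg name="camera_topic" default="/camera/image_raw" />
  <arg name="camera_info_topic" default="/camera/camera_info" />
  <arg name="debug" default="true" />

  <!-- Class name/id table consumed by the node via rospy.get_param('class') -->
  <rosparam command="load" file="$(find road_segmentation)/config/class.yaml" />

  <node pkg="road_segmentation" type="road_segmentation_node.py" name="road_segmentation_node" output="screen">
    <param name="camera_topic" value="$(arg camera_topic)" />
    <param name="camera_info_topic" value="$(arg camera_info_topic)" />
    <param name="debug" value="$(arg debug)" />
    <!-- model_path falls back to models/pspunet_weight.h5 inside the node -->
  </node>
</launch>
```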
diff --git a/launch/usb_camera.launch b/launch/usb_camera.launch
new file mode 100644
index 0000000..638acdc
--- /dev/null
+++ b/launch/usb_camera.launch
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
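The body of `launch/usb_camera.launch` is likewise blank above. Since `package.xml` pulls in `usb_cam`, a minimal sketch (device path, resolution, and pixel format are assumptions) could be:

```xml
<launch>
  <!-- Assumed device and resolution; adjust to the actual camera -->
  <node pkg="usb_cam" type="usb_cam_node" name="usb_cam" output="screen">
    <param name="video_device" value="/dev/video0" />
    <param name="image_width" value="640" />
    <param name="image_height" value="480" />
    <param name="pixel_format" value="yuyv" />
  </node>
</launch>
```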
diff --git a/pspunet_weight.h5 b/models/pspunet_weight.h5
similarity index 100%
rename from pspunet_weight.h5
rename to models/pspunet_weight.h5
diff --git a/icnet_train.ipynb b/notebooks/icnet_train.ipynb
similarity index 100%
rename from icnet_train.ipynb
rename to notebooks/icnet_train.ipynb
diff --git a/train.ipynb b/notebooks/train.ipynb
similarity index 100%
rename from train.ipynb
rename to notebooks/train.ipynb
diff --git a/package.xml b/package.xml
new file mode 100644
index 0000000..5d20dd6
--- /dev/null
+++ b/package.xml
@@ -0,0 +1,23 @@
+
+
+ road_segmentation
+ 0.0.0
+ The road_segmentation package
+ SoftBank corp.
+ Apache 2.0
+ catkin
+
+ rospy
+ std_msgs
+ sensor_msgs
+ cv_bridge
+ jsk_recognition_msgs
+
+ rospy
+ std_msgs
+ sensor_msgs
+ cv_bridge
+ jsk_recognition_msgs
+ usb_cam
+
+
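The manifest above lost its XML tags, but its text content is intact. A rough, hedged reconstruction (the format version and dependency tags are assumptions, and the maintainer email is left empty because it is not recoverable):

```xml
<?xml version="1.0"?>
<package format="2">
  <name>road_segmentation</name>
  <version>0.0.0</version>
  <description>The road_segmentation package</description>
  <maintainer email="">SoftBank corp.</maintainer>
  <license>Apache 2.0</license>

  <buildtool_depend>catkin</buildtool_depend>

  <build_depend>rospy</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>cv_bridge</build_depend>
  <build_depend>jsk_recognition_msgs</build_depend>

  <exec_depend>rospy</exec_depend>
  <exec_depend>std_msgs</exec_depend>
  <exec_depend>sensor_msgs</exec_depend>
  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>jsk_recognition_msgs</exec_depend>
  <exec_depend>usb_cam</exec_depend>
</package>
```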
diff --git a/scripts/road_segmentation_node.py b/scripts/road_segmentation_node.py
new file mode 100755
index 0000000..45f7db8
--- /dev/null
+++ b/scripts/road_segmentation_node.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
+
+import os
+
+import cv2
+import numpy as np
+import rospy
+import tensorflow
+from cv_bridge import CvBridge
+from cv_bridge import CvBridgeError
+from sensor_msgs.msg import CameraInfo
+from sensor_msgs.msg import Image
+
+from road_segmentation.data_loader.display import create_mask
+
+bridge = CvBridge()
+
+
+class RoadSegmentationNode:
+ def __init__(self):
+ rospy.init_node('road_segmentation_node')
+
+ # GPU configuration
+ self.gpus = tensorflow.config.experimental.list_physical_devices('GPU')
+ if self.gpus:
+ try:
+ tensorflow.config.experimental.set_virtual_device_configuration(
+ self.gpus[0],
+ [tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=500)]
+ )
+ except RuntimeError as e:
+ rospy.logerr("Error configuring GPU: {}".format(e))
+
+ # Set default model path to pspunet_weight.h5 in the models directory
+ default_model_path = os.path.join(
+ rospy.get_param('~model_dir', os.path.dirname(__file__) + '/../models'),
+ 'pspunet_weight.h5'
+ )
+ self.camera_topic = rospy.get_param('~camera_topic', '/camera/image_raw')
+ self.camera_info_topic = rospy.get_param('~camera_info_topic', '/camera/camera_info')
+ self.model_path = rospy.get_param('~model_path', default_model_path)
+ self.debug = rospy.get_param('~debug', True)
+
+ # Load class names and labels from ROS parameter
+ class_data = rospy.get_param('class')
+ self.class_names = list(class_data.keys())
+ self.num_classes = len(self.class_names)
+
+ # Load the segmentation model
+ self.model = tensorflow.keras.models.load_model(self.model_path)
+
+ # Get input image size from model
+ self.input_image_size = self.model.input_shape[1:3] # (height, width)
+
+ # Subscribe to the camera and camera_info topics
+ self.image_sub = rospy.Subscriber(self.camera_topic, Image, self.image_callback)
+ self.camera_info_sub = rospy.Subscriber(self.camera_info_topic, CameraInfo, self.camera_info_callback)
+
+        # Publisher for the adjusted camera info
+        self.camera_info_pub = rospy.Publisher('~result/camera_info', CameraInfo, queue_size=1)
+        # Publishers for the per-class segmentation masks
+ self.result_pubs = []
+ for class_name in self.class_names:
+ pub = rospy.Publisher(f'~result/{class_name}_mask', Image, queue_size=1)
+ self.result_pubs.append(pub)
+
+ # Publisher for debug image (only if debug mode is enabled)
+ if self.debug:
+ self.debug_image_pub = rospy.Publisher('~debug_image', Image, queue_size=1)
+
+ self.current_camera_info = None
+ rospy.loginfo("Road Segmentation Node Initialized")
+
+ def camera_info_callback(self, msg):
+ self.current_camera_info = msg
+
+ def perform_segmentation(self, image):
+ # Resize the input image to the size expected by the model
+ input_image = cv2.resize(image, (self.input_image_size[1], self.input_image_size[0])) # (width, height)
+ input_image = np.expand_dims(input_image, axis=0)
+ input_image = input_image / 255.0 # Normalize input
+
+ # Get the segmentation result (class map)
+ result = self.model.predict(input_image)
+
+        # Convert the prediction to a class-index mask; cast to uint8 so cv2.resize can handle it later
+        result_mask = create_mask(result).numpy().astype(np.uint8)
+
+ return result_mask
+
+ def create_debug_image(self, image, result_mask):
+ # Apply colormap to visualize the mask
+ result_mask_colored = cv2.applyColorMap((result_mask * 36).astype(np.uint8), cv2.COLORMAP_JET)
+
+ # Overlay the segmentation result on the original image with transparency
+ alpha = 0.6
+ debug_image = cv2.addWeighted(image, alpha, result_mask_colored, 1 - alpha, 0)
+
+ return debug_image
+
+ def publish_debug_image(self, debug_image):
+ try:
+ # Convert the CV2 image back to a ROS Image message
+ debug_image_msg = bridge.cv2_to_imgmsg(debug_image, encoding='bgr8')
+ self.debug_image_pub.publish(debug_image_msg)
+ except CvBridgeError as e:
+ rospy.logerr("CvBridge Error: {0}".format(e))
+
+ def adjust_camera_info(self, original_info, target_width, target_height):
+ scale_x = target_width / float(original_info.width)
+ scale_y = target_height / float(original_info.height)
+
+ adjusted_info = CameraInfo()
+ adjusted_info.header = original_info.header
+ adjusted_info.width = target_width
+ adjusted_info.height = target_height
+ adjusted_info.K = [scale_x * original_info.K[0], 0, scale_x * original_info.K[2],
+ 0, scale_y * original_info.K[4], scale_y * original_info.K[5],
+ 0, 0, 1]
+ adjusted_info.P = [scale_x * original_info.P[0], 0, scale_x * original_info.P[2], 0,
+ 0, scale_y * original_info.P[5], scale_y * original_info.P[6], 0,
+ 0, 0, 1, 0]
+ adjusted_info.D = original_info.D
+ adjusted_info.R = original_info.R
+
+ return adjusted_info
+
+ def image_callback(self, msg):
+ try:
+ # Convert the ROS Image message to a CV2 image
+ cv_image = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
+ except CvBridgeError as e:
+ rospy.logerr("CvBridge Error: {0}".format(e))
+ return
+
+ # Perform segmentation on the received image
+ result_mask = self.perform_segmentation(cv_image)
+
+ # Resize the entire mask to match the original image size
+ result_mask_resized = cv2.resize(
+ result_mask,
+ (cv_image.shape[1],
+ cv_image.shape[0]),
+ interpolation=cv2.INTER_NEAREST)
+
+ # Adjust the camera info for the resized mask
+ if self.current_camera_info is not None:
+ adjusted_camera_info = self.adjust_camera_info(
+ self.current_camera_info, cv_image.shape[1], cv_image.shape[0])
+ self.camera_info_pub.publish(adjusted_camera_info)
+
+ # Publish the mask for each class
+ for i in range(self.num_classes):
+ class_mask = (result_mask_resized == i).astype(np.uint8) * 255
+ self.publish_class_mask(class_mask, self.result_pubs[i])
+
+ # If debug mode is enabled, publish the debug image
+ if self.debug:
+ debug_image = self.create_debug_image(cv_image, result_mask_resized)
+ self.publish_debug_image(debug_image)
+
+ def publish_class_mask(self, class_mask, pub):
+ try:
+ # Convert the CV2 image back to a ROS Image message
+ mask_msg = bridge.cv2_to_imgmsg(class_mask, encoding='mono8')
+ pub.publish(mask_msg)
+ except CvBridgeError as e:
+ rospy.logerr("CvBridge Error: {0}".format(e))
+
+
+if __name__ == '__main__':
+ try:
+ node = RoadSegmentationNode()
+ rospy.spin()
+ except rospy.ROSInterruptException:
+ pass
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..c40d52c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,10 @@
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
+
+d = generate_distutils_setup(
+ packages=['road_segmentation'],
+ package_dir={'': 'src'}
+)
+
+setup(**d)
+
diff --git a/src/road_segmentation/__init__.py b/src/road_segmentation/__init__.py
new file mode 100644
index 0000000..4b2bbf5
--- /dev/null
+++ b/src/road_segmentation/__init__.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
diff --git a/src/road_segmentation/__pycache__/model_loader.cpython-38.pyc b/src/road_segmentation/__pycache__/model_loader.cpython-38.pyc
new file mode 100644
index 0000000..dde2bf1
Binary files /dev/null and b/src/road_segmentation/__pycache__/model_loader.cpython-38.pyc differ
diff --git a/data_loader/data_loader.py b/src/road_segmentation/data_loader/data_loader.py
similarity index 98%
rename from data_loader/data_loader.py
rename to src/road_segmentation/data_loader/data_loader.py
index 250b475..9590291 100644
--- a/data_loader/data_loader.py
+++ b/src/road_segmentation/data_loader/data_loader.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import os
import cv2
import numpy as np
diff --git a/data_loader/display.py b/src/road_segmentation/data_loader/display.py
similarity index 75%
rename from data_loader/display.py
rename to src/road_segmentation/data_loader/display.py
index c50989c..fd0493d 100644
--- a/data_loader/display.py
+++ b/src/road_segmentation/data_loader/display.py
@@ -1,27 +1,32 @@
-from IPython.display import clear_output
-from data_loader import *
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import tensorflow as tf
+from IPython.display import clear_output
+
+from .data_loader import *
def display(display_list):
plt.figure(figsize=(7, 7))
for i in range(3):
- plt.subplot(3, 3, i*3+1)
- plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i][0]/255))
+ plt.subplot(3, 3, i * 3 + 1)
+ plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i][0] / 255))
plt.axis('off')
- plt.subplot(3, 3, i*3+2)
- plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i][1]/255))
+ plt.subplot(3, 3, i * 3 + 2)
+ plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i][1] / 255))
plt.axis('off')
- plt.subplot(3, 3, i*3+3)
- plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i][2]/255))
+ plt.subplot(3, 3, i * 3 + 3)
+ plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i][2] / 255))
plt.axis('off')
plt.show()
+
def create_mask(pred_mask):
pred_mask = tf.argmax(pred_mask, axis=-1)
pred_mask = pred_mask[..., tf.newaxis]
- return pred_mask[0]
+ return pred_mask[0]
+
def show_predictions(image, label, model):
@@ -29,7 +34,6 @@ def show_predictions(image, label, model):
pred_mask = [model.predict(image[tf.newaxis, ...]) for image in image]
display_list = [[image[i], label[i], create_mask(pred_mask[i])] for i in range(3)]
display(display_list)
- else :
+ else:
display_list = [[image[i], label[i], image[i]] for i in range(3)]
display(display_list)
-
\ No newline at end of file
diff --git a/src/road_segmentation/data_loader/split_train_test.py b/src/road_segmentation/data_loader/split_train_test.py
new file mode 100644
index 0000000..dd3eb34
--- /dev/null
+++ b/src/road_segmentation/data_loader/split_train_test.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
+import shutil
+
+from .data_loader import mask_images
+from .data_loader import train_images
+
+train_images = train_images()
+mask_images = mask_images()
+
+train_num = int(len(train_images) * 0.75)
+
+test_img = train_images[train_num:]
+test_label = mask_images[train_num:]
+
+test_img_path = ['../dataset/test_img/' + test_img[i].split("/")[-1] for i in range(len(test_img))]
+test_label_path = ['../dataset/test_label/' + test_label[i].split("/")[-1] for i in range(len(test_label))]
+
+for i in range(len(test_img)):
+ shutil.move(test_img[i], test_img_path[i])
+
+for i in range(len(test_label)):
+ shutil.move(test_label[i], test_label_path[i])
diff --git a/model/Deeplab_v3.py b/src/road_segmentation/model/Deeplab_v3.py
similarity index 99%
rename from model/Deeplab_v3.py
rename to src/road_segmentation/model/Deeplab_v3.py
index f687e88..98e9f1b 100644
--- a/model/Deeplab_v3.py
+++ b/src/road_segmentation/model/Deeplab_v3.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import os
import math
import keras
diff --git a/model/fcn.py b/src/road_segmentation/model/fcn.py
similarity index 98%
rename from model/fcn.py
rename to src/road_segmentation/model/fcn.py
index bb0d9c5..a1be181 100644
--- a/model/fcn.py
+++ b/src/road_segmentation/model/fcn.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.applications.vgg16 import VGG16
diff --git a/model/icnet.py b/src/road_segmentation/model/icnet.py
similarity index 99%
rename from model/icnet.py
rename to src/road_segmentation/model/icnet.py
index 75cd1de..4fc767a 100644
--- a/model/icnet.py
+++ b/src/road_segmentation/model/icnet.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
diff --git a/model/pspnet.py b/src/road_segmentation/model/pspnet.py
similarity index 98%
rename from model/pspnet.py
rename to src/road_segmentation/model/pspnet.py
index 4334d16..663ff87 100644
--- a/model/pspnet.py
+++ b/src/road_segmentation/model/pspnet.py
@@ -1,4 +1,5 @@
-
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
diff --git a/model/pspunet.py b/src/road_segmentation/model/pspunet.py
similarity index 99%
rename from model/pspunet.py
rename to src/road_segmentation/model/pspunet.py
index 5401b98..9ca6879 100644
--- a/model/pspunet.py
+++ b/src/road_segmentation/model/pspunet.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
from tensorflow.keras import Sequential
import tensorflow as tf
diff --git a/model/pspunet_vgg16.py b/src/road_segmentation/model/pspunet_vgg16.py
similarity index 98%
rename from model/pspunet_vgg16.py
rename to src/road_segmentation/model/pspunet_vgg16.py
index 62d8b9b..dc8af2a 100644
--- a/model/pspunet_vgg16.py
+++ b/src/road_segmentation/model/pspunet_vgg16.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
from tensorflow.keras import Sequential
import tensorflow as tf
diff --git a/model/unet.py b/src/road_segmentation/model/unet.py
similarity index 98%
rename from model/unet.py
rename to src/road_segmentation/model/unet.py
index db1403b..f01bf90 100644
--- a/model/unet.py
+++ b/src/road_segmentation/model/unet.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
from tensorflow.keras import Sequential
import tensorflow as tf
diff --git a/model/unet_crop_copy.py b/src/road_segmentation/model/unet_crop_copy.py
similarity index 98%
rename from model/unet_crop_copy.py
rename to src/road_segmentation/model/unet_crop_copy.py
index 8526848..94acc91 100644
--- a/model/unet_crop_copy.py
+++ b/src/road_segmentation/model/unet_crop_copy.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env pipenv-shebang
+# -*- coding:utf-8 -*-
import numpy as np
from tensorflow.keras import Sequential
import tensorflow as tf