# lanedetect_edges.py (forked from djnugent/lanetracker)
import time
import cv2
import numpy as np
import math
from helpers import *
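
# NOTE: transform(), find_lane() and thin() are expected to come from the
# star-imported helpers module; their signatures here are inferred from how
# they are called below, not from the helpers source.
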
class LaneDetector():

    def __init__(self, source):
        self.source = source

        # perspective mapping characteristics
        self.size = 640, 640
        self.src_corners = np.float32([[0, 719], [1279, 719], [400, 0], [880, 0]])
        self.dst_corners = np.float32([[300, self.size[1]], [self.size[0] - 300, self.size[1]],
                                       [0, 0], [self.size[0], 0]])

        # distortion characteristics
        self.matrix = np.array([[764.36600634, 0.0, 663.58169499],
                                [0.0, 764.86442335, 363.45071788],
                                [0.0, 0.0, 1.0]])
        self.distortion = np.array([-0.29435659, 0.14030301, 0.0, 0.0, 0.0])
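
    # For reference, a rough sketch of what helpers.transform() presumably does
    # with the corner sets above (an assumption based on its call sites below,
    # not the actual helpers implementation; kept commented out so it does not
    # shadow the imported function):
    #
    # def transform(img, src_corners, dst_corners, size):
    #     M = cv2.getPerspectiveTransform(src_corners, dst_corners)
    #     return cv2.warpPerspective(img, M, size)
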
    def main(self):
        # open camera/video
        vid = cv2.VideoCapture(self.source)
        if not vid.isOpened():
            print("unable to open video")
            return
        while True:
            ret, frame = vid.read()
            if frame is None:
                print("failed to grab frame")
                continue
            cv2.imshow("original", frame)
            processed = self.analyze_frame(frame)
            cv2.waitKey(1)
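            # cv2.waitKey(1) also lets OpenCV's HighGUI redraw the imshow windows;
            # checking its return value for a key press (e.g. 'q') would allow a
            # clean exit, but is not done here.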
    # extract lane data from a single frame
    def analyze_frame(self, frame):
        # grayscale
        if len(frame.shape) > 2:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # remove lens distortion (disabled)
        #frame = cv2.undistort(frame, self.matrix, self.distortion)
        # crop off the top 180 rows
        frame = frame[180:720]
        # remap perspective to the warped view
        draw = transform(frame, self.src_corners, self.dst_corners, self.size)
        draw = cv2.cvtColor(draw, cv2.COLOR_GRAY2BGR)  # BGR copy so the colored lane markers drawn below are visible
        # blur the image
        frame = cv2.GaussianBlur(frame, (3, 3), 0)
        # adaptive canny edge detector
        avg, _, _, _ = cv2.mean(frame)
        avg = int(avg)
        edges = cv2.Canny(frame, avg / 2, avg)
        edges = transform(edges, self.src_corners, self.dst_corners, self.size)
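        # The thresholds above tie Canny to the frame's mean intensity
        # (low = mean / 2, high = mean).  For reference only, a commented-out
        # median-based variant of the same idea (a common "auto-Canny" heuristic,
        # not part of the original pipeline):
        #
        # med = np.median(frame)
        # edges = cv2.Canny(frame, int(max(0, 0.67 * med)), int(min(255, 1.33 * med)))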
        # clean up the edge image: zero out edge pixels inside the masked polygon
        poly = np.array([[0, 300], [0, 639], [639, 639], [639, 300], [340, 635], [300, 635]])
        cv2.fillPoly(edges, [poly], 0)
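        # (the polygon above is specified in 640x640 warped-image coordinates,
        # since `edges` has already been remapped to self.size at this point)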
        '''
        kernel = np.array([[0,0,1,0,0],
                           [0,0,1,0,0],
                           [0,0,1,0,0]])
        #edges = cv2.dilate(edges,kernel)
        ret, edges = cv2.threshold(edges,30,255,cv2.THRESH_BINARY)
        '''
        cv2.imshow("edges", edges)
        '''
        #findContours
        im2, contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        filled = np.zeros(edges.shape,np.uint8)
        cv2.drawContours(filled, contours, -1, 255, -1)
        #filled = transform(filled, self.src_corners, self.dst_corners, self.size)
        cv2.imshow("filled",filled)
        '''
        '''
        #thinning
        skel = thin(edges)
        cv2.imshow("thin",skel)
        '''
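        # The three quoted-out blocks above are alternative post-processing
        # experiments (dilate/threshold, contour filling, thinning) left
        # disabled in this version.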
        # locate the lane lines in the edge image and draw them on the warped view
        left_line, center_line, right_line = find_lane(edges)
        for pnt in left_line:
            cv2.circle(draw, pnt, 5, (255, 0, 0), -1)    # left lane: blue
        for pnt in center_line:
            cv2.circle(draw, pnt, 5, (255, 255, 0), -1)  # center line: cyan
        for pnt in right_line:
            cv2.circle(draw, pnt, 5, (0, 0, 255), -1)    # right lane: red
        cv2.imshow("lane", draw)
        # note: the edge/lane results are only displayed; the blurred grayscale crop is returned
        return frame
if __name__ == '__main__':
    detector = LaneDetector("test.mp4")
    detector.main()
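
# Usage (assuming a test.mp4 clip next to this script and the helpers module
# importable from the same directory):
#   python lanedetect_edges.py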