@@ -17,53 +17,57 @@ def __init__(self):
    def video_capture(self):
        cap = cv2.VideoCapture(0)
        while (True):
-            a = 'a'
+            found = False
+            a = raw_input('Ready to find a sudokuboard?')

            # Capture frame-by-frame
            ret, orig_img = cap.read()
            self.img_list.append(orig_img)

            ''' Finding board, return contour and coordinates to box'''
-            processed_img = self.canny_edge_detector(deepcopy(orig_img))
-            box_points, contour_img, processed_img = self.find_sudoku_board(orig_img, processed_img)
-            board_img = self.crop_image(orig_img, box_points)
-            gray_board_img = cv2.cvtColor(board_img, cv2.COLOR_RGB2GRAY)
-            board_processed_img = self.crop_image(processed_img, box_points)
-            #self.img_list.append(board_processed_img)
+            #processed_img = self.canny_edge_detector(deepcopy(orig_img))
+            processed_img = self.preprocess_for_grid_detection(deepcopy(orig_img))
+            box_points, board_img = self.find_sudoku_board(orig_img, processed_img)
+            board_processed_img = self.preprocess_for_grid_detection(deepcopy(board_img))

            self.img_list.append(processed_img)
+            self.img_list.append(board_img)

            ''' We have a board_img '''
-            # if len(board_img > 0):
-            #
-            #     ''' Computing hough lines in board '''
-            #     # Find HoughLines, merges and return #
-            #     merged_lines = self.hough_lines(deepcopy(board_img), board_processed_img)
-            #     # if len(merged_lines) > 0:
-            #     #     print(merged_lines)
-            #     #     print('lines ->', len(merged_lines))
-            #     self.visualize_grid_lines(board_img, merged_lines)
-            #     if len(merged_lines) == 20:
-            #         print('Correct grid detected!')
-            #         #self.visualize_grid(board_img, merged_lines)
-            #         # Extract grid coordinates #
-            #         grid_points = self.extract_grid(board_img, merged_lines)
-            #         # # Maps the grid points to cells #
-            #         mapped_grid = self.map_grid(board_img, grid_points)
-            #         # ''' We have a confirmed grid '''
-            #         if mapped_grid is not None:
-            #             #print('map_grid ->', len(mapped_grid))
-            #             prefilled = self.classify_cells(gray_board_img, mapped_grid)
-            #             sudoku_to_solve = self.create_array_with_prefilled(prefilled)
-            #             print(sudoku_to_solve)
-            #             # print(sudoku_to_solve)
-            #             # self.solve_sudoku_board(sudoku_to_solve)
-            #             a = raw_input('.')
+            if len(board_img > 0):
+
+                ''' Computing hough lines in board '''
+                # Find HoughLines, merges and return #
+                merged_lines = self.hough_lines(deepcopy(board_img), board_processed_img)
+                self.visualize_grid_lines(board_img, merged_lines)
+
+                if len(merged_lines) == 20:
+                    print('Correct grid detected!')
+                    #self.visualize_grid(board_img, merged_lines)
+                    # Extract grid coordinates #
+                    grid_points = self.extract_grid(board_img, merged_lines)
+                    # # Maps the grid points to cells #
+                    mapped_grid = self.map_grid(board_img, grid_points)
+                    # ''' We have a confirmed grid '''
+                    if mapped_grid is not None:
+                        #print('map_grid ->', len(mapped_grid))
+
+                        prefilled = self.classify_cells_processed(board_processed_img, mapped_grid)
+
+                        # gray_board_img = cv2.cvtColor(board_img, cv2.COLOR_RGB2GRAY)
+                        # prefilled = self.classify_cells(gray_board_img, mapped_grid)
+
+                        sudoku_to_solve = self.create_array_with_prefilled(prefilled)
+                        print(sudoku_to_solve)
+                        #self.solve_sudoku_board(sudoku_to_solve)
+                        found = True

            ''' --- Show --- '''
            self.display_images()
            self.img_list = []  # Need to clear image_list before next run
            if cv2.waitKey(1) & 0xFF == ord('q') or a == 'q':
                self.quit_program(cap)
+            if found:
+                a = raw_input('Sudokuboard found!')


    def solve_sudoku_board(self, board_to_solve):
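Review note on the new guard in video_capture: `len(board_img > 0)` takes the length (row count) of the boolean array `board_img > 0`, so it is truthy for any non-empty crop rather than testing whether a board was really found. A minimal sketch of a more explicit check, assuming board_img is either None or a NumPy image (the helper name is hypothetical, not from this commit):

import numpy as np

def has_board(board_img):
    # Hypothetical guard: True only when find_sudoku_board returned a non-empty crop.
    return board_img is not None and isinstance(board_img, np.ndarray) and board_img.size > 0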
@@ -79,18 +83,40 @@ def create_array_with_prefilled(self, prefilled):
            sudoku_to_solve[prefilled[1][i]] = prefilled[0][i]
        return np.reshape(sudoku_to_solve, (9,9)).astype(int)

+
+    def classify_cells_processed(self, board_img, mapped_grid):
+        cells = np.asarray(self.crop_grid(board_img, mapped_grid))
+        cl_cells = []
+        idx_list = []
+        for idx, c in enumerate(cells):
+            c[:4,:] = 0.0
+            c[:,:4] = 0.0
+            c[24:,:] = 0.0
+            c[:,24:] = 0.0
+            if np.sum(c)/784 > 8:
+                cl_cells.append(c)
+                idx_list.append(idx)
+                self.img_list.append(c)
+        print('to classify: ', len(cl_cells))
+        pred = np.argmax(self.nc.classify_images(np.asarray(cl_cells)), axis=1)
+        prefilled = []
+        prefilled.append(pred)
+        prefilled.append(idx_list)
+        return prefilled
+

    def classify_cells(self, board_img, mapped_grid):
        cells = np.asarray(self.crop_grid(board_img, mapped_grid))
        cells = cv2.bitwise_not(cells) / 255.0
+        #cells = cells / 255.0
        cl_cells = []
        idx_list = []
        print('Success')
        for idx, c in enumerate(cells):
-            c[c < 0.6] = 0.0  #Threshhold works for now#
-            c[:5,:] = 0.0
-            c[:,:5] = 0.0
-            c[25:,:] = 0.0
-            c[:,25:] = 0.0
+            c[c < 0.7] = 0.0  #Threshhold works for now#
+            c[:4,:] = 0.0
+            c[:,:4] = 0.0
+            c[24:,:] = 0.0
+            c[:,24:] = 0.0
            c = c * 1.2
            if idx == 0:
                print(c)
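Note on the prefilled-cell test in classify_cells_processed: dividing `np.sum(c)` by 784 (28 x 28) gives the mean pixel value of the cell after its 4-pixel border has been blanked, so `> 8` effectively means "enough ink left in the centre to be a digit". A standalone sketch of the same heuristic, assuming 28x28 cells with intensities in 0..255 (cell size and threshold come from the diff, the helper name is hypothetical):

import numpy as np

def looks_prefilled(cell, border=4, mean_threshold=8):
    # Blank the border, then call the cell "prefilled" when the mean remaining
    # intensity exceeds the threshold (mirrors np.sum(c)/784 > 8 above).
    c = cell.astype(float).copy()
    c[:border, :] = 0.0
    c[:, :border] = 0.0
    c[-border:, :] = 0.0
    c[:, -border:] = 0.0
    return c.mean() > mean_threshold

The returned `prefilled` list carries the classifier predictions and the matching cell indices; create_array_with_prefilled scatters the predictions into a flat 81-element array at those indices and reshapes it to 9x9.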
@@ -112,12 +138,9 @@ def classify_cells(self, board_img, mapped_grid):
        return prefilled

    def find_sudoku_board(self, orig_img, processed_img):
-        #gray_board_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2GRAY)
-        #processed_img = self.preprocess_for_grid_detection(gray_board_img)
        contour = self.find_contours(processed_img)
-        contour_img, box_points = self.draw_contours(orig_img, contour)
-        #cropped = self.crop_image(deepcopy(orig_img), box_points)
-        return box_points, contour_img, processed_img
+        contour_img, box_points = self.draw_contours(deepcopy(orig_img), contour)
+        return box_points, contour_img


    def crop_grid(self, img, mapped_grid):
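After this change find_sudoku_board returns only the corner points plus whatever draw_contours produces, which in the next hunk becomes the 400x400 perspective-warped board, so the caller no longer crops the frame itself. find_contours is not shown in this diff; the sketch below is only an assumption about what such a helper typically does (largest external contour of the thresholded frame), not the repository's implementation:

import cv2

def find_largest_contour(thresh_img):
    # Hypothetical stand-in for find_contours: take the biggest external contour
    # in the binary image; when the sudoku board dominates the frame, that
    # contour is usually the board outline.
    # (OpenCV 4 signature; OpenCV 3 returns an extra leading value.)
    contours, _ = cv2.findContours(thresh_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return max(contours, key=cv2.contourArea) if contours else None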
@@ -248,23 +271,34 @@ def find_contours(self, img):


    def preprocess_for_grid_detection(self, orig_img):
-        gaus_img = cv2.GaussianBlur(orig_img, (11,11), 0)
-        thresh_img = cv2.adaptiveThreshold(gaus_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
+        gaus_img = cv2.GaussianBlur(orig_img, (5,5), 0)
+        gaus_img = cv2.cvtColor(gaus_img, cv2.COLOR_RGB2GRAY)
+        thresh_img = cv2.adaptiveThreshold(gaus_img, maxValue=255, adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                           thresholdType=cv2.THRESH_BINARY_INV, blockSize=3, C=2)
        return thresh_img


    def draw_contours(self, orig_img, contour):
-        ''' Can be improved, still a little unstable '''
-        #contour_img = cv2.drawContours(orig_img, contour, -1, (0,255,0), 3)
-        #perimeter = cv2.arcLength(contour, True)
-        #epsilon = 0.1*cv2.arcLength(contour, True)
-        #approx = cv2.approxPolyDP(contour, epsilon, True)
-        x,y,w,h = cv2.boundingRect(contour)
-        contour_img = cv2.rectangle(orig_img, (x,y), (x+w,y+h), (0,255,0), 2)
-        box_points = np.array([[y, y+h], [x, x+w]])
-        return contour_img, box_points
-        #box_points = np.int0(box_points)
-        # contour_img = cv2.drawContours(orig_img,[box_points], 0, (0,255,0), 3)
+        rect = cv2.minAreaRect(contour)
+        box_points = cv2.boxPoints(rect)
+        box_points = np.int0(box_points)
+        #contour_img = cv2.drawContours(orig_img,[box_points],0,(0,0,255),2)
+
+        def dist_to_point(point):
+            return np.sqrt((point[0]^2)+(point[1]^2))
+
+        if dist_to_point(box_points[2]) < dist_to_point(box_points[1]):
+            botr,botl,topl,topr = box_points
+        else:
+            botl,topl,topr,botr = box_points
+
+        rect_points = np.float32([botr, botl, topl, topr])
+        transform_points = np.float32([[400,400],[0,400],[0,0],[400,0]])
+        M = cv2.getPerspectiveTransform(rect_points, transform_points)
+        warped = cv2.warpPerspective(orig_img, M, (400,400))
+        self.img_list.append(warped)
+
+        return warped, box_points


    def gaussian_blur(self, orig_img):
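Two review notes on this hunk. First, preprocess_for_grid_detection now does blur, grayscale conversion, then an inverted adaptive threshold, so grid lines come out white on black, which suits contour finding and HoughLines. Second, the nested dist_to_point helper uses `^`, Python's bitwise XOR, so `point[0]^2` does not square the coordinate and the corner-ordering test is not a true distance to the origin. A corrected sketch of the intended Euclidean distance (helper name kept from the diff):

import numpy as np

def dist_to_point(point):
    # Distance from the image origin; ** is exponentiation, ^ is bitwise XOR.
    return np.sqrt(point[0] ** 2 + point[1] ** 2)

With that distance, the branch decides how the corner order returned by cv2.boxPoints maps onto bottom-right/bottom-left/top-left/top-right before the warp to the fixed 400x400 square.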
@@ -306,7 +340,7 @@ def add_to_list(rho, theta):

        lines = cv2.HoughLines(image=edges, rho=1, theta=1*np.pi/180, threshold=150)
        if lines is not None:
-            lines = abs(lines)
+            # lines = abs(lines)
            for x in range(0, len(lines)):
                f_rho = lines[x][0][0]
                f_theta = lines[x][0][1]
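Context for dropping `lines = abs(lines)`: cv2.HoughLines returns an array of shape (N, 1, 2) of (rho, theta) pairs, and rho is signed; taking its absolute value without also adjusting theta changes which line a pair describes, which is presumably why the call is commented out. A minimal sketch of the standard conversion from one (rho, theta) pair to drawable endpoints, not code from this repository:

import numpy as np

def hough_to_segment(rho, theta, half_length=1000):
    # Start from the point on the line closest to the origin, then step along
    # the line direction (-sin(theta), cos(theta)) in both directions.
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    p1 = (int(x0 - half_length * b), int(y0 + half_length * a))
    p2 = (int(x0 + half_length * b), int(y0 - half_length * a))
    return p1, p2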