# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data

# Downloads (if needed) and loads MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Directory containing this script; checkpoint files are resolved against it.
# NOTE(review): `dir` shadows the builtin of the same name — consider renaming.
dir = os.path.dirname(os.path.realpath(__file__))

# Parameters for training
learning_rate = 0.001
training_iters = 50000
batch_size = 128

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units

# Graph input placeholders
x = tf.placeholder(tf.float32, [None, n_input])    # flattened input images
y = tf.placeholder(tf.float32, [None, n_classes])  # one-hot labels
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
# Test method
# Display an image in an OpenCV window and block until any key is pressed.
# NOTE(review): `self` is unused — this is a module-level function, so the
# parameter looks like a leftover from a class method; confirm callers
# before removing it.
def show_image(self, img):
    cv2.imshow('img', img)
    cv2.waitKey(0)
36
+ #Needed for loading weights from disk
33
37
def load_weights ():
34
38
with tf .Session () as sess :
35
39
saver = tf .train .import_meta_graph (dir + '/vars.ckpt.meta' )
@@ -45,70 +49,37 @@ def load_weights():
45
49
b_out = graph .get_tensor_by_name ('b_out:0' ).eval ()
46
50
return [wc1 , wc2 , wd1 , w_out , bc1 , bc2 , bd1 , b_out ]
47
51
48
- # Create some wrappers for simplicity
52
+
53
# Convolution + bias add + ReLU activation
def conv2d(x, W, b, strides=1):
    """Apply a 2-D convolution with SAME padding, add the bias, and
    return the ReLU-activated result."""
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1],
                        padding='SAME')
    biased = tf.nn.bias_add(conv, b)
    return tf.nn.relu(biased)
54
58
55
-
59
# Maxpooling
def maxpool2d(x, k=2):
    """Down-sample with a k x k max-pool window at stride k (SAME padding)."""
    # Window and stride are identical: non-overlapping k x k pooling.
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
60
64
61
65
62
- # Create model
66
# Entire convolution net
def conv_net(x, weights, biases, dropout):
    # Builds the CNN graph: two conv + max-pool stages, one fully connected
    # layer with dropout, then a linear output layer (raw class scores, no
    # softmax).  `weights`/`biases` are dicts keyed 'wc1', 'wc2', 'wd1',
    # 'out' / 'bc1', 'bc2', 'bd1', 'out'; `dropout` is the keep probability
    # passed to tf.nn.dropout (it shadows the module-level constant of the
    # same name).

    # Reshape flat 784-vector input back to NHWC image form (28x28, 1 channel).
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # First convolution + down-sampling stage
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1, k=2)

    # Second convolution + down-sampling stage
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer section:
    # flatten conv2 so its feature count matches wd1's input dimension
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, dropout)

    # Linear output layer: per-class scores
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
114
85
@@ -155,7 +126,7 @@ def conv_net2(x, weights, biases, dropout):
155
126
# Run optimization op (backprop)
156
127
sess .run (optimizer , feed_dict = {x : batch_x , y : batch_y ,
157
128
keep_prob : dropout })
158
- if step % display_step == 0 :
129
+ if step % 10 == 0 :
159
130
# Calculate batch loss and accuracy
160
131
loss , acc = sess .run ([cost , accuracy ], feed_dict = {x : batch_x ,
161
132
y : batch_y ,
@@ -179,10 +150,6 @@ def conv_net2(x, weights, biases, dropout):
179
150
180
151
batch_x = mnist .test .images [:10 ]
181
152
batch_y = mnist .test .labels [:10 ]
182
- # print(batch_x[0])
183
- # img = np.reshape(batch_x[0], (28,28))
184
- # cv2.imshow('img', img)
185
- # cv2.waitKey(0)
186
153
187
154
out = sess .run (conv_net (batch_x , weights , biases , 1.0 ))
188
155
for i in range (0 , len (batch_y )):
0 commit comments