
Commit 278ba8b

classification working (not really)

1 parent 6c40b7d commit 278ba8b

9 files changed, +300 -78 lines changed

CNN_MNIST.py (+228)
@@ -0,0 +1,228 @@
+from __future__ import print_function
+
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
+
+import cv2
+import numpy as np
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+dir = os.path.dirname(os.path.realpath(__file__))
+
+# Parameters
+learning_rate = 0.001
+training_iters = 50000
+batch_size = 128
+display_step = 10
+
+# Network Parameters
+n_input = 784  # MNIST data input (img shape: 28*28)
+n_classes = 10  # MNIST total classes (0-9 digits)
+dropout = 0.75  # Dropout, probability to keep units
+
+# tf Graph input
+x = tf.placeholder(tf.float32, [None, n_input])
+y = tf.placeholder(tf.float32, [None, n_classes])
+keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
+
+def load_weights():
+    with tf.Session() as sess:
+        saver = tf.train.import_meta_graph(dir + '/vars.ckpt.meta')
+        saver.restore(sess, tf.train.latest_checkpoint('./'))
+        graph = tf.get_default_graph()
+        wc1 = graph.get_tensor_by_name('wc1:0').eval()
+        wc2 = graph.get_tensor_by_name('wc2:0').eval()
+        wd1 = graph.get_tensor_by_name('wd1:0').eval()
+        w_out = graph.get_tensor_by_name('w_out:0').eval()
+        bc1 = graph.get_tensor_by_name('bc1:0').eval()
+        bc2 = graph.get_tensor_by_name('bc2:0').eval()
+        bd1 = graph.get_tensor_by_name('bd1:0').eval()
+        b_out = graph.get_tensor_by_name('b_out:0').eval()
+        return [wc1, wc2, wd1, w_out, bc1, bc2, bd1, b_out]
+
+# Create some wrappers for simplicity
+def conv2d(x, W, b, strides=1):
+    # Conv2D wrapper, with bias and relu activation
+    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
+    x = tf.nn.bias_add(x, b)
+    return tf.nn.relu(x)
+
+def maxpool2d(x, k=2):
+    # MaxPool2D wrapper
+    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
+                          padding='SAME')
+
+# Create model
+def conv_net(x, weights, biases, dropout):
+    # Reshape input picture
+    x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+    # Convolution Layer
+    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
+    # Max Pooling (down-sampling)
+    conv1 = maxpool2d(conv1, k=2)
+
+    # Convolution Layer
+    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
+    # Max Pooling (down-sampling)
+    conv2 = maxpool2d(conv2, k=2)
+
+    # Fully connected layer
+    # Reshape conv2 output to fit fully connected layer input
+    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
+    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
+    fc1 = tf.nn.relu(fc1)
+    # Apply Dropout
+    fc1 = tf.nn.dropout(fc1, dropout)
+
+    # Output, class prediction
+    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
+    return out
+
+def conv_net2(x, weights, biases, dropout):
+    # Reshape input picture
+    x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+    # Convolution Layer
+    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
+    # Max Pooling (down-sampling)
+    conv1 = maxpool2d(conv1, k=2)
+
+    # Convolution Layer
+    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
+    # Max Pooling (down-sampling)
+    conv2 = maxpool2d(conv2, k=2)
+
+    # Fully connected layer
+    # Reshape conv2 output to fit fully connected layer input
+    fc1 = tf.reshape(conv2, [-1, weights['wd1'].shape[0]])
+    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
+    fc1 = tf.nn.relu(fc1)
+    # Apply Dropout
+    fc1 = tf.nn.dropout(fc1, dropout)
+
+    # Output, class prediction
+    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
+    return out
+
+# Store layers weight & bias
+weights = {
+    # 5x5 conv, 1 input, 32 outputs
+    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32]), name='wc1'),
+    # 5x5 conv, 32 inputs, 64 outputs
+    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64]), name='wc2'),
+    # fully connected, 7*7*64 inputs, 1024 outputs
+    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024]), name='wd1'),
+    # 1024 inputs, 10 outputs (class prediction)
+    'out': tf.Variable(tf.random_normal([1024, n_classes]), name='w_out')
+}
+
+biases = {
+    'bc1': tf.Variable(tf.random_normal([32]), name='bc1'),
+    'bc2': tf.Variable(tf.random_normal([64]), name='bc2'),
+    'bd1': tf.Variable(tf.random_normal([1024]), name='bd1'),
+    'out': tf.Variable(tf.random_normal([n_classes]), name='b_out')
+}
+
+# Construct model
+pred = conv_net(x, weights, biases, keep_prob)
+
+# Define loss and optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initializing the variables
+init = tf.global_variables_initializer()
+
+# Launch the graph
+with tf.Session() as sess:
+    sess.run(init)
+    step = 1
+    # Keep training until reach max iterations
+    while step * batch_size < training_iters:
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Run optimization op (backprop)
+        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
+                                       keep_prob: dropout})
+        if step % display_step == 0:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
+                                                              y: batch_y,
+                                                              keep_prob: 1.})
+            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
+                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.5f}".format(acc))
+        step += 1
+    print("Optimization Finished!")
+
+    # Calculate accuracy for 256 mnist test images
+    print("Testing Accuracy:", \
+          sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
+                                        y: mnist.test.labels[:256],
+                                        keep_prob: 1.}))
+
+    saver = tf.train.Saver()
+    path = saver.save(sess, dir + '/vars.ckpt')
+    for v in tf.trainable_variables():
+        print(v.name)
+
+    batch_x = mnist.test.images[:10]
+    batch_y = mnist.test.labels[:10]
+    # print(batch_x[0])
+    # img = np.reshape(batch_x[0], (28,28))
+    # cv2.imshow('img', img)
+    # cv2.waitKey(0)
+
+    out = sess.run(conv_net(batch_x, weights, biases, 1.0))
+    for i in range(0, len(batch_y)):
+        print(np.argmax(batch_y[i]), np.argmax(out[i]))
+    print(sess.run(biases['bc1']))
+
+# Let's load a previously saved meta graph in the default graph
+# This function returns a Saver
+
+''' Save Variables'''
+# with tf.Session() as sess:
+#     saver = tf.train.import_meta_graph(dir + '/vars.ckpt-128.meta')
+#     saver.restore(sess, tf.train.latest_checkpoint('./'))
+#     graph = tf.get_default_graph()
+#     bc1 = graph.get_tensor_by_name('bc1:0').eval()
+#     print(bc1)
+
+#
+print('\nLOAD TEST\n')
+# var_list = load_weights()
+# #
+# new_weights = {
+#     'wc1': var_list[0],
+#     'wc2': var_list[1],
+#     'wd1': var_list[2],
+#     'out': var_list[3]
+# }
+# new_biases = {
+#     'bc1': var_list[4],
+#     'bc2': var_list[5],
+#     'bd1': var_list[6],
+#     'out': var_list[7]
+# }
+#
+# batch_size = 10
+# batch_x = mnist.test.images[:10]
+# batch_y = mnist.test.labels[:10]
+# with tf.Session() as sess:
+#     out = sess.run(conv_net2(batch_x, new_weights, new_biases, 1.0))
+#     print(out.shape)
+#     print(batch_y.shape)
+#     for i in range(0, len(batch_y)):
+#         print(np.argmax(batch_y[i]), np.argmax(out[i]))
+#         cv2.imshow('img', batch_x[i])
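Note: the script saves every variable under an explicit name (wc1 ... b_out), which is what lets load_weights() pull concrete numpy arrays back out of vars.ckpt by tensor name. A minimal sketch of that restore step in isolation, assuming the checkpoint files sit next to the working directory and a fresh graph (the reset call and relative paths are assumptions, not part of this commit):

import tensorflow as tf

tf.reset_default_graph()  # assumed: start clean so import_meta_graph does not collide
with tf.Session() as sess:
    # Rebuild the graph structure from the .meta file, then load the saved values
    saver = tf.train.import_meta_graph('vars.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    # Variables were created with explicit names, so their ':0' tensors resolve directly
    wc1 = sess.run('wc1:0')
    print(wc1.shape)  # expected (5, 5, 1, 32), matching the wc1 definition above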

NumberClassification.py (+42 -68)
@@ -2,79 +2,77 @@
 import os
 os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
 
+import cv2
 import numpy as np
 import tensorflow as tf
-from tensorflow.examples.tutorials.mnist import input_data
+
 dir = os.path.dirname(os.path.realpath(__file__))
 
 class NumberClassification:
     def __init__(self):
         print('')
-
+        self.weights = None
+        self.biases = None
         var_list = self.load_weights()
+
         self.weights = {
             'wc1': var_list[0],
             'wc2': var_list[1],
             'wd1': var_list[2],
             'out': var_list[3]
         }
-
         self.biases = {
             'bc1': var_list[4],
             'bc2': var_list[5],
             'bd1': var_list[6],
             'out': var_list[7]
         }
 
-        # We use a neural net classifier
-        ''' --- Our model ---'''
-
-        #self.y = tf.nn.softmax(tf.matmul(self.x, self.W) + self.b)
-        #self.vars = self.train()
-        #self.trained_W = self.vars[0].astype(np.float32)
-        #self.trained_b = self.vars[1].astype(np.float32)
-        #print(self.trained_W.shape)
-        #print(self.trained_b.shape)
-
     def classify_images(self, images):
+        print(images.shape)
+        images = tf.cast(images, tf.float32)
         with tf.Session() as sess:
-            print(sess.run(conv_net(images)))
-
-        #print(self.x)
-        #print(self.W)
-        #print(self.b)
-        #images = tf.cast(images.reshape(images.shape[0], -1), tf.float32)
-        #print(images.shape)
-        #labels = None
-        #y_model = tf.nn.softmax(tf.matmul(images, self.trained_W) + self.trained_b)
-        #with tf.Session() as sess:
-            #labels = sess.run(y_model)
-        #print(labels)
-        #print(labels.shape)
-
-
-    def conv_net(x):
+            return sess.run(self.conv_net(images))
+
+        # #batch_size = 10
+        # #images = tf.cast(images, tf.float32)
+        #
+        # batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # #print('pred ->', batch_y, '\n--\n')
+        # cv2.imshow('img', batch_x[0])
+        # with tf.Session() as sess:
+        #     return sess.run(self.conv_net(batch_x)), batch_y
+
+    def maxpool2d(self, x, k=2):
+        return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
+
+    def conv2d(self, x, W, b, strides=1):
+        x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
+        x = tf.nn.bias_add(x, b)
+        return tf.nn.relu(x)
+
+    def conv_net(self, x):
         # Reshape input picture
         x = tf.reshape(x, shape=[-1, 28, 28, 1])
 
         # Convolution Layer
-        conv1 = conv2d(x, self.weights['wc1'], self.biases['bc1'])
+        conv1 = self.conv2d(x, self.weights['wc1'], self.biases['bc1'])
         # Max Pooling (down-sampling)
-        conv1 = maxpool2d(self.conv1, k=2)
+        conv1 = self.maxpool2d(conv1, k=2)
 
         # Convolution Layer
-        conv2 = conv2d(self.conv1, self.weights['wc2'], self.biases['bc2'])
+        conv2 = self.conv2d(conv1, self.weights['wc2'], self.biases['bc2'])
         # Max Pooling (down-sampling)
-        conv2 = maxpool2d(conv2, k=2)
+        conv2 = self.maxpool2d(conv2, k=2)
 
         # Fully connected layer
         # Reshape conv2 output to fit fully connected layer input
-        fc1 = tf.reshape(conv2, [-1, self.weights['wd1'].get_shape().as_list()[0]])
+        fc1 = tf.reshape(conv2, [-1, self.weights['wd1'].shape[0]])
         fc1 = tf.add(tf.matmul(fc1, self.weights['wd1']), self.biases['bd1'])
         fc1 = tf.nn.relu(fc1)
-        # Apply Dropout
-        #fc1 = tf.nn.dropout(fc1, self.dropout)
 
         # Output, class prediction
         out = tf.add(tf.matmul(fc1, self.weights['out']), self.biases['out'])
@@ -84,37 +82,13 @@ def load_weights(self):
         with tf.Session() as sess:
             saver = tf.train.import_meta_graph(dir + '/vars.ckpt.meta')
             saver.restore(sess, tf.train.latest_checkpoint('./'))
-            sess.run(tf.global_variables_initializer())
             graph = tf.get_default_graph()
-            wc1 = graph.get_tensor_by_name('wc1:0')
-            wc2 = graph.get_tensor_by_name('wc2:0')
-            wd1 = graph.get_tensor_by_name('wd1:0')
-            w_out = graph.get_tensor_by_name('w_out:0')
-            bc1 = graph.get_tensor_by_name('bc1:0')
-            bc2 = graph.get_tensor_by_name('bc2:0')
-            bd1 = graph.get_tensor_by_name('bd1:0')
-            b_out = graph.get_tensor_by_name('b_out:0')
+            wc1 = graph.get_tensor_by_name('wc1:0').eval()
+            wc2 = graph.get_tensor_by_name('wc2:0').eval()
+            wd1 = graph.get_tensor_by_name('wd1:0').eval()
+            w_out = graph.get_tensor_by_name('w_out:0').eval()
+            bc1 = graph.get_tensor_by_name('bc1:0').eval()
+            bc2 = graph.get_tensor_by_name('bc2:0').eval()
+            bd1 = graph.get_tensor_by_name('bd1:0').eval()
+            b_out = graph.get_tensor_by_name('b_out:0').eval()
             return [wc1, wc2, wd1, w_out, bc1, bc2, bd1, b_out]
-
-    # def train(self):
-    #     var_list = []
-    #     y_ = tf.placeholder(tf.float32, [None, 10])  # Correct labels
-    #     cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(self.y), reduction_indices=[1]))
-    #     train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
-    #     with tf.Session() as sess:
-    #         sess.run(tf.global_variables_initializer())
-    #         for _ in range(1000):
-    #             batch_xs, batch_ys = self.data.train.next_batch(100)
-    #             sess.run(train_step, feed_dict={self.x: batch_xs, y_: batch_ys})
-    #         correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(y_, 1))
-    #         accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-    #         print(sess.run(accuracy, feed_dict={self.x: self.data.test.images, y_: self.data.test.labels}))
-    #     for v in tf.trainable_variables():
-    #         var_list.append(v.eval())
-    #     return var_list
-    #
-    #
-    # def read_in_dataset(self):
-    #     data = input_data.read_data_sets("MNIST_data/", one_hot=True)
-    #     print('Data loaded...')
-    #     return data
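Intended usage, as far as the diff suggests (a sketch, not part of the commit: the input shape and values here are assumptions; classify_images only needs something it can reshape to [-1, 28, 28, 1]):

import numpy as np
from NumberClassification import NumberClassification

clf = NumberClassification()                # restores the numpy weights from vars.ckpt
images = np.zeros((10, 784), np.float32)    # assumed: 10 flattened 28x28 images
logits = clf.classify_images(images)        # forward pass through conv_net
print(np.argmax(logits, axis=1))            # predicted digit per image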

NumberClassification.pyc (706 Bytes)

Binary file not shown.

README.md (-3)

This file was deleted.
