from __future__ import print_function

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import cv2
import numpy as np
import tensorflow as tf

# Import MNIST data
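# (note: the tensorflow.examples.tutorials module is only available in
# TensorFlow 1.x; it was removed in TensorFlow 2)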
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Directory of this script ('script_dir' avoids shadowing the builtin 'dir')
script_dir = os.path.dirname(os.path.realpath(__file__))

# Parameters
learning_rate = 0.001
training_iters = 50000
batch_size = 128
display_step = 10

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

def load_weights():
    # Import the saved meta graph into a fresh graph (so its variables do
    # not collide with the ones defined in this script), restore the
    # checkpoint, and return the trained values as numpy arrays.
    with tf.Graph().as_default():
        with tf.Session() as sess:
            saver = tf.train.import_meta_graph(script_dir + '/vars.ckpt.meta')
            saver.restore(sess, tf.train.latest_checkpoint(script_dir))
            graph = tf.get_default_graph()
            wc1 = graph.get_tensor_by_name('wc1:0').eval()
            wc2 = graph.get_tensor_by_name('wc2:0').eval()
            wd1 = graph.get_tensor_by_name('wd1:0').eval()
            w_out = graph.get_tensor_by_name('w_out:0').eval()
            bc1 = graph.get_tensor_by_name('bc1:0').eval()
            bc2 = graph.get_tensor_by_name('bc2:0').eval()
            bd1 = graph.get_tensor_by_name('bd1:0').eval()
            b_out = graph.get_tensor_by_name('b_out:0').eval()
            return [wc1, wc2, wd1, w_out, bc1, bc2, bd1, b_out]

# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)


def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

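# Note: with 'SAME' padding, each k=2 max-pool halves the spatial size, so
# the 28x28 input becomes 14x14 after the first pool and 7x7 after the
# second, which is why 'wd1' below expects 7*7*64 inputs.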

# Create model
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

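# conv_net returns raw logits (no softmax); softmax is applied inside
# tf.nn.softmax_cross_entropy_with_logits when the loss is computed below.
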
def conv_net2(x, weights, biases, dropout):
    # Same architecture as conv_net, but written for weights/biases given as
    # plain numpy arrays (e.g. from load_weights) rather than tf.Variables,
    # hence .shape[0] instead of .get_shape().
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].shape[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32]), name='wc1'),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64]), name='wc2'),
    # fully connected, 7*7*64 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024]), name='wd1'),
    # 1024 inputs, 10 outputs (class prediction)
    'out': tf.Variable(tf.random_normal([1024, n_classes]), name='w_out')
}

biases = {
    'bc1': tf.Variable(tf.random_normal([32]), name='bc1'),
    'bc2': tf.Variable(tf.random_normal([64]), name='bc2'),
    'bd1': tf.Variable(tf.random_normal([1024]), name='bd1'),
    'out': tf.Variable(tf.random_normal([n_classes]), name='b_out')
}

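# Note: tf.random_normal defaults to stddev=1.0, which is a fairly large
# initialization here; tf.truncated_normal with a small stddev (e.g. 0.1)
# is a common alternative that often trains more stably.
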
# Construct model
pred = conv_net(x, weights, biases, keep_prob)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until the max number of iterations is reached
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y,
                                                              keep_prob: 1.})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 256 mnist test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                        y: mnist.test.labels[:256],
                                        keep_prob: 1.}))

    saver = tf.train.Saver()
    path = saver.save(sess, script_dir + '/vars.ckpt')
    for v in tf.trainable_variables():
        print(v.name)
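    # The names printed above (e.g. 'wc1:0') are the tensor names that
    # load_weights() looks up after restoring this checkpoint.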

    batch_x = mnist.test.images[:10]
    batch_y = mnist.test.labels[:10]
    # print(batch_x[0])
    # img = np.reshape(batch_x[0], (28, 28))
    # cv2.imshow('img', img)
    # cv2.waitKey(0)

    # Reuse the existing graph ('pred') instead of rebuilding conv_net ops
    out = sess.run(pred, feed_dict={x: batch_x, keep_prob: 1.0})
    for i in range(0, len(batch_y)):
        print(np.argmax(batch_y[i]), np.argmax(out[i]))
    print(sess.run(biases['bc1']))
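
    # A minimal sketch of classifying a custom image with the trained net
    # (hypothetical file 'digit.png', assumed to be 28x28 grayscale):
    # img = cv2.imread('digit.png', cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
    # guess = sess.run(pred, feed_dict={x: img.reshape(1, 784), keep_prob: 1.0})
    # print('predicted digit:', np.argmax(guess[0]))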
# Let's load a previously saved meta graph into the default graph.
# import_meta_graph returns a Saver.

''' Load variables (example) '''
# with tf.Session() as sess:
#     saver = tf.train.import_meta_graph(script_dir + '/vars.ckpt.meta')
#     saver.restore(sess, tf.train.latest_checkpoint(script_dir))
#     graph = tf.get_default_graph()
#     bc1 = graph.get_tensor_by_name('bc1:0').eval()
#     print(bc1)

#
print('\nLOAD TEST\n')
# var_list = load_weights()
#
# new_weights = {
#     'wc1': var_list[0],
#     'wc2': var_list[1],
#     'wd1': var_list[2],
#     'out': var_list[3]
# }
# new_biases = {
#     'bc1': var_list[4],
#     'bc2': var_list[5],
#     'bd1': var_list[6],
#     'out': var_list[7]
# }
#
# batch_size = 10
# batch_x = mnist.test.images[:10]
# batch_y = mnist.test.labels[:10]
# with tf.Session() as sess:
#     out = sess.run(conv_net2(batch_x, new_weights, new_biases, 1.0))
#     print(out.shape)
#     print(batch_y.shape)
#     for i in range(0, len(batch_y)):
#         print(np.argmax(batch_y[i]), np.argmax(out[i]))
#         # reshape the flat 784-vector back to 28x28 before displaying
#         cv2.imshow('img', np.reshape(batch_x[i], (28, 28)))
#         cv2.waitKey(0)