import numpy as np
import pandas as pd
import h5py
- import matplotlib.pyplot as plt
+ # import matplotlib.pyplot as plt
from math import cos, sin, atan2, sqrt, pi, radians, degrees, ceil, floor
import tensorflow as tf

train_file_path = data_path + 'normaliaztion_train_data.h5'
test_file_path = data_path + 'normaliaztion_test_data.h5'
log_path = './log/'
- log_file_path = log_path + file_time + '.h5'
+ log_file_path = log_path + file_time + '.h5'
log_text_path = log_path + file_time + '.txt'
- modal_file_path = 'mymodal'
+ modal_file_path = 'mymodal'
# modal_path = './modal/'
# modal_file_path = modal_path + 'mymodal'

c = 1

# Train over all samples train_num times; in each pass the samples are
# processed in groups of batch_size.
- train_num = 3500
+ train_num = 1500
batch_size = 55
regulary = 0.00375
learning_rate = 0.00125

x = tf.placeholder(tf.float32, [None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, [None], name='y_')
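# x holds a batch of w×h×c inputs and y_ the corresponding integer class
# labels; both are fed at run time via feed_dict (TF 1.x graph mode).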

+ # Randomly shuffle the point-set data
+ def exchange_data_index(sum_data, label_data):
+     cursor_index = 0
+     max_range = len(sum_data)
+     while cursor_index < max_range:
+         random_index = random.randint(0, max_range - 1)
+         temp_sum_data = sum_data[0]
+         temp_label_data = label_data[0]

+         sum_data = np.delete(sum_data, 0, axis=0)
+         label_data = np.delete(label_data, 0, axis=0)
+         sum_data = np.insert(sum_data, random_index, temp_sum_data, axis=0)
+         label_data = np.insert(label_data, random_index,
+                                temp_label_data, axis=0)
+
+         cursor_index += 1
+     return sum_data, label_data
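# Each pass moves the current head element to a random index, so after
# max_range passes both arrays are shuffled with their pairing preserved.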

# Fetch batch_size samples at a time for training or testing
def get_batch(data, label, batch_size):
    for start_index in range(0, len(data) - batch_size + 1, batch_size):
        slice_index = slice(start_index, start_index + batch_size)
        yield data[slice_index], label[slice_index]
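# Note: the range stops at len(data) - batch_size + 1, so any trailing partial
# batch is dropped. A minimal usage sketch (illustrative only):
#     for batch_x, batch_y in get_batch(train_data, train_label, batch_size):
#         ...  # batch_x: (batch_size, w, h, c), batch_y: (batch_size,)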

+
def inference(input_tensor, train, regularizer):
-
+
    # Layer 1: convolution, 5×5 filters, depth 6, no zero padding (VALID), stride 1.
    # Size change: 32×32×1 -> 28×28×6
    '''Parameter initialization: tf.truncated_normal_initializer(), or the shorthand forms tf.TruncatedNormal() / tf.RandomNormal() -- just drop the _initializer suffix and capitalize the leading letters
@@ -69,14 +86,14 @@ def inference(input_tensor, train, regularizer):
        conv1_biases = tf.get_variable(
            'bias', [6], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[
-             1, 1, 1, 1], padding='VALID')
+             1, 1, 1, 1], padding='VALID')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
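        # With VALID padding and stride 1 the output side length is
        # (32 - 5) + 1 = 28, matching the 32×32×1 -> 28×28×6 note above.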

    # Layer 2: max pooling, 2×2 window, zero padding (SAME), stride 2.
    # Size change: 28×28×6 -> 14×14×6
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[
-             1, 2, 2, 1], padding='SAME')
+             1, 2, 2, 1], padding='SAME')

    # Layer 3: convolution, 5×5 filters, depth 16, no zero padding (VALID), stride 1.
    # Size change: 14×14×6 -> 10×10×16
@@ -86,14 +103,14 @@ def inference(input_tensor, train, regularizer):
        conv2_biases = tf.get_variable(
            'bias', [16], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[
-             1, 1, 1, 1], padding='VALID')
+             1, 1, 1, 1], padding='VALID')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
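        # Same arithmetic as layer 1: (14 - 5) + 1 = 10, i.e. 14×14×6 -> 10×10×16.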

    # Layer 4: max pooling, 2×2 window, zero padding (SAME), stride 2.
    # Size change: 10×10×16 -> 5×5×16
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[
-             1, 2, 2, 1], padding='SAME')
+             1, 2, 2, 1], padding='SAME')

    # Convert the output of the layer-4 pooling layer into the input format of the
    # layer-5 fully connected layer. Layer 4 outputs a 5×5×16 matrix per image, but
    # the fully connected layer expects a vector, so each 5×5×16 matrix is flattened
    # into a vector of length 5×5×16.
@@ -154,6 +171,7 @@ def inference(input_tensor, train, regularizer):
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit

+
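# inference() returns unscaled logits; the softmax is applied inside
# tf.nn.sparse_softmax_cross_entropy_with_logits in start_training() below.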
def start_training(train_data, train_label, test_data, test_label):

    # Shuffle the training and test data. np.arange() returns an evenly spaced
    # sequence over a fixed start, stop, and step,
@@ -183,13 +201,14 @@ def start_training(train_data, train_label, test_data, test_label):

    global regulary, learning_rate, beta1, beta2
    regularizer = tf.contrib.layers.l2_regularizer(regulary)
-     # y = inference(x, False, regularizer)
-     y = inference(x, True, regularizer)
+     y = inference(x, False, regularizer)
+     # y = inference(x, True, regularizer)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=y_)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
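    # The 'losses' collection is assumed to hold the L2 regularization terms that
    # inference() adds for its weight variables (those calls are outside this diff),
    # so the total loss is cross-entropy plus weight regularization.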
-     train_op = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2).minimize(loss)
+     train_op = tf.train.AdamOptimizer(
+         learning_rate=learning_rate, beta1=beta1, beta2=beta2).minimize(loss)
    correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
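    # Predictions are the argmax over the logits; comparing them with the integer
    # labels and averaging the matches gives the batch accuracy.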
    saver = tf.train.Saver()
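    # The Saver presumably checkpoints the trained variables to modal_file_path
    # ('mymodal') defined above; the saver.save() call is outside this diff.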
@@ -210,7 +229,7 @@ def start_training(train_data, train_label, test_data, test_label):
            train_loss, train_acc, batch_num = 0, 0, 0
            for train_data_batch, train_label_batch in get_batch(train_data, train_label, batch_size):
                _, err, acc = sess.run([train_op, loss, accuracy], feed_dict={
-                     x: train_data_batch, y_: train_label_batch})
+                     x: train_data_batch, y_: train_label_batch})
                train_loss += err
                train_acc += acc
                batch_num += 1
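                # Per-batch loss/accuracy are accumulated here; the code below this
                # hunk presumably averages them over batch_num for the epoch report.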
@@ -242,7 +261,7 @@ def start_training(train_data, train_label, test_data, test_label):
                max_test_acc = avg_acc
            if min_test_loss > avg_loss:
                min_test_loss = avg_loss
-             print('[Test - epoch %d: %d batches, %d data items per batch]' % (i + 1,batch_num,batch_size))
+             print('[Test - epoch %d: %d batches, %d data items per batch]' % (i + 1, batch_num, batch_size))
            print("test loss:", avg_loss)
            print("test acc:", avg_acc)

@@ -259,6 +278,9 @@ def start_training(train_data, train_label, test_data, test_label):
normalized_test_data = f['data'][()]
rand_test_typical_data = f['label'][()]

+ print(normalized_train_data.shape)
+ print(normalized_test_data.shape)
+
start_time = time.time()

start_training(normalized_train_data, rand_train_typical_data,
@@ -284,13 +306,12 @@ def start_training(train_data, train_label, test_data, test_label):
f.write('test_avg_acc:' + str(test_avg_acc) + '\r\n')
f.write('max_test_acc:' + str(max_test_acc) + '\r\n')
f.write('min_test_loss:' + str(min_test_loss) + '\r\n')
- f.write('time:' + str(end_time - start_time) + 's\r\n')
+ f.write('time:' + str(end_time - start_time) + 's\r\n')
f.close()

- acc_len = len(test_acc_array)
- plt.plot([i for i in range(acc_len)], test_acc_array)
- plt.show()
-
- plt.plot([i for i in range(acc_len)], test_loss_array)
- plt.show()
+ # acc_len = len(test_acc_array)
+ # plt.plot([i for i in range(acc_len)], test_acc_array)
+ # plt.show()

+ # plt.plot([i for i in range(acc_len)], test_loss_array)
+ # plt.show()