
Commit bba6531

feat: 🎸 final sealed version

1 parent 8e93b29 commit bba6531

24 files changed, +63 −276 lines

data_training.py (+42 −21)
@@ -7,7 +7,7 @@
 import numpy as np
 import pandas as pd
 import h5py
-import matplotlib.pyplot as plt
+# import matplotlib.pyplot as plt
 from math import cos, sin, atan2, sqrt, pi, radians, degrees, ceil, floor
 import tensorflow as tf

@@ -17,9 +17,9 @@
 train_file_path = data_path + 'normaliaztion_train_data.h5'
 test_file_path = data_path + 'normaliaztion_test_data.h5'
 log_path = './log/'
-log_file_path = log_path + file_time + '.h5'
+log_file_path = log_path + file_time + '.h5'
 log_text_path = log_path + file_time + '.txt'
-modal_file_path = 'mymodal'
+modal_file_path = 'mymodal'
 # modal_path = './modal/'
 # modal_file_path = modal_path + 'mymodal'

@@ -29,7 +29,7 @@
 c = 1
 
 # Train on all samples for train_num passes; each pass consumes the samples in groups of batch_size.
-train_num = 3500
+train_num = 1500
 batch_size = 55
 regulary = 0.00375
 learning_rate = 0.00125
@@ -48,16 +48,33 @@
 x = tf.placeholder(tf.float32, [None, w, h, c], name='x')
 y_ = tf.placeholder(tf.int32, [None], name='y_')
 
+# Randomly shuffle the point-set data
+def exchange_data_index(sum_data, label_data):
+    cursor_index = 0
+    max_range = len(sum_data)
+    while cursor_index < max_range:
+        random_index = random.randint(0, max_range-1)
+        temp_sum_data = sum_data[0]
+        temp_label_data = label_data[0]
 
+        sum_data = np.delete(sum_data, 0, axis=0)
+        label_data = np.delete(label_data, 0, axis=0)
+        sum_data = np.insert(sum_data, random_index, temp_sum_data, axis=0)
+        label_data = np.insert(label_data, random_index,
+                               temp_label_data, axis=0)
+
+        cursor_index += 1
+    return sum_data, label_data
 
 # Fetch batch_size samples at a time for training or testing
 def get_batch(data, label, batch_size):
     for start_index in range(0, len(data)-batch_size+1, batch_size):
         slice_index = slice(start_index, start_index+batch_size)
         yield data[slice_index], label[slice_index]
 
+
 def inference(input_tensor, train, regularizer):
-
+
     # Layer 1: convolution, 5x5 filters, depth 6, no zero padding, stride 1.
     # Shape change: 32x32x1 -> 28x28x6
     '''Parameter initialization: tf.truncated_normal_initializer(), or the shorthand tf.TruncatedNormal() / tf.RandomNormal(): drop _initializer and capitalize the leading letters.
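The new exchange_data_index shuffles by repeatedly popping the head element and re-inserting it at a random position, so every iteration copies both arrays via np.delete/np.insert. A minimal equivalent sketch using a single shared permutation, assuming sum_data and label_data are equal-length NumPy arrays (shuffle_in_unison is an illustrative name, not part of this commit):

    import numpy as np

    def shuffle_in_unison(sum_data, label_data, seed=None):
        # One shared permutation keeps each sample aligned with its label.
        rng = np.random.default_rng(seed)
        perm = rng.permutation(len(sum_data))
        return sum_data[perm], label_data[perm]

Note also that get_batch steps in whole batches, so the trailing len(data) % batch_size samples are silently skipped each epoch.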
@@ -69,14 +86,14 @@ def inference(input_tensor, train, regularizer):
         conv1_biases = tf.get_variable(
             'bias', [6], initializer=tf.constant_initializer(0.0))
         conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[
-            1, 1, 1, 1], padding='VALID')
+            1, 1, 1, 1], padding='VALID')
         relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
 
     # Layer 2: max pooling, 2x2 filter, zero padding, stride 2.
     # Shape change: 28x28x6 -> 14x14x6
     with tf.name_scope('layer2-pool1'):
         pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[
-            1, 2, 2, 1], padding='SAME')
+            1, 2, 2, 1], padding='SAME')
 
     # Layer 3: convolution, 5x5 filters, depth 16, no zero padding, stride 1.
     # Shape change: 14x14x6 -> 10x10x16
@@ -86,14 +103,14 @@ def inference(input_tensor, train, regularizer):
         conv2_biases = tf.get_variable(
             'bias', [16], initializer=tf.constant_initializer(0.0))
         conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[
-            1, 1, 1, 1], padding='VALID')
+            1, 1, 1, 1], padding='VALID')
         relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
 
     # Layer 4: max pooling, 2x2 filter, zero padding, stride 2.
     # Shape change: 10x10x16 -> 5x5x16
     with tf.variable_scope('layer4-pool2'):
         pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[
-            1, 2, 2, 1], padding='SAME')
+            1, 2, 2, 1], padding='SAME')
 
     # Convert the layer-4 pooling output into the input format required by the layer-5 fully connected
     # layer: layer 4 outputs a 5x5x16 matrix, but the fully connected layer expects a vector, so each
     # image's 5x5x16 matrix is flattened into a vector of length 5*5*16.
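The shape annotations in these hunks follow the standard output-size rules for 'VALID' and 'SAME' padding; a small self-check, not part of the commit:

    import math

    def conv_output_size(n, k, s, padding):
        # 'VALID': no padding, so output = ceil((n - k + 1) / s)
        # 'SAME': zero-padded, so output = ceil(n / s)
        if padding == 'VALID':
            return math.ceil((n - k + 1) / s)
        return math.ceil(n / s)

    assert conv_output_size(32, 5, 1, 'VALID') == 28  # layer 1 conv
    assert conv_output_size(28, 2, 2, 'SAME') == 14   # layer 2 pool
    assert conv_output_size(14, 5, 1, 'VALID') == 10  # layer 3 conv
    assert conv_output_size(10, 2, 2, 'SAME') == 5    # layer 4 pool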
@@ -154,6 +171,7 @@ def inference(input_tensor, train, regularizer):
         logit = tf.matmul(fc2, fc3_weights) + fc3_biases
         return logit
 
+
 def start_training(train_data, train_label, test_data, test_label):
 
     # Shuffle the training and test data. np.arange() returns evenly spaced values with a fixed step between a start and an end,
@@ -183,13 +201,14 @@ def start_training(train_data, train_label, test_data, test_label):
 
     global regulary, learning_rate, beta1, beta2
     regularizer = tf.contrib.layers.l2_regularizer(regulary)
-    # y = inference(x, False, regularizer)
-    y = inference(x, True, regularizer)
+    y = inference(x, False, regularizer)
+    # y = inference(x, True, regularizer)
     cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
         logits=y, labels=y_)
     cross_entropy_mean = tf.reduce_mean(cross_entropy)
     loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
-    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2).minimize(loss)
+    train_op = tf.train.AdamOptimizer(
+        learning_rate=learning_rate, beta1=beta1, beta2=beta2).minimize(loss)
     correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
     saver = tf.train.Saver()
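This hunk flips inference to train=False for the final run; the loss still combines cross-entropy with the L2 penalties collected under 'losses'. The body of inference is not shown here, but in this LeNet-style TF 1.x pattern the collection is presumably filled as in the sketch below (variable names and shapes are illustrative):

    import tensorflow as tf  # TF 1.x API, as in the diff

    regularizer = tf.contrib.layers.l2_regularizer(0.00375)
    fc1_weights = tf.get_variable(
        'weight', [5 * 5 * 16, 120],
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        # Record this layer's L2 penalty under the 'losses' key.
        tf.add_to_collection('losses', regularizer(fc1_weights))
    # Later, as in start_training():
    # loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))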
@@ -210,7 +229,7 @@ def start_training(train_data, train_label, test_data, test_label):
             train_loss, train_acc, batch_num = 0, 0, 0
             for train_data_batch, train_label_batch in get_batch(train_data, train_label, batch_size):
                 _, err, acc = sess.run([train_op, loss, accuracy], feed_dict={
-                    x: train_data_batch, y_: train_label_batch})
+                    x: train_data_batch, y_: train_label_batch})
                 train_loss += err
                 train_acc += acc
                 batch_num += 1
@@ -242,7 +261,7 @@ def start_training(train_data, train_label, test_data, test_label):
                     max_test_acc = avg_acc
                 if min_test_loss > avg_loss:
                     min_test_loss = avg_loss
-                print('[Test: round %d, %d batches of %d samples each]' %(i+1,batch_num,batch_size))
+                print('[Test: round %d, %d batches of %d samples each]' % (i+1, batch_num, batch_size))
                 print("test loss:", avg_loss)
                 print("test acc:", avg_acc)

@@ -259,6 +278,9 @@ def start_training(train_data, train_label, test_data, test_label):
     normalized_test_data = f['data'][()]
     rand_test_typical_data = f['label'][()]
 
+print(normalized_train_data.shape)
+print(normalized_test_data.shape)
+
 start_time = time.time()
 
 start_training(normalized_train_data, rand_train_typical_data,
@@ -284,13 +306,12 @@ def start_training(train_data, train_label, test_data, test_label):
 f.write('test_avg_acc:' + str(test_avg_acc) + '\r\n')
 f.write('max_test_acc:' + str(max_test_acc) + '\r\n')
 f.write('min_test_loss:' + str(min_test_loss) + '\r\n')
-f.write('time:' +str (end_time - start_time) + 's\r\n')
+f.write('time:' + str(end_time - start_time) + 's\r\n')
 f.close()
 
-acc_len = len(test_acc_array)
-plt.plot([i for i in range(acc_len)], test_acc_array)
-plt.show()
-
-plt.plot([i for i in range(acc_len)], test_loss_array)
-plt.show()
+# acc_len = len(test_acc_array)
+# plt.plot([i for i in range(acc_len)], test_acc_array)
+# plt.show()
 
+# plt.plot([i for i in range(acc_len)], test_loss_array)
+# plt.show()
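With matplotlib commented out, the accuracy and loss curves are no longer displayed, which suggests a headless training machine. If the curves are still wanted, a non-interactive backend can write them to disk instead; a sketch under that assumption (file names are illustrative):

    import matplotlib
    matplotlib.use('Agg')  # render without a display
    import matplotlib.pyplot as plt

    def save_curve(values, path):
        plt.figure()
        plt.plot(range(len(values)), values)
        plt.savefig(path)
        plt.close()

    # save_curve(test_acc_array, log_path + file_time + '_acc.png')
    # save_curve(test_loss_array, log_path + file_time + '_loss.png')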

road_data/data.h5 (−7.85 MB)

Binary file not shown.
