Commit 282b53b: Reorg

1 parent 0149374

10 files changed: +37 −234 lines

Diff for: CMakeLists.txt (+1)

```diff
@@ -58,6 +58,7 @@ set(CMAKE_CXX_FLAGS_RELEASE "-g -O3")
 add_subdirectory(test)
 add_subdirectory(examples/cifar)
 add_subdirectory(examples/rnn)
+add_subdirectory(examples/mnist)
 
 #add_subdirectory(third_party/pybind11)
 #pybind11_add_module(NeuralNetwork python/bindings.cpp)
```

Diff for: README.md (+5 −1)

````diff
@@ -19,7 +19,10 @@ Strenghts
 
 Examples
 --------
-####A deep neural network for MNIST
+The repo contains examples for deep neural networks, CNNs and RNNs. The
+examples can be found in the folder `examples`
+
+#### A deep neural network for MNIST
 ----------------------------------------
 The standard MNIST example is developed in [test/mnist.cpp](test/mnist.cpp). Running this file, prints the following information to `stdout`
 ```shell
@@ -122,3 +125,4 @@ of the results is checked with dedicated tests. Similary, I test that the GPU
 implementation is never slower than the CPU version.
 
 
+
````
Diff for: examples/mnist/CMakeLists.txt (+12, new file)

```diff
@@ -0,0 +1,12 @@
+set(${LIBRARY_TARGET_NAME}_TEST
+    mnist_lenet.cpp
+    mnist.cpp
+)
+foreach( testsourcefile ${${LIBRARY_TARGET_NAME}_TEST} )
+    #string( REPLACE "test/cifar/" "" testname ${testsourcefile})
+    string( REPLACE ".cpp" "" testname ${testsourcefile} )
+    add_executable( ${testname} ${testsourcefile} )
+    target_link_libraries(${testname} GPU_NN ${CUDA_LIBRARIES})
+    target_link_libraries(${testname} GPU_NN cuda_obj)
+    add_test(${testname} ${testname})
+endforeach( testsourcefile ${${LIBRARY_TARGET_NAME}_TEST})
```

Diff for: test/mnist.cpp renamed to examples/mnist/mnist.cpp (+3 −3)

```diff
@@ -1,9 +1,9 @@
 #include <stdlib.h>
 //#include <type_traits>
 #include <memory>
-#include "../include/neural_network.h"
-#include "../include/metrics/missclassified.hpp"
-#include "../third_party/mnist/include/mnist/get_data.h"
+#include "../../include/neural_network.h"
+#include "../../include/metrics/missclassified.hpp"
+#include "../../third_party/mnist/include/mnist/get_data.h"
 
 typedef std::shared_ptr<Layer> s_Layer;
 using std::make_shared;
```

Diff for: test/mnist_lenet.cpp renamed to examples/mnist/mnist_lenet.cpp (+7 −3)

```diff
@@ -1,7 +1,8 @@
 #include <stdlib.h>
 //#include <type_traits>
-#include "../include/neural_network.h"
-#include "../third_party/mnist/include/mnist/get_data.h"
+#include "../../include/neural_network.h"
+#include "../../third_party/mnist/include/mnist/get_data.h"
+#include "../../include/metrics/missclassified.hpp"
 
 typedef std::shared_ptr<Layer> s_Layer;
 using std::make_shared;
@@ -26,6 +27,9 @@ int main(int argc, char** argv) {
     std::shared_ptr<GradientDescent> sgd =
         std::make_shared<Momentum>(LearningRate(0.001), MomentumRate(0.90),
                                    WeightDecay(0.004));
+    std::vector<Metric*> metrics;
+    Metric* val = new Missclassified(&n1);
+    metrics.push_back(val);
     n1.train(data.get_x_train(), data.get_y_train(), sgd, Epochs(30),
-             Patience(10), BatchSize(32));
+             Patience(10), BatchSize(32), metrics);
 }
```
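The metrics hook added here hands a raw `Metric*` to `train` and never frees it. If the signature stays pointer-based, the caller can still keep ownership explicit; a minimal sketch of that pattern, using stand-in `Metric`/`Missclassified` types rather than the library's actual classes:

```cpp
#include <memory>
#include <vector>

// Stand-ins for the library's Metric hierarchy; the real classes live in
// include/metrics/ and take a pointer to the network being evaluated.
struct Metric {
    virtual ~Metric() = default;
};
struct Missclassified : Metric {};

int main() {
    // The unique_ptr owns the metric; the raw-pointer vector is only a
    // non-owning view suitable for a train(..., std::vector<Metric*>) API.
    std::vector<std::unique_ptr<Metric>> owned;
    owned.push_back(std::make_unique<Missclassified>());

    std::vector<Metric*> metrics;
    for (const auto& m : owned) metrics.push_back(m.get());

    // n1.train(..., metrics);  // metrics freed when `owned` leaves scope
}
```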

Diff for: src/initalization/glorot.cpp (+5 −2)

```diff
@@ -1,8 +1,11 @@
 #include "../../include/initalization/glorot.hpp"
+#include <random>
 
 Matrix Glorot::weights(int rows, int cols) const {
-    srand((unsigned int)0);
-    Matrix mat = Matrix::Random(rows, cols);
+    std::mt19937 gen;
+    gen.seed(0);
+    std::uniform_real_distribution<dtype> dis(-1.0, 1.0);
+    Matrix mat = Matrix::NullaryExpr(rows, cols, [&]() { return dis(gen); });
     dtype glorot_scale = std::sqrt(6.) / std::sqrt(rows + cols);
     mat *= glorot_scale;
     return mat;
```
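For context, a self-contained sketch of the new deterministic draw, assuming `Matrix` maps to an Eigen matrix and `dtype` to `float` (both assumptions; the project's typedefs may differ). Unlike `Matrix::Random`, which is driven by the global `rand()` state, a locally seeded `std::mt19937` fed through `NullaryExpr` makes the random stream explicit and reproducible:

```cpp
#include <Eigen/Dense>
#include <cmath>
#include <random>

// Glorot-uniform weights: draw U(-1, 1), then scale by sqrt(6 / (rows + cols)).
Eigen::MatrixXf glorot_weights(int rows, int cols) {
    std::mt19937 gen(0);  // fixed seed -> the same weights on every run
    std::uniform_real_distribution<float> dis(-1.0f, 1.0f);
    Eigen::MatrixXf mat =
        Eigen::MatrixXf::NullaryExpr(rows, cols, [&]() { return dis(gen); });
    return mat * (std::sqrt(6.f) / std::sqrt(float(rows + cols)));
}

int main() {
    Eigen::MatrixXf a = glorot_weights(3, 4);
    Eigen::MatrixXf b = glorot_weights(3, 4);
    return a.isApprox(b) ? 0 : 1;  // deterministic: both calls agree
}
```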

Diff for: src/layer/convolution.cpp (+2 −1)

```diff
@@ -103,15 +103,16 @@ void Convolution::initialize_grad() {
 }
 
 void Convolution::initialize_weight(Init* init) {
+    // this is the same init as keras uses for conv, overwrite init, not good
     int receptive_field = _kernel.first() * _kernel.second();
     int fan_in = receptive_field * _channels.get();
     int fan_out = receptive_field * _filters.get();
     dtype glorot_scale = std::sqrt(6.) / std::sqrt(fan_in + fan_out);
     int cols = _filters.get();
     int rows = _channels.get() * _kernel.first() * _kernel.second();
+    srand((unsigned int)0);
     Matrix weights = Matrix::Random(rows, cols);
     weights *= glorot_scale;
-    // Matrix weights = init->weights(rows, cols);
     parameters.push_back(std::make_shared<Storage>(weights));
 }
```
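As a worked check of the scale computed above: the fans are counted per receptive field (the Keras convention), not from the flattened weight-matrix shape. For a 5×5 kernel with 3 input channels and 32 filters, `fan_in` = 25·3 = 75 and `fan_out` = 25·32 = 800, giving a scale of sqrt(6/875) ≈ 0.083. A small standalone sketch (the parameter values are illustrative, not taken from the repo):

```cpp
#include <cmath>
#include <cstdio>

// Keras-style Glorot scale for a convolution layer.
float conv_glorot_scale(int kh, int kw, int channels, int filters) {
    int receptive_field = kh * kw;            // kernel height * width
    int fan_in = receptive_field * channels;  // inputs feeding one unit
    int fan_out = receptive_field * filters;  // outputs fed by one unit
    return std::sqrt(6.f) / std::sqrt(static_cast<float>(fan_in + fan_out));
}

int main() {
    // 5x5 kernel, 3 channels, 32 filters -> sqrt(6 / 875) ~ 0.0828
    std::printf("%.4f\n", conv_glorot_scale(5, 5, 3, 32));
}
```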

Diff for: test/CMakeLists.txt (+1 −4)

```diff
@@ -1,12 +1,9 @@
 
 set(${LIBRARY_TARGET_NAME}_TEST
     #normalization.cpp
-    #mnist_lenet.cpp
-    #mnist.cpp
     lstm.cpp
     inits.cpp
-    #im2col_layer.cpp
-    #convolution.cpp
+    convolution.cpp
     softmax.cpp
     dense.cpp
     cross_entropy.cpp
```

Diff for: test/convolution.cpp (+1 −4)

```diff
@@ -52,7 +52,6 @@ TEST_CASE("NeuralNetwork cpu", "[cpu]") {
     // l2);
     Matrix _input = Matrix::Random(
         image.first() * image.second() * channels.get(), batches);
-    print_Matrix_to_stdout(_input, "input_cpu");
     std::shared_ptr<Storage> input = std::make_shared<Storage>(_input);
     Matrix _out =
         Matrix::Zero(out_height * out_width, channels.get() * kernel.first() *
@@ -63,7 +62,6 @@
     std::shared_ptr<Storage> conv_out = std::make_shared<Storage>(_out2);
     inp1->forward_cpu(input, output_cpu, "train");
     l2->forward_cpu(output_cpu, conv_out, "train");
-    print_Matrix_to_stdout(output_cpu->return_data_const(), "cpu");
 }
 
 TEST_CASE("NeuralNetwork gpu", "[gpu]") {
@@ -86,7 +84,6 @@ TEST_CASE("NeuralNetwork gpu", "[gpu]") {
         make_shared<Im2ColLayer>(std::dynamic_pointer_cast<Convolution>(l2));
     Matrix _input = Matrix::Random(
         image.first() * image.second() * channels.get(), batches);
-    print_Matrix_to_stdout(_input, "input_gpu");
     std::shared_ptr<Storage> input = std::make_shared<Storage>(_input);
     Matrix _out =
         Matrix::Zero(out_height * out_width, channels.get() * kernel.first() *
@@ -97,7 +94,6 @@
     std::shared_ptr<Storage> conv_out = std::make_shared<Storage>(_out2);
     inp1->forward_gpu(input, output_cpu, "train");
     l2->forward_gpu(output_cpu, conv_out, "train");
-    print_Matrix_to_stdout(output_cpu->return_data_const(), "cpu");
 }
 
 TEST_CASE("NeuralNetwork forward equivalence", "[forward equivalance]") {
@@ -118,6 +114,7 @@ TEST_CASE("NeuralNetwork forward equivalence", "[forward equivalance]") {
         image, channels, init);
     s_Layer im2col_cpu = make_shared<Im2ColLayer>(
         std::dynamic_pointer_cast<Convolution>(conv_cpu));
+    //Init* init_gpu = new Glorot();
     s_Layer conv_gpu = make_shared<Convolution>(kernel, pad, stride, filters,
                                                 image, channels, init);
     s_Layer im2col_gpu = make_shared<Im2ColLayer>(
```

Diff for: test/im2col_layer.cpp (−216)

This file was deleted.
