From c0e687b939904b60232f3f01f7acbba3164dfbd2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 20 Dec 2016 17:32:57 +0800 Subject: [PATCH 01/22] Refine Code --- demo/mnist/api_train.py | 12 ++++++++++++ demo/mnist/simple_mnist_network.py | 16 ++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 demo/mnist/api_train.py create mode 100644 demo/mnist/simple_mnist_network.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py new file mode 100644 index 00000000000000..6abb5d4e562ee9 --- /dev/null +++ b/demo/mnist/api_train.py @@ -0,0 +1,12 @@ +import py_paddle.swig_paddle as api +from paddle.trainer.config_parser import parse_config + + +def main(): + api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores + config = parse_config('simple_mnist_network.py', '') + m = api.GradientMachine.createFromConfigProto(config.model_config) + + +if __name__ == '__main__': + main() diff --git a/demo/mnist/simple_mnist_network.py b/demo/mnist/simple_mnist_network.py new file mode 100644 index 00000000000000..41f4e51657d35b --- /dev/null +++ b/demo/mnist/simple_mnist_network.py @@ -0,0 +1,16 @@ +from paddle.trainer_config_helpers import * + +settings(learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) + +imgs = data_layer(name='pixel', size=784) + +hidden1 = fc_layer(input=imgs, size=200) +hidden2 = fc_layer(input=hidden1, size=200) + +inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) + +cost = classification_cost( + input=inference, label=data_layer( + name='label', size=10)) + +outputs(cost) From 8b4cbcfc1847c50228c151a485755202912e7df2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 20 Dec 2016 22:01:28 +0800 Subject: [PATCH 02/22] Start doing mnist_train_api --- demo/mnist/api_train.py | 31 ++++++++++++++++++++++++++--- paddle/api/CMakeLists.txt | 1 + paddle/api/Paddle.swig | 3 ++- paddle/api/PaddleAPI.h | 20 +++++++++++++++++++ paddle/api/PaddleAPIPrivate.h | 27 +++++++++++++++++++++++-- paddle/api/Parameter.cpp | 16 +-------------- paddle/api/ParameterUpdater.cpp | 35 +++++++++++++++++++++++++++++++++ 7 files changed, 112 insertions(+), 21 deletions(-) create mode 100644 paddle/api/ParameterUpdater.cpp diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 6abb5d4e562ee9..5d4ef90f10d3d4 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -1,11 +1,36 @@ import py_paddle.swig_paddle as api -from paddle.trainer.config_parser import parse_config +import paddle.trainer.config_parser +import numpy as np + + +def init_parameter(network): + assert isinstance(network, api.GradientMachine) + for each_param in network.getParameters(): + assert isinstance(each_param, api.Parameter) + array = each_param.getBuf(api.PARAMETER_VALUE).toNumpyArrayInplace() + assert isinstance(array, np.ndarray) + for i in xrange(len(array)): + array[i] = np.random.uniform(-1.0, 1.0) def main(): api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores - config = parse_config('simple_mnist_network.py', '') - m = api.GradientMachine.createFromConfigProto(config.model_config) + config = paddle.trainer.config_parser.parse_config( + 'simple_mnist_network.py', '') + + opt_config = api.OptimizationConfig.createFromProto(config.opt_config) + _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) + enable_types = _temp_optimizer_.getParameterTypes() + + m = api.GradientMachine.createFromConfigProto( + config.model_config, api.CREATE_MODE_NORMAL, enable_types) + assert isinstance(m, api.GradientMachine) + 
init_parameter(network=m) + + updater = api.ParameterUpdater.createLocalUpdater(opt_config) + assert isinstance(updater, api.ParameterUpdater) + updater.init(m) + updater.startPass() if __name__ == '__main__': diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 6ad1d79e59b11b..39fe43556595cc 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -5,6 +5,7 @@ set(API_SOURCES Matrix.cpp Parameter.cpp ParameterOptimizer.cpp + ParameterUpdater.cpp SequenceGenerator.cpp Trainer.cpp Util.cpp diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index 9194a6371be9e0..b0fa8beb166b34 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -174,6 +174,7 @@ namespace std { %newobject Parameter::getConfig; %newobject ParameterOptimizer::create; %newobject ParameterOptimizer::needSpecialTraversal; +%newobject ParameterUpdater::createLocalUpdater; %feature("director") UpdateCallback; %feature("autodoc", 1); // To generate method stub, for code hint in ide @@ -193,4 +194,4 @@ namespace std { %ignore OptimizationConfigPrivate; %ignore ParameterTraverseCallbackPrivate; %include "utils/GlobalConstants.h" -%include "api/PaddleAPI.h" \ No newline at end of file +%include "api/PaddleAPI.h" diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 84a66719c33678..bd413eb1e9d9a9 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -519,6 +519,7 @@ class OptimizationConfig { friend class TrainerConfig; friend class ParameterOptimizer; + friend class ParameterUpdater; friend class Trainer; }; @@ -557,6 +558,7 @@ class Parameter { ParameterPrivate* m; friend class UpdateCallbackWrapper; friend class GradientMachine; + friend class ParameterUpdater; }; struct ModelConfigPrivate; @@ -772,6 +774,24 @@ class GradientMachine { // Not to use c++ 11 init-list, so we use static var as function default arg. static std::vector defaultParamTypes; friend class Trainer; + friend class ParameterUpdater; +}; + +struct ParameterUpdaterPrivate; +class ParameterUpdater { +private: + ParameterUpdater(); + +public: + static ParameterUpdater* createLocalUpdater(OptimizationConfig* config); + ~ParameterUpdater(); + + void init(const GradientMachine& gm); + + void startPass(); + +private: + ParameterUpdaterPrivate* m; }; struct TrainerPrivate; diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/api/PaddleAPIPrivate.h index d2b56fc41c8aad..905668a62f24fb 100644 --- a/paddle/api/PaddleAPIPrivate.h +++ b/paddle/api/PaddleAPIPrivate.h @@ -11,11 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - +#pragma once +#include +#include "PaddleAPI.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" #include "paddle/trainer/TrainerConfigHelper.h" -#pragma once +#include "paddle/parameter/ParameterUpdaterBase.h" struct GradientMachinePrivate { std::shared_ptr machine; @@ -65,3 +67,24 @@ struct ArgumentsPrivate { return *(std::shared_ptr*)(rawPtr); } }; + +struct ParameterUpdaterPrivate { + std::unique_ptr updater; +}; + +struct ParameterPrivate { + std::shared_ptr sharedPtr; + paddle::Parameter* rawPtr; // rawPtr only used in ParameterUpdater, + // in other situation sharedPtr should + // contains value. 
+ + ParameterPrivate() : sharedPtr(nullptr), rawPtr(nullptr) {} + + paddle::Parameter* getPtr() { + if (sharedPtr) { + return sharedPtr.get(); + } else { + return rawPtr; + } + } +}; diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index 4eed00a84a695f..41cf50043cc2b0 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -14,21 +14,7 @@ limitations under the License. */ #include "paddle/parameter/Parameter.h" #include "PaddleAPI.h" - -struct ParameterPrivate { - std::shared_ptr sharedPtr; - paddle::Parameter* rawPtr; - - ParameterPrivate() : sharedPtr(nullptr), rawPtr(nullptr) {} - - paddle::Parameter* getPtr() { - if (sharedPtr) { - return sharedPtr.get(); - } else { - return rawPtr; - } - } -}; +#include "PaddleAPIPrivate.h" Parameter::Parameter() : m(new ParameterPrivate()) {} diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp new file mode 100644 index 00000000000000..af5b746a7cd082 --- /dev/null +++ b/paddle/api/ParameterUpdater.cpp @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "PaddleAPI.h" + +#include "PaddleAPIPrivate.h" +#include "paddle/trainer/ThreadParameterUpdater.h" + +ParameterUpdater::ParameterUpdater() : m(new ParameterUpdaterPrivate()) {} + +ParameterUpdater *ParameterUpdater::createLocalUpdater( + OptimizationConfig *config) { + auto param = new ParameterUpdater(); + param->m->updater.reset(new paddle::SgdThreadUpdater(config->m->getConfig())); + return param; +} + +ParameterUpdater::~ParameterUpdater() { delete m; } + +void ParameterUpdater::init(const GradientMachine &gm) { + m->updater->init(gm.m->machine->getParameters()); +} + +void ParameterUpdater::startPass() { m->updater->startPass(); } From 025e3e94d2b216cc278de103cbef27b851274bf5 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 20 Dec 2016 23:00:34 +0800 Subject: [PATCH 03/22] Add GradientMachine::start/finish to API --- demo/mnist/api_train.py | 7 ++++++- paddle/api/GradientMachine.cpp | 4 ++++ paddle/api/PaddleAPI.h | 9 +++++++++ paddle/api/ParameterUpdater.cpp | 2 ++ 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 5d4ef90f10d3d4..b061cfb2b8f1fa 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -30,7 +30,12 @@ def main(): updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) updater.init(m) - updater.startPass() + m.start() + + for _ in xrange(100): + updater.startPass() + + m.finish() if __name__ == '__main__': diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 297eaa19bb9981..2cece2109795a9 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -64,6 +64,10 @@ GradientMachine* GradientMachine::createByModelConfig( return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types); } +void GradientMachine::start() { m->machine->start(); } + +void 
GradientMachine::finish() { m->machine->finish(); } + void GradientMachine::forward(const Arguments& inArgs, Arguments* outArgs, PassType passType) { diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index bd413eb1e9d9a9..c074325091dee9 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -716,6 +716,13 @@ class GradientMachine { GradientMatchineCreateMode mode = CREATE_MODE_NORMAL, const std::vector& parameterTypes = defaultParamTypes); + /** + * @brief finish + */ + void finish(); + + void start(); + /** * The forward stage of GradientMachine. * @@ -790,6 +797,8 @@ class ParameterUpdater { void startPass(); + void finishPass(); + private: ParameterUpdaterPrivate* m; }; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index af5b746a7cd082..3b626c05071393 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -33,3 +33,5 @@ void ParameterUpdater::init(const GradientMachine &gm) { } void ParameterUpdater::startPass() { m->updater->startPass(); } + +void ParameterUpdater::finishPass() {} From 27d87db6a0f937a7fa22b03e3d18844f894698e1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 12:54:11 +0800 Subject: [PATCH 04/22] Wait for reading data. --- demo/mnist/api_train.py | 2 ++ paddle/api/ParameterUpdater.cpp | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index b061cfb2b8f1fa..59043ce6c42085 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -35,6 +35,8 @@ def main(): for _ in xrange(100): updater.startPass() + updater.finishPass() + m.finish() diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 3b626c05071393..4edec78b4a3d42 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -34,4 +34,4 @@ void ParameterUpdater::init(const GradientMachine &gm) { void ParameterUpdater::startPass() { m->updater->startPass(); } -void ParameterUpdater::finishPass() {} +void ParameterUpdater::finishPass() { m->updater->finishPass(); } From 9f5e742b6d4018cf5022a6718d5913f2459cf95e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 12:57:19 +0800 Subject: [PATCH 05/22] A tiny fix in PyDataProvider2 * hidden decorator kwargs in DataProvider.__init__ * also add unit test for this. --- paddle/gserver/tests/test_PyDataProvider2.py | 2 +- python/paddle/trainer/PyDataProvider2.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index f7b540013e76f0..2e6225519f4681 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -17,7 +17,7 @@ from paddle.trainer.PyDataProvider2 import * -@provider(input_types=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) +@provider(slots=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) def test_dense_no_seq(setting, filename): for i in xrange(200): yield [(float(j - 100) * float(i + 1)) / 200.0 for j in xrange(200)] diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index de266bb5d3d07e..5ca4bcbca6906a 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -232,7 +232,7 @@ def provider(input_types=None, check=False, check_fail_continue=False, init_hook=None, - **kwargs): + **outter_kwargs): """ Provider decorator. 
Use it to make a function into PyDataProvider2 object. In this function, user only need to get each sample for some train/test @@ -318,10 +318,10 @@ def __init__(self, file_list, **kwargs): self.logger = logging.getLogger("") self.logger.setLevel(logging.INFO) self.input_types = None - if 'slots' in kwargs: + if 'slots' in outter_kwargs: self.logger.warning('setting slots value is deprecated, ' 'please use input_types instead.') - self.slots = kwargs['slots'] + self.slots = outter_kwargs['slots'] self.slots = input_types self.should_shuffle = should_shuffle From 5f6c4af3a544b828fe7c71c98164f9e8b6994f5b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 13:27:32 +0800 Subject: [PATCH 06/22] Try to read data in mnist --- demo/mnist/api_train.py | 29 +++++++++++++++++++++++++++++ demo/mnist/mnist_provider.py | 28 +++------------------------- demo/mnist/mnist_util.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 25 deletions(-) create mode 100644 demo/mnist/mnist_util.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 59043ce6c42085..e508af7a0c571c 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -1,6 +1,9 @@ import py_paddle.swig_paddle as api +from py_paddle import DataProviderConverter +import paddle.trainer.PyDataProvider2 as dp import paddle.trainer.config_parser import numpy as np +from mnist_util import read_from_mnist def init_parameter(network): @@ -13,6 +16,22 @@ def init_parameter(network): array[i] = np.random.uniform(-1.0, 1.0) +def generator_to_batch(generator, batch_size): + ret_val = list() + for each_item in generator: + ret_val.append(each_item) + if len(ret_val) == batch_size: + yield ret_val + ret_val = list() + if len(ret_val) != 0: + yield ret_val + + +def input_order_converter(generator): + for each_item in generator: + yield each_item['pixel'], each_item['label'] + + def main(): api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( @@ -30,10 +49,20 @@ def main(): updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) updater.init(m) + + converter = DataProviderConverter( + input_types=[dp.dense_vector(784), dp.integer_value(10)]) + + train_file = './data/raw_data/train' + m.start() for _ in xrange(100): updater.startPass() + train_data_generator = input_order_converter( + read_from_mnist(train_file)) + for data_batch in generator_to_batch(train_data_generator, 128): + inArgs = converter(data_batch) updater.finishPass() diff --git a/demo/mnist/mnist_provider.py b/demo/mnist/mnist_provider.py index 4635833d36b9f2..888cfef1e7e3e1 100644 --- a/demo/mnist/mnist_provider.py +++ b/demo/mnist/mnist_provider.py @@ -1,5 +1,5 @@ from paddle.trainer.PyDataProvider2 import * -import numpy +from mnist_util import read_from_mnist # Define a py data provider @@ -8,27 +8,5 @@ 'label': integer_value(10)}, cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, filename): # settings is not used currently. 
- imgf = filename + "-images-idx3-ubyte" - labelf = filename + "-labels-idx1-ubyte" - f = open(imgf, "rb") - l = open(labelf, "rb") - - f.read(16) - l.read(8) - - # Define number of samples for train/test - if "train" in filename: - n = 60000 - else: - n = 10000 - - images = numpy.fromfile( - f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') - images = images / 255.0 * 2.0 - 1.0 - labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") - - for i in xrange(n): - yield {"pixel": images[i, :], 'label': labels[i]} - - f.close() - l.close() + for each in read_from_mnist(filename): + yield each diff --git a/demo/mnist/mnist_util.py b/demo/mnist/mnist_util.py new file mode 100644 index 00000000000000..3fd88ae7edc821 --- /dev/null +++ b/demo/mnist/mnist_util.py @@ -0,0 +1,30 @@ +import numpy + +__all__ = ['read_from_mnist'] + + +def read_from_mnist(filename): + imgf = filename + "-images-idx3-ubyte" + labelf = filename + "-labels-idx1-ubyte" + f = open(imgf, "rb") + l = open(labelf, "rb") + + f.read(16) + l.read(8) + + # Define number of samples for train/test + if "train" in filename: + n = 60000 + else: + n = 10000 + + images = numpy.fromfile( + f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') + images = images / 255.0 * 2.0 - 1.0 + labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") + + for i in xrange(n): + yield {"pixel": images[i, :], 'label': labels[i]} + + f.close() + l.close() From 36d1e6178c4e6d563cf1be644d1a828b577b7f28 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 13:38:31 +0800 Subject: [PATCH 07/22] Use numpy in DenseScanner. --- paddle/py_paddle/dataprovider_converter.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index edcefba6a854df..981d10afda2671 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -15,6 +15,7 @@ import paddle.trainer.PyDataProvider2 as dp2 import collections import swig_paddle +import numpy __all__ = ['DataProviderConverter'] @@ -35,18 +36,18 @@ def finish_scan(self, argument): class DenseScanner(IScanner): def __init__(self, input_type, pos): IScanner.__init__(self, input_type, pos) - self.__mat__ = [] - self.__height__ = 0 + self.__mat__ = None def scan(self, dat): - self.__mat__.extend(dat) - self.__height__ += 1 + if self.__mat__ is None: + self.__mat__ = numpy.array([dat], dtype='float32') + else: + self.__mat__ = numpy.append(self.__mat__, [dat], axis=0) def finish_scan(self, argument): assert isinstance(argument, swig_paddle.Arguments) assert isinstance(self.input_type, dp2.InputType) - m = swig_paddle.Matrix.createDense(self.__mat__, self.__height__, - self.input_type.dim, False) + m = swig_paddle.Matrix.createDenseFromNumpy(self.__mat__, True, False) argument.setSlotValue(self.pos, m) From 20249e8e65aca17abaa9bbee9ab660e3573e21cf Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 13:55:44 +0800 Subject: [PATCH 08/22] Try expose ParamUpdater::update --- demo/mnist/api_train.py | 3 +-- paddle/api/PaddleAPI.h | 6 ++++++ paddle/api/ParameterUpdater.cpp | 13 +++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index e508af7a0c571c..ef8b20a48dc607 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -45,7 +45,6 @@ def main(): config.model_config, api.CREATE_MODE_NORMAL, enable_types) assert isinstance(m, 
api.GradientMachine) init_parameter(network=m) - updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) updater.init(m) @@ -62,7 +61,7 @@ def main(): train_data_generator = input_order_converter( read_from_mnist(train_file)) for data_batch in generator_to_batch(train_data_generator, 128): - inArgs = converter(data_batch) + trainRole = updater.startBatch(len(data_batch)) updater.finishPass() diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index c074325091dee9..165997ba3499f4 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -799,6 +799,12 @@ class ParameterUpdater { void finishPass(); + PassType startBatch(int64_t batchSize); + + void finishBatch(float cost); + + void update(Parameter* param); + private: ParameterUpdaterPrivate* m; }; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 4edec78b4a3d42..e5d07b81782bf5 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -35,3 +35,16 @@ void ParameterUpdater::init(const GradientMachine &gm) { void ParameterUpdater::startPass() { m->updater->startPass(); } void ParameterUpdater::finishPass() { m->updater->finishPass(); } + +PassType ParameterUpdater::startBatch(int64_t batchSize) { + return m->updater->startBatch(batchSize); +} + +void ParameterUpdater::finishBatch(float cost) { + m->updater->finishBatch(cost); +} + +void ParameterUpdater::update(Parameter *param) { + auto paddleParam = param->m->getPtr(); + m->updater->update(paddleParam); +} From 05ab22c332e615f3c81f4d4b2c9b47f71229c71c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 14:22:31 +0800 Subject: [PATCH 09/22] A simplest train file for mnist added. --- demo/mnist/api_train.py | 16 +++++++++++++++- paddle/api/PaddleAPI.h | 2 +- paddle/api/ParameterUpdater.cpp | 4 ++-- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index ef8b20a48dc607..425c5f897a9c25 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -58,11 +58,25 @@ def main(): for _ in xrange(100): updater.startPass() + outArgs = api.Arguments.createArguments(0) train_data_generator = input_order_converter( read_from_mnist(train_file)) - for data_batch in generator_to_batch(train_data_generator, 128): + for batch_id, data_batch in enumerate( + generator_to_batch(train_data_generator, 256)): trainRole = updater.startBatch(len(data_batch)) + def update_callback(param): + updater.update(param) + + m.forwardBackward( + converter(data_batch), outArgs, trainRole, update_callback) + + cost_vec = outArgs.getSlotValue(0) + cost_vec = cost_vec.copyToNumpyMat() + cost = cost_vec.sum() / len(data_batch) + print 'Batch id', batch_id, 'with cost=', cost + updater.finishBatch(cost) + updater.finishPass() m.finish() diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 165997ba3499f4..cc49e6a09d5dee 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -799,7 +799,7 @@ class ParameterUpdater { void finishPass(); - PassType startBatch(int64_t batchSize); + PassType startBatch(size_t batchSize); void finishBatch(float cost); diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index e5d07b81782bf5..fba47620249dbc 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -36,8 +36,8 @@ void ParameterUpdater::startPass() { m->updater->startPass(); } void ParameterUpdater::finishPass() { m->updater->finishPass(); } -PassType 
ParameterUpdater::startBatch(int64_t batchSize) { - return m->updater->startBatch(batchSize); +PassType ParameterUpdater::startBatch(size_t batchSize) { + return m->updater->startBatch((int64_t)batchSize); } void ParameterUpdater::finishBatch(float cost) { From 1f4f04427d5f34e48a0a30b9137a882a6f1b571c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 12:57:19 +0800 Subject: [PATCH 10/22] A tiny fix in PyDataProvider2 * hidden decorator kwargs in DataProvider.__init__ * also add unit test for this. --- paddle/gserver/tests/test_PyDataProvider2.py | 2 +- python/paddle/trainer/PyDataProvider2.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index f7b540013e76f0..2e6225519f4681 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -17,7 +17,7 @@ from paddle.trainer.PyDataProvider2 import * -@provider(input_types=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) +@provider(slots=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) def test_dense_no_seq(setting, filename): for i in xrange(200): yield [(float(j - 100) * float(i + 1)) / 200.0 for j in xrange(200)] diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index de266bb5d3d07e..c918fa78ac2f6e 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -232,7 +232,7 @@ def provider(input_types=None, check=False, check_fail_continue=False, init_hook=None, - **kwargs): + **outter_kwargs): """ Provider decorator. Use it to make a function into PyDataProvider2 object. In this function, user only need to get each sample for some train/test @@ -318,11 +318,15 @@ def __init__(self, file_list, **kwargs): self.logger = logging.getLogger("") self.logger.setLevel(logging.INFO) self.input_types = None - if 'slots' in kwargs: + if 'slots' in outter_kwargs: self.logger.warning('setting slots value is deprecated, ' 'please use input_types instead.') - self.slots = kwargs['slots'] - self.slots = input_types + self.slots = outter_kwargs['slots'] + if input_types is not None: + self.slots = input_types + + assert self.slots is not None, \ + "Data Provider's input_types must be set" self.should_shuffle = should_shuffle true_table = [1, 't', 'true', 'on'] From eaba2e2eff6c9bf1bbdff452b4be636ef0b8da9a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 21:56:37 +0800 Subject: [PATCH 11/22] Expose Evaluator API --- demo/mnist/api_train.py | 6 +++--- paddle/api/CMakeLists.txt | 10 ++++++++++ paddle/api/Evaluator.cpp | 29 +++++++++++++++++++++++++++++ paddle/api/GradientMachine.cpp | 10 ++++++++++ paddle/api/Paddle.swig | 2 ++ paddle/api/PaddleAPI.h | 27 ++++++++++++++++++++++++++- paddle/api/PaddleAPIPrivate.h | 11 +++++++++-- paddle/api/ParameterUpdater.cpp | 2 +- 8 files changed, 90 insertions(+), 7 deletions(-) create mode 100644 paddle/api/Evaluator.cpp diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 425c5f897a9c25..52cc13c5a3eaee 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -62,14 +62,14 @@ def main(): train_data_generator = input_order_converter( read_from_mnist(train_file)) for batch_id, data_batch in enumerate( - generator_to_batch(train_data_generator, 256)): + generator_to_batch(train_data_generator, 2048)): trainRole = updater.startBatch(len(data_batch)) - def update_callback(param): + def 
updater_callback(param): updater.update(param) m.forwardBackward( - converter(data_batch), outArgs, trainRole, update_callback) + converter(data_batch), outArgs, trainRole, updater_callback) cost_vec = outArgs.getSlotValue(0) cost_vec = cost_vec.copyToNumpyMat() diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 39fe43556595cc..a7f17e186bf6b4 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -1,6 +1,7 @@ set(API_SOURCES Arguments.cpp ConfigParser.cpp + Evaluator.cpp GradientMachine.cpp Matrix.cpp Parameter.cpp @@ -63,6 +64,15 @@ install(DIRECTORY ${PROJ_ROOT}/paddle/dist/ add_custom_target(python_api_wheel ALL DEPENDS ${PROJ_ROOT}/paddle/dist/.timestamp) +add_dependencies(python_api_wheel python_swig_sources + paddle_parameter + paddle_math + paddle_utils + paddle_gserver + paddle_pserver + paddle_trainer + paddle_api + paddle_cuda) if(WITH_TESTING) add_subdirectory(test) diff --git a/paddle/api/Evaluator.cpp b/paddle/api/Evaluator.cpp new file mode 100644 index 00000000000000..c30e09876397e3 --- /dev/null +++ b/paddle/api/Evaluator.cpp @@ -0,0 +1,29 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include +#include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" + +Evaluator::Evaluator() : m(new EvaluatorPrivate()) {} +Evaluator::~Evaluator() { delete m; } + +void Evaluator::start() { m->rawPtr->start(); } + +void Evaluator::finish() { m->rawPtr->finish(); } + +std::string Evaluator::toString() { + std::ostringstream sout; + m->rawPtr->printStats(sout); + return sout.str(); +} diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 2cece2109795a9..0d1e17529611d1 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -162,3 +162,13 @@ SequenceGenerator* GradientMachine::asSequenceGenerator( r->setBeamSize(beam_size); return r; } + +Evaluator* GradientMachine::makeEvaluator() { + auto ev = new Evaluator(); + ev->m->rawPtr = m->machine->makeEvaluator(); + return ev; +} + +void GradientMachine::eval(Evaluator* evaluator) { + m->machine->eval(evaluator->m->rawPtr); +} diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index b0fa8beb166b34..7a110a90b84fcb 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -97,6 +97,7 @@ namespace std { %rename(__setitem__) Vector::set; %rename(__len__) Vector::getSize; %rename(__call__) ParameterTraverseCallback::apply; +%rename(__repr__) Evaluator::toString; %apply (float* INPLACE_ARRAY2, int DIM1, int DIM2) { (float* data, int dim1, int dim2) @@ -167,6 +168,7 @@ namespace std { %newobject GradientMachine::asSequenceGenerator; %newobject GradientMachine::getParameter; %newobject GradientMachine::getLayerOutput; +%newobject GradientMachine::makeEvaluator; %newobject TrainerConfig::createFromTrainerConfigFile; %newobject TrainerConfig::getModelConfig; %newobject TrainerConfig::getOptimizationConfig; diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 
cc49e6a09d5dee..413c3851464621 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -685,7 +685,7 @@ class ParameterOptimizer { }; class SequenceGenerator; - +class Evaluator; struct GradientMachinePrivate; class GradientMachine { private: @@ -770,6 +770,10 @@ class GradientMachine { size_t max_length = 100UL, size_t beam_size = -1UL); + Evaluator* makeEvaluator(); + + void eval(Evaluator* evaluator); + private: GradientMachinePrivate* m; @@ -809,6 +813,27 @@ class ParameterUpdater { ParameterUpdaterPrivate* m; }; +struct EvaluatorPrivate; +class Evaluator { +private: + Evaluator(); + DISABLE_COPY_AND_ASSIGN(Evaluator); + +public: + ~Evaluator(); + + void start(); + + void finish(); + + std::string toString(); + +private: + EvaluatorPrivate* m; + + friend class GradientMachine; +}; + struct TrainerPrivate; class Trainer { private: diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/api/PaddleAPIPrivate.h index 905668a62f24fb..f41352bfec7c33 100644 --- a/paddle/api/PaddleAPIPrivate.h +++ b/paddle/api/PaddleAPIPrivate.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once #include #include "PaddleAPI.h" +#include "paddle/gserver/evaluators/Evaluator.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" -#include "paddle/trainer/TrainerConfigHelper.h" - #include "paddle/parameter/ParameterUpdaterBase.h" +#include "paddle/trainer/TrainerConfigHelper.h" struct GradientMachinePrivate { std::shared_ptr machine; @@ -88,3 +88,10 @@ struct ParameterPrivate { } } }; + +struct EvaluatorPrivate { + paddle::Evaluator* rawPtr; + + EvaluatorPrivate() : rawPtr(nullptr) {} + ~EvaluatorPrivate() { delete rawPtr; } +}; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index fba47620249dbc..91c83927628080 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -29,7 +29,7 @@ ParameterUpdater *ParameterUpdater::createLocalUpdater( ParameterUpdater::~ParameterUpdater() { delete m; } void ParameterUpdater::init(const GradientMachine &gm) { - m->updater->init(gm.m->machine->getParameters()); + m->updater->init(gm.m->machine->getNonStaticParameters()); } void ParameterUpdater::startPass() { m->updater->startPass(); } From 409a5774c475b67160ea5cdf22b489652da6bff3 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 22:45:42 +0800 Subject: [PATCH 12/22] Complete a very simple mnist demo. --- demo/mnist/api_train.py | 108 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 99 insertions(+), 9 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 52cc13c5a3eaee..c1439bd526d8e6 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -1,8 +1,17 @@ +""" +A very basic example for how to use current Raw SWIG API to train mnist network. + +Current implementation uses Raw SWIG, which means the API call is directly \ +passed to C++ side of Paddle. + +The user api could be simpler and carefully designed. 
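+
+(Sketch of the flow in main() below: initPaddle, parse the network/optimizer
+config, create a GradientMachine plus a local ParameterUpdater, then for each
+pass run startPass, per-batch startBatch/forwardBackward/update/finishBatch
+with an Evaluator, test on the held-out set, and finally finishPass.)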
+""" import py_paddle.swig_paddle as api from py_paddle import DataProviderConverter import paddle.trainer.PyDataProvider2 as dp import paddle.trainer.config_parser import numpy as np +import random from mnist_util import read_from_mnist @@ -27,6 +36,18 @@ def generator_to_batch(generator, batch_size): yield ret_val +class BatchPool(object): + def __init__(self, generator, batch_size): + self.data = list(generator) + self.batch_size = batch_size + + def __call__(self): + random.shuffle(self.data) + for offset in xrange(0, len(self.data), self.batch_size): + limit = min(offset + self.batch_size, len(self.data)) + yield self.data[offset:limit] + + def input_order_converter(generator): for each_item in generator: yield each_item['pixel'], each_item['label'] @@ -37,46 +58,115 @@ def main(): config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') + # get enable_types for each optimizer. + # enable_types = [value, gradient, momentum, etc] + # For each optimizer(SGD, Adam), GradientMachine should enable different + # buffers. opt_config = api.OptimizationConfig.createFromProto(config.opt_config) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() + # Create Simple Gradient Machine. m = api.GradientMachine.createFromConfigProto( config.model_config, api.CREATE_MODE_NORMAL, enable_types) + + # This type check is not useful. Only enable type hint in IDE. + # Such as PyCharm assert isinstance(m, api.GradientMachine) + + # Initialize Parameter by numpy. init_parameter(network=m) + + # Create Local Updater. Local means not run in cluster. + # For a cluster training, here we can change to createRemoteUpdater + # in future. updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) + + # Initialize ParameterUpdater. updater.init(m) + # DataProvider Converter is a utility convert Python Object to Paddle C++ + # Input. The input format is as same as Paddle's DataProvider. converter = DataProviderConverter( input_types=[dp.dense_vector(784), dp.integer_value(10)]) train_file = './data/raw_data/train' + test_file = './data/raw_data/t10k' + # start gradient machine. + # the gradient machine must be started before invoke forward/backward. + # not just for training, but also for inference. m.start() - for _ in xrange(100): + # evaluator can print error rate, etc. It is a C++ class. + batch_evaluator = m.makeEvaluator() + test_evaluator = m.makeEvaluator() + + # Get Train Data. + # TrainData will stored in a data pool. Currently implementation is not care + # about memory, speed. Just a very naive implementation. + train_data_generator = input_order_converter(read_from_mnist(train_file)) + train_data = BatchPool(train_data_generator, 128) + + # outArgs is Neural Network forward result. Here is not useful, just passed + # to gradient_machine.forward + outArgs = api.Arguments.createArguments(0) + + for pass_id in xrange(2): # we train 2 passes. updater.startPass() - outArgs = api.Arguments.createArguments(0) - train_data_generator = input_order_converter( - read_from_mnist(train_file)) - for batch_id, data_batch in enumerate( - generator_to_batch(train_data_generator, 2048)): - trainRole = updater.startBatch(len(data_batch)) + for batch_id, data_batch in enumerate(train_data()): + # data_batch is input images. + # here, for online learning, we could get data_batch from network. + + # Start update one batch. 
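+            # (Note: startBatch returns the PassType that is later passed to
+            #  forwardBackward, and finishBatch must be called with this
+            #  batch's average cost once all parameters have been updated.)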
+ pass_type = updater.startBatch(len(data_batch)) + + # Start BatchEvaluator. + # batch_evaluator can be used between start/finish. + batch_evaluator.start() + + # A callback when backward. + # It is used for updating weight values vy calculated Gradient. def updater_callback(param): updater.update(param) + # forwardBackward is a shortcut for forward and backward. + # It is sometimes faster than invoke forward/backward separately, + # because in GradientMachine, it may be async. m.forwardBackward( - converter(data_batch), outArgs, trainRole, updater_callback) + converter(data_batch), outArgs, pass_type, updater_callback) + # Get cost. We use numpy to calculate total cost for this batch. cost_vec = outArgs.getSlotValue(0) cost_vec = cost_vec.copyToNumpyMat() cost = cost_vec.sum() / len(data_batch) - print 'Batch id', batch_id, 'with cost=', cost + + # Make evaluator works. + m.eval(batch_evaluator) + + # Print logs. + print 'Pass id', pass_id, 'Batch id', batch_id, 'with cost=', \ + cost, batch_evaluator + + batch_evaluator.finish() + # Finish batch. + # * will clear gradient. + # * ensure all values should be updated. updater.finishBatch(cost) + # testing stage. use test data set to test current network. + test_evaluator.start() + test_data_generator = input_order_converter(read_from_mnist(test_file)) + for data_batch in generator_to_batch(test_data_generator, 128): + # in testing stage, only forward is needed. + m.forward(converter(data_batch), outArgs, api.PASS_TEST) + m.eval(test_evaluator) + + # print error rate for test data set + print 'Pass', pass_id, ' test evaluator: ', test_evaluator + test_evaluator.finish() updater.finishPass() m.finish() From 680dd92bde2e4d6c2173f47d6da3263d827050e8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 11:31:31 +0800 Subject: [PATCH 13/22] Add AverageOptimizer, Add save parameter --- demo/mnist/api_train.py | 13 +++++++++++++ demo/mnist/simple_mnist_network.py | 7 ++++++- paddle/api/PaddleAPI.h | 6 ++++++ paddle/api/ParameterUpdater.cpp | 6 ++++++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index c1439bd526d8e6..ce75d79bebe3a8 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -157,6 +157,7 @@ def updater_callback(param): updater.finishBatch(cost) # testing stage. use test data set to test current network. 
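+        # (Assumption: apply()/restore() matter here because the config turns
+        #  on ModelAverage; apply() swaps the averaged weights into
+        #  PARAMETER_VALUE for testing, restore() brings the training weights
+        #  back afterwards.)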
+ updater.apply() test_evaluator.start() test_data_generator = input_order_converter(read_from_mnist(test_file)) for data_batch in generator_to_batch(test_data_generator, 128): @@ -167,6 +168,18 @@ def updater_callback(param): # print error rate for test data set print 'Pass', pass_id, ' test evaluator: ', test_evaluator test_evaluator.finish() + updater.restore() + + updater.catchUpWith() + params = m.getParameters() + for each_param in params: + assert isinstance(each_param, api.Parameter) + value = each_param.getBuf(api.PARAMETER_VALUE) + value = value.toNumpyArrayInplace() + + # Here, we could save parameter to every where you want + print each_param.getName(), value + updater.finishPass() m.finish() diff --git a/demo/mnist/simple_mnist_network.py b/demo/mnist/simple_mnist_network.py index 41f4e51657d35b..f5d1ea169e784e 100644 --- a/demo/mnist/simple_mnist_network.py +++ b/demo/mnist/simple_mnist_network.py @@ -1,6 +1,11 @@ from paddle.trainer_config_helpers import * -settings(learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) +settings( + learning_rate=1e-4, + learning_method=AdamOptimizer(), + batch_size=1000, + model_average=ModelAverage(average_window=0.5), + regularization=L2Regularization(rate=0.5)) imgs = data_layer(name='pixel', size=784) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 413c3851464621..d94fd1e52ed036 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -809,6 +809,12 @@ class ParameterUpdater { void update(Parameter* param); + void restore(); + + void apply(); + + void catchUpWith(); + private: ParameterUpdaterPrivate* m; }; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 91c83927628080..7cd8ed7e390748 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -48,3 +48,9 @@ void ParameterUpdater::update(Parameter *param) { auto paddleParam = param->m->getPtr(); m->updater->update(paddleParam); } + +void ParameterUpdater::restore() { m->updater->restore(); } + +void ParameterUpdater::apply() { m->updater->apply(); } + +void ParameterUpdater::catchUpWith() { m->updater->catchUpWith(); } From 5bca268bd1f9fdc01afe52834b486119076b1e8b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 14:51:51 +0800 Subject: [PATCH 14/22] Add gitignore --- demo/mnist/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/demo/mnist/.gitignore b/demo/mnist/.gitignore index 810910fd5ca56f..8bd9837523ccf9 100644 --- a/demo/mnist/.gitignore +++ b/demo/mnist/.gitignore @@ -4,3 +4,4 @@ mnist_vgg_model plot.png train.log *pyc +.ipynb_checkpoints From 59009ba72d54cc35717dbd80d73500f11fbb7852 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 14:51:51 +0800 Subject: [PATCH 15/22] Always use copy method for numpy. 
* Make this demo support GPU --- demo/mnist/.gitignore | 1 + demo/mnist/api_train.py | 9 ++++----- paddle/api/Paddle.swig | 1 + paddle/api/PaddleAPI.h | 2 ++ paddle/api/Parameter.cpp | 2 ++ 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/demo/mnist/.gitignore b/demo/mnist/.gitignore index 810910fd5ca56f..8bd9837523ccf9 100644 --- a/demo/mnist/.gitignore +++ b/demo/mnist/.gitignore @@ -4,3 +4,4 @@ mnist_vgg_model plot.png train.log *pyc +.ipynb_checkpoints diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index ce75d79bebe3a8..7e653246a31777 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -19,10 +19,9 @@ def init_parameter(network): assert isinstance(network, api.GradientMachine) for each_param in network.getParameters(): assert isinstance(each_param, api.Parameter) - array = each_param.getBuf(api.PARAMETER_VALUE).toNumpyArrayInplace() - assert isinstance(array, np.ndarray) - for i in xrange(len(array)): - array[i] = np.random.uniform(-1.0, 1.0) + array_size = len(each_param) + array = np.random.uniform(-1.0, 1.0, array_size).astype('float32') + each_param.getBuf(api.PARAMETER_VALUE).copyFromNumpyArray(array) def generator_to_batch(generator, batch_size): @@ -175,7 +174,7 @@ def updater_callback(param): for each_param in params: assert isinstance(each_param, api.Parameter) value = each_param.getBuf(api.PARAMETER_VALUE) - value = value.toNumpyArrayInplace() + value = value.copyToNumpyArray() # Here, we could save parameter to every where you want print each_param.getName(), value diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index 7a110a90b84fcb..3365927f9b5993 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -96,6 +96,7 @@ namespace std { %rename(__getitem__) Vector::get; %rename(__setitem__) Vector::set; %rename(__len__) Vector::getSize; +%rename(__len__) Parameter::getSize; %rename(__call__) ParameterTraverseCallback::apply; %rename(__repr__) Evaluator::toString; diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index d94fd1e52ed036..d4b057e8a19894 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -550,6 +550,8 @@ class Parameter { ParameterConfig* getConfig(); void setValueUpdated(); + size_t getSize() const; + private: static Parameter* createFromRawPtr(void* ptr); static Parameter* createFromSharedPtr(void* ptr); diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index 41cf50043cc2b0..ddc00d8d1af4c5 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -56,3 +56,5 @@ ParameterConfig* Parameter::getConfig() { size_t Parameter::getID() const { return m->getPtr()->getID(); } void Parameter::setValueUpdated() { m->getPtr()->setValueUpdated(); } + +size_t Parameter::getSize() const { return m->getPtr()->getSize(); } From f06b64fee47c1d807a224049243d2d3dec39fc5c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 07:45:18 +0000 Subject: [PATCH 16/22] Test GPU --- demo/mnist/api_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index ce75d79bebe3a8..e5a9075c8ecc0b 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -54,7 +54,7 @@ def input_order_converter(generator): def main(): - api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores + api.initPaddle("-use_gpu=true", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') From 5a685841317625786d4c37eb79abfd22cec995d6 
Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 07:57:04 +0000 Subject: [PATCH 17/22] Test on GPU --- demo/mnist/api_train.py | 17 +++++++---------- paddle/api/Vector.cpp | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 129922c30b48e8..48ba61c47da41a 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -53,7 +53,7 @@ def input_order_converter(generator): def main(): - api.initPaddle("-use_gpu=true", "-trainer_count=4") # use 4 cpu cores + api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') @@ -106,7 +106,7 @@ def main(): # TrainData will stored in a data pool. Currently implementation is not care # about memory, speed. Just a very naive implementation. train_data_generator = input_order_converter(read_from_mnist(train_file)) - train_data = BatchPool(train_data_generator, 128) + train_data = BatchPool(train_data_generator, 512) # outArgs is Neural Network forward result. Here is not useful, just passed # to gradient_machine.forward @@ -126,16 +126,13 @@ def main(): # batch_evaluator can be used between start/finish. batch_evaluator.start() - # A callback when backward. - # It is used for updating weight values vy calculated Gradient. - def updater_callback(param): - updater.update(param) - # forwardBackward is a shortcut for forward and backward. # It is sometimes faster than invoke forward/backward separately, # because in GradientMachine, it may be async. - m.forwardBackward( - converter(data_batch), outArgs, pass_type, updater_callback) + m.forwardBackward(converter(data_batch), outArgs, pass_type) + + for each_param in m.getParameters(): + updater.update(each_param) # Get cost. We use numpy to calculate total cost for this batch. cost_vec = outArgs.getSlotValue(0) @@ -159,7 +156,7 @@ def updater_callback(param): updater.apply() test_evaluator.start() test_data_generator = input_order_converter(read_from_mnist(test_file)) - for data_batch in generator_to_batch(test_data_generator, 128): + for data_batch in generator_to_batch(test_data_generator, 512): # in testing stage, only forward is needed. m.forward(converter(data_batch), outArgs, api.PASS_TEST) m.eval(test_evaluator) diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 874f2fd044e9e8..db8f005929d90f 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -253,7 +253,7 @@ void Vector::copyToNumpyArray(float** view_m_data, int* dim1) { *view_m_data = new float[*dim1]; if (auto cpuVec = dynamic_cast(m->vec.get())) { std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1)); - } else if (auto gpuVec = dynamic_cast(m->vec.get())) { + } else if (auto gpuVec = dynamic_cast(m->vec.get())) { hl_memcpy_device2host( *view_m_data, gpuVec->getData(), sizeof(float) * (*dim1)); } else { From 3a802729746468d654c1a0908a7787bc10618f94 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 16:57:12 +0800 Subject: [PATCH 18/22] Add comments. 
--- paddle/api/PaddleAPI.h | 50 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index d4b057e8a19894..0a273f9f6f942a 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -799,22 +799,61 @@ class ParameterUpdater { static ParameterUpdater* createLocalUpdater(OptimizationConfig* config); ~ParameterUpdater(); + /** + * @brief initialize Parameter Updater by GradientMachine. + * @param gm + */ void init(const GradientMachine& gm); + /** + * @brief begin of a training/testing of one pass. + */ void startPass(); + /** + * @brief end of a training/testing of one pass. + */ void finishPass(); + /** + * @brief begin of a training/testing of one batch. + * @param batchSize size of the current data batch. + * @return PassType, mostly will be training. + */ PassType startBatch(size_t batchSize); + /** + * @brief end of a training/testing of one batch. + * @param cost current batch cost. + */ void finishBatch(float cost); + /** + * @brief update a parameter (by local optimizer or by cluster pserver) + * @param param + */ void update(Parameter* param); + /** + * @brief restore the average parameter. + * @note It is only used in AverageOptimizer. Restore will get the current + * PARAMETER_VALUE back. + */ void restore(); + /** + * @brief apply. Store the average parameter. + * @note It is only used in AverageOptimizer. Apply will store the current + * PARAMETER_VALUE to buffer, calculate current Average Parameter, and save + * it to PARAMETER_VALUE. + */ void apply(); + /** + * @brief catchUpWith The Regularization will be delayed in many situations( + * pserver, local sparse). Catch Up means catch the regularization up, apply + * regularization to all params. + */ void catchUpWith(); private: @@ -830,10 +869,21 @@ class Evaluator { public: ~Evaluator(); + /** + * @brief begin an evaluation stage. + */ void start(); + /** + * @brief end an evaluation stage. + */ void finish(); + /** + * @brief toString will get an evaluation result. 
+ * + * __repr__ method in python + */ std::string toString(); private: From 843b63bb84586c2b861d971865be270b60a87c56 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 21 Dec 2016 19:26:05 +0800 Subject: [PATCH 19/22] add config_parser in trainer_config_helpers to seperate trainer config --- demo/mnist/api_train.py | 28 ++++++-- python/paddle/trainer/config_parser.py | 70 ++++++++++--------- .../paddle/trainer_config_helpers/__init__.py | 1 + .../trainer_config_helpers/config_parser.py | 38 ++++++++++ 4 files changed, 98 insertions(+), 39 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/config_parser.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 48ba61c47da41a..8fa286b5f940f7 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -9,11 +9,29 @@ import py_paddle.swig_paddle as api from py_paddle import DataProviderConverter import paddle.trainer.PyDataProvider2 as dp -import paddle.trainer.config_parser import numpy as np import random from mnist_util import read_from_mnist +import paddle.trainer_config_helpers.config_parser as config_parser +from paddle.trainer_config_helpers import * + + +def optimizer_config(): + settings( + learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) + + +def network_config(): + imgs = data_layer(name='pixel', size=784) + hidden1 = fc_layer(input=imgs, size=200) + hidden2 = fc_layer(input=hidden1, size=200) + inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) + cost = classification_cost( + input=inference, label=data_layer( + name='label', size=10)) + outputs(cost) + def init_parameter(network): assert isinstance(network, api.GradientMachine) @@ -54,20 +72,20 @@ def input_order_converter(generator): def main(): api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores - config = paddle.trainer.config_parser.parse_config( - 'simple_mnist_network.py', '') # get enable_types for each optimizer. # enable_types = [value, gradient, momentum, etc] # For each optimizer(SGD, Adam), GradientMachine should enable different # buffers. - opt_config = api.OptimizationConfig.createFromProto(config.opt_config) + opt_config_proto = config_parser.parse_optimizer_config(optimizer_config) + opt_config = api.OptimizationConfig.createFromProto(opt_config_proto) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() # Create Simple Gradient Machine. + model_config = config_parser.parse_network_config(network_config) m = api.GradientMachine.createFromConfigProto( - config.model_config, api.CREATE_MODE_NORMAL, enable_types) + model_config, api.CREATE_MODE_NORMAL, enable_types) # This type check is not useful. Only enable type hint in IDE. # Such as PyCharm diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 2eb7b17a0b40eb..674b5ac58b6feb 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3416,8 +3416,35 @@ def register_parse_config_hook(f): _parse_config_hooks.add(f) -def parse_config(config_file, config_arg_str): +def update_g_config(): ''' + Update g_config after execute config_file or config_functions. 
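+    In short: copy every non-None entry of `settings` into g_config.opt_config,
+    copy every non-None entry of `trainer_settings` into g_config, and verify
+    that each recorded input/output layer name refers to a defined layer.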
+ ''' + for k, v in settings.iteritems(): + if v is None: + continue + g_config.opt_config.__setattr__(k, v) + + for k, v in trainer_settings.iteritems(): + if v is None: + continue + g_config.__setattr__(k, v) + + for name in g_config.model_config.input_layer_names: + assert name in g_layer_map, \ + 'input name "%s" does not correspond to a layer name' % name + assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \ + 'The type of input layer "%s" is not "data"' % name + for name in g_config.model_config.output_layer_names: + assert name in g_layer_map, \ + 'input name "%s" does not correspond to a layer name' % name + return g_config + + +def parse_config(trainer_config, config_arg_str): + ''' + @param trainer_config: can be a string of config file name or a function name + with config logic @param config_arg_str: a string of the form var1=val1,var2=val2. It will be passed to config script as a dictionary CONFIG_ARGS ''' @@ -3451,45 +3478,20 @@ def parse_config(config_file, config_arg_str): g_root_submodel.is_recurrent_layer_group = False g_current_submodel = g_root_submodel - # for paddle on spark, need support non-file config. - # you can use parse_config like below: - # - # from paddle.trainer.config_parser import parse_config - # def configs(): - # #your paddle config code, which is same as config file. - # - # config = parse_config(configs, "is_predict=1") - # # then you get config proto object. - if hasattr(config_file, '__call__'): - config_file.func_globals.update( + if hasattr(trainer_config, '__call__'): + trainer_config.func_globals.update( make_config_environment("", config_args)) - config_file() + trainer_config() else: - execfile(config_file, make_config_environment(config_file, config_args)) - for k, v in settings.iteritems(): - if v is None: - continue - g_config.opt_config.__setattr__(k, v) - - for k, v in trainer_settings.iteritems(): - if v is None: - continue - g_config.__setattr__(k, v) + execfile(trainer_config, + make_config_environment(trainer_config, config_args)) - for name in g_config.model_config.input_layer_names: - assert name in g_layer_map, \ - 'input name "%s" does not correspond to a layer name' % name - assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \ - 'The type of input layer "%s" is not "data"' % name - for name in g_config.model_config.output_layer_names: - assert name in g_layer_map, \ - 'input name "%s" does not correspond to a layer name' % name - return g_config + return update_g_config() -def parse_config_and_serialize(config_file, config_arg_str): +def parse_config_and_serialize(trainer_config, config_arg_str): try: - config = parse_config(config_file, config_arg_str) + config = parse_config(trainer_config, config_arg_str) #logger.info(config) return config.SerializeToString() except: diff --git a/python/paddle/trainer_config_helpers/__init__.py b/python/paddle/trainer_config_helpers/__init__.py index a2335768b92b66..84ed40a036a187 100644 --- a/python/paddle/trainer_config_helpers/__init__.py +++ b/python/paddle/trainer_config_helpers/__init__.py @@ -20,6 +20,7 @@ from networks import * from optimizers import * from attrs import * +from config_parser import * # This will enable operator overload for LayerOutput import math as layer_math diff --git a/python/paddle/trainer_config_helpers/config_parser.py b/python/paddle/trainer_config_helpers/config_parser.py new file mode 100644 index 00000000000000..4b91b8d2824cd8 --- /dev/null +++ 
b/python/paddle/trainer_config_helpers/config_parser.py @@ -0,0 +1,38 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.trainer.config_parser as config_parser +''' +This file is a wrapper of formal config_parser. The main idea of this file is to +separete different config logic into different function, such as network configuration + and optimizer configuration. +''' + +__all__ = [ + "parse_trainer_config", "parse_network_config", "parse_optimizer_config" +] + + +def parse_trainer_config(trainer_conf, config_arg_str): + return config_parser.parse_config(trainer_conf, config_arg_str) + + +def parse_network_config(network_conf): + config = config_parser.parse_config(network_conf, '') + return config.model_config + + +def parse_optimizer_config(optimizer_conf): + config = config_parser.parse_config(optimizer_conf, '') + return config.opt_config From 763a30fdde211c047c2ba77d2d82cfa4152f0f26 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 22 Dec 2016 18:22:47 +0800 Subject: [PATCH 20/22] add config_parser_utils --- demo/mnist/api_train.py | 14 ++++--- demo/mnist/simple_mnist_network.py | 21 ---------- .../paddle/trainer_config_helpers/__init__.py | 2 +- .../config_parser_utils.py | 38 +++++++++++++++++++ 4 files changed, 48 insertions(+), 27 deletions(-) delete mode 100644 demo/mnist/simple_mnist_network.py create mode 100644 python/paddle/trainer_config_helpers/config_parser_utils.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 8fa286b5f940f7..924bd39a505308 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -12,14 +12,17 @@ import numpy as np import random from mnist_util import read_from_mnist - -import paddle.trainer_config_helpers.config_parser as config_parser +import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils from paddle.trainer_config_helpers import * def optimizer_config(): settings( - learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) + learning_rate=1e-4, + learning_method=AdamOptimizer(), + batch_size=1000, + model_average=ModelAverage(average_window=0.5), + regularization=L2Regularization(rate=0.5)) def network_config(): @@ -77,13 +80,14 @@ def main(): # enable_types = [value, gradient, momentum, etc] # For each optimizer(SGD, Adam), GradientMachine should enable different # buffers. - opt_config_proto = config_parser.parse_optimizer_config(optimizer_config) + opt_config_proto = config_parser_utils.parse_optimizer_config( + optimizer_config) opt_config = api.OptimizationConfig.createFromProto(opt_config_proto) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() # Create Simple Gradient Machine. 
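#
# (Illustrative aside, not part of this patch.) The helpers used below are thin
# wrappers around parse_config(); roughly, and assuming the config functions take
# no arguments:
#
#   def parse_optimizer_config(optimizer_conf):
#       return config_parser.parse_config(optimizer_conf, '').opt_config
#
#   def parse_network_config(network_conf):
#       return config_parser.parse_config(network_conf, '').model_config
#
# so optimizer_config() and network_config() are evaluated in isolation and only
# the relevant sub-proto (OptimizationConfig / ModelConfig) is handed to the
# GradientMachine API.
#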
- model_config = config_parser.parse_network_config(network_config) + model_config = config_parser_utils.parse_network_config(network_config) m = api.GradientMachine.createFromConfigProto( model_config, api.CREATE_MODE_NORMAL, enable_types) diff --git a/demo/mnist/simple_mnist_network.py b/demo/mnist/simple_mnist_network.py deleted file mode 100644 index f5d1ea169e784e..00000000000000 --- a/demo/mnist/simple_mnist_network.py +++ /dev/null @@ -1,21 +0,0 @@ -from paddle.trainer_config_helpers import * - -settings( - learning_rate=1e-4, - learning_method=AdamOptimizer(), - batch_size=1000, - model_average=ModelAverage(average_window=0.5), - regularization=L2Regularization(rate=0.5)) - -imgs = data_layer(name='pixel', size=784) - -hidden1 = fc_layer(input=imgs, size=200) -hidden2 = fc_layer(input=hidden1, size=200) - -inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) - -cost = classification_cost( - input=inference, label=data_layer( - name='label', size=10)) - -outputs(cost) diff --git a/python/paddle/trainer_config_helpers/__init__.py b/python/paddle/trainer_config_helpers/__init__.py index 84ed40a036a187..ef9859f8313efc 100644 --- a/python/paddle/trainer_config_helpers/__init__.py +++ b/python/paddle/trainer_config_helpers/__init__.py @@ -20,7 +20,7 @@ from networks import * from optimizers import * from attrs import * -from config_parser import * +from config_parser_utils import * # This will enable operator overload for LayerOutput import math as layer_math diff --git a/python/paddle/trainer_config_helpers/config_parser_utils.py b/python/paddle/trainer_config_helpers/config_parser_utils.py new file mode 100644 index 00000000000000..681b177a55f48d --- /dev/null +++ b/python/paddle/trainer_config_helpers/config_parser_utils.py @@ -0,0 +1,38 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.trainer.config_parser as config_parser +''' +This file is a wrapper of formal config_parser. The main idea of this file is to +separete different config logic into different function, such as network configuration + and optimizer configuration. 
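#
# (Illustrative usage sketch, not part of this patch.) Assuming optimizer_config()
# and network_config() are defined as in demo/mnist/api_train.py, and given a
# hypothetical trainer config file 'trainer_config.py':
#
#   from paddle.trainer_config_helpers import *
#
#   opt_proto = parse_optimizer_config(optimizer_config)    # -> OptimizationConfig
#   model_proto = parse_network_config(network_config)      # -> ModelConfig
#   full_proto = parse_trainer_config('trainer_config.py', 'is_predict=0')
#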
+''' + +__all__ = [ + "parse_trainer_config", "parse_network_config", "parse_optimizer_config" +] + + +def parse_trainer_config(trainer_conf, config_arg_str): + return config_parser.parse_config(trainer_conf, config_arg_str) + + +def parse_network_config(network_conf, config_arg_str=''): + config = config_parser.parse_config(network_conf, config_arg_str) + return config.model_config + + +def parse_optimizer_config(optimizer_conf, config_arg_str=''): + config = config_parser.parse_config(optimizer_conf, config_arg_str) + return config.opt_config From 9b41b08ef39aaf4f49daaf85a8defd4726642e69 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 21:24:14 +0800 Subject: [PATCH 21/22] Remove unnecessary import in api_train.py --- demo/mnist/api_train.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 924bd39a505308..f301da382ff8a5 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -12,7 +12,6 @@ import numpy as np import random from mnist_util import read_from_mnist -import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils from paddle.trainer_config_helpers import * @@ -80,14 +79,13 @@ def main(): # enable_types = [value, gradient, momentum, etc] # For each optimizer(SGD, Adam), GradientMachine should enable different # buffers. - opt_config_proto = config_parser_utils.parse_optimizer_config( - optimizer_config) + opt_config_proto = parse_optimizer_config(optimizer_config) opt_config = api.OptimizationConfig.createFromProto(opt_config_proto) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() # Create Simple Gradient Machine. - model_config = config_parser_utils.parse_network_config(network_config) + model_config = parse_network_config(network_config) m = api.GradientMachine.createFromConfigProto( model_config, api.CREATE_MODE_NORMAL, enable_types) From eca45928d5f3f9b1c2219fd71adb72160fee9edf Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 27 Dec 2016 13:15:27 +0800 Subject: [PATCH 22/22] Fix merge errors. --- paddle/api/PaddleAPI.h | 2 +- paddle/utils/common.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index e83718448ddfb9..09c891871a5ca8 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -870,7 +870,7 @@ struct EvaluatorPrivate; class Evaluator { private: Evaluator(); - DISABLE_COPY_AND_ASSIGN(Evaluator); + DISABLE_COPY(Evaluator); public: ~Evaluator(); diff --git a/paddle/utils/common.h b/paddle/utils/common.h index 3ff0b869478832..202a9d980d8350 100644 --- a/paddle/utils/common.h +++ b/paddle/utils/common.h @@ -14,8 +14,6 @@ limitations under the License. */ #pragma once -namespace paddle { - /** * Disable copy macro. */ @@ -24,6 +22,8 @@ namespace paddle { class_name(const class_name &other) = delete; \ class_name &operator=(const class_name &other) = delete +namespace paddle { + #ifdef PADDLE_TYPE_DOUBLE using real = double; #else