From 0049ce047961fafc284a3692c1895028fe758ec2 Mon Sep 17 00:00:00 2001
From: Qiao Longfei
Date: Mon, 30 Oct 2017 05:58:06 +0800
Subject: [PATCH] 03 image classification (#5192)

* add batch_norm_layer
* add img_conv_group layer and test
* add check to Tensor.type()
* forward can run
* with backward
* change label data type from int32 to int64
* refine code
* follow comment
---
 paddle/framework/operator.h                          |   1 +
 paddle/framework/tensor.h                            |   9 +-
 paddle/operators/batch_norm_op.cc                    |  30 +++-
 paddle/operators/reshape_op.cc                       |  12 +-
 paddle/operators/reshape_op.h                        |   7 +-
 python/paddle/v2/framework/framework.py              |   5 +-
 python/paddle/v2/framework/layers.py                 |  91 ++++++++++++
 python/paddle/v2/framework/nets.py                   |  71 +++++++++-
 .../tests/test_image_classification_layer.py         |  75 ++++++++++
 .../tests/test_image_classification_train.py         | 133 ++++++++++++++++++
 10 files changed, 418 insertions(+), 16 deletions(-)
 create mode 100644 python/paddle/v2/framework/tests/test_image_classification_layer.py
 create mode 100644 python/paddle/v2/framework/tests/test_image_classification_train.py

diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 1294e06fb1c..93885fa3028 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -408,6 +408,7 @@ class OperatorWithKernel : public OperatorBase {
   // indicate kernel DataType by input data. Defaultly all input data must be
   // same.
   virtual DataType IndicateDataType(const ExecutionContext& ctx) const {
+    VLOG(3) << "Default IndicateDataType " << this->Type();
     auto& scope = ctx.scope();
     int data_type = -1;
     for (auto& input : this->inputs_) {
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 9d2dc6a32bb..7b9a5b75e10 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -126,11 +126,16 @@ class Tensor {
   inline Tensor Slice(const int& begin_idx, const int& end_idx) const;
 
   platform::Place place() const {
-    PADDLE_ENFORCE_NOT_NULL(holder_, "Tensor get place() must contains holder");
+    PADDLE_ENFORCE_NOT_NULL(
+        holder_, "Tensor not initialized yet when Tensor::place() is called.");
     return holder_->place();
   }
 
-  std::type_index type() const { return holder_->type(); }
+  std::type_index type() const {
+    PADDLE_ENFORCE_NOT_NULL(
+        holder_, "Tensor not initialized yet when Tensor::type() is called.");
+    return holder_->type();
+  }
 
   size_t memory_size() const;
diff --git a/paddle/operators/batch_norm_op.cc b/paddle/operators/batch_norm_op.cc
index f7dc990f0db..f2c8be4c54e 100644
--- a/paddle/operators/batch_norm_op.cc
+++ b/paddle/operators/batch_norm_op.cc
@@ -18,6 +18,7 @@ namespace paddle {
 namespace operators {
 
 using Tensor = framework::Tensor;
+using LoDTensor = framework::LoDTensor;
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
@@ -64,6 +65,9 @@ class BatchNormOp : public framework::OperatorWithKernel {
         (tensor_format == TensorFormat::NCHW ? x_dims[1]
                                              : x_dims[x_dims.size() - 1]);
+    PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5,
+                   "Input x must have 3 to 5 dimensions.");
+
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL);
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], C);
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL);
@@ -108,10 +112,12 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker {
               "Store the global Variance when training");
     AddOutput("SavedMean",
               "Mean of the current mini batch, "
-              "will apply to output when training");
+              "will apply to output when training")
+        .AsIntermediate();
     AddOutput("SavedVariance",
               "Variance of the current mini batch, "
-              "will apply to output when training");
+              "will apply to output when training")
+        .AsIntermediate();
     AddComment(R"DOC(
 https://arxiv.org/pdf/1502.03167.pdf
@@ -135,7 +141,6 @@ class BatchNormKernel : public framework::OpKernel {
     const auto *x = ctx.Input<Tensor>("X");
     const auto &x_dims = x->dims();
 
-    PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5,
-                   "The Input dim size should be between 3 and 5");
     const int N = x_dims[0];
@@ -289,6 +294,25 @@ class BatchNormGradOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim(framework::GradVarName("Scale"), {C});
     ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
   }
+
+  framework::DataType IndicateDataType(
+      const framework::ExecutionContext &ctx) const override {
+    VLOG(3) << "IndicateDataType " << this->Type();
+    const auto *var = ctx.InputVar(framework::GradVarName("Y"));
+    if (var == nullptr) {
+      PADDLE_THROW("can't find Y@GRAD");
+    }
+    const Tensor *t = nullptr;
+    if (var->IsType<Tensor>()) {
+      t = &var->Get<Tensor>();
+    } else if (var->IsType<LoDTensor>()) {
+      t = &var->Get<LoDTensor>();
+    }
+    if (t == nullptr) {
+      PADDLE_THROW("can't find Y@GRAD");
+    }
+    return framework::ToDataType(t->type());
+  }
 };
 
 template <typename T>
diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc
index a8eb8d45eec..eda8226480a 100644
--- a/paddle/operators/reshape_op.cc
+++ b/paddle/operators/reshape_op.cc
@@ -34,13 +34,19 @@ class ReshapeOp : public framework::OperatorWithKernel {
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
-    for (auto dim : shape) {
-      PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive.");
+    auto x_dims = ctx->GetInputDim("X");
+    // TODO(qiao) change batch_size
+    for (int i = 1; i < shape.size(); ++i) {
+      PADDLE_ENFORCE(shape[i] > 0,
+                     "Each dimension of shape "
+                     "must be positive except the first.");
+    }
+    if (shape[0] < 0) {
+      shape[0] = x_dims[0];
     }
     // capacity check
     int64_t capacity =
         std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
-    auto x_dims = ctx->GetInputDim("X");
     int64_t in_size = framework::product(x_dims);
     PADDLE_ENFORCE_EQ(capacity, in_size,
                       "The size of Input(X) mismatches with Attr(shape).");
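With the relaxed check above, the first entry of Attr(shape) may now be negative and is resolved to the batch dimension of Input(X) during shape inference. A minimal Python-side sketch of how the reshape layer registered later in this patch could rely on that; the 784-wide data layer and the variable names are illustrative, not taken from the patch:

    import paddle.v2.framework.layers as layers
    from paddle.v2.framework.framework import Program

    program = Program()
    init_program = Program()

    images = layers.data(
        name='pixel', shape=[784], data_type='float32', program=program)
    fc1 = layers.fc(input=images,
                    size=512,
                    act=None,
                    program=program,
                    init_program=init_program)
    # A leading -1 keeps the (unknown) batch dimension: ReshapeOp::InferShape
    # now substitutes the first dimension of X for it instead of rejecting it.
    reshape1 = layers.reshape(
        x=fc1,
        shape=[-1, 512, 1, 1],
        program=program,
        init_program=init_program)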
diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h
index c89cdf8cab9..beb951713ae 100644
--- a/paddle/operators/reshape_op.h
+++ b/paddle/operators/reshape_op.h
@@ -26,13 +26,8 @@ class ReshapeKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& ctx) const {
     auto* out = ctx.Output<framework::Tensor>("Out");
     auto* in = ctx.Input<framework::Tensor>("X");
+    auto out_dims = out->dims();
     out->mutable_data<T>(ctx.GetPlace());
-
-    auto shape = ctx.Attr<std::vector<int>>("shape");
-    std::vector<int64_t> shape_int64(shape.size(), 0);
-    std::transform(shape.begin(), shape.end(), shape_int64.begin(),
-                   [](int a) { return static_cast<int64_t>(a); });
-    auto out_dims = framework::make_ddim(shape_int64);
     out->CopyFrom(*in, ctx.GetPlace(), ctx.device_context());
     out->Resize(out_dims);
   }
diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/framework/framework.py
index 348c393913b..43101c9ddad 100644
--- a/python/paddle/v2/framework/framework.py
+++ b/python/paddle/v2/framework/framework.py
@@ -352,7 +352,10 @@ class Block(object):
         return {v for k, v in self.vars.iteritems() if isinstance(v, Parameter)}
 
     def create_var(self, *args, **kwargs):
-        return Variable(self, *args, **kwargs)
+        var = Variable(self, *args, **kwargs)
+        if 'init_attr' in kwargs:
+            self._prepend_initialize_ops_(var, kwargs['init_attr'])
+        return var
 
     def has_var(self, name):
         return name in self.vars
diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py
index 9e6d5f49db6..041a3b2c0b0 100644
--- a/python/paddle/v2/framework/layers.py
+++ b/python/paddle/v2/framework/layers.py
@@ -161,6 +161,7 @@ def _create_op_func_(op_type):
 _create_op_func_('mean')
 _create_op_func_('mul')
 _create_op_func_('dropout')
+_create_op_func_('reshape')
 
 
 def cast(x, data_type, program=None):
@@ -308,6 +309,96 @@ def pool2d(input,
     return pool_out
 
 
+def batch_norm(input,
+               act=None,
+               is_test=False,
+               momentum=0.9,
+               epsilon=1e-05,
+               param_attr=None,
+               bias_attr=None,
+               data_layout='NCHW',
+               program=None,
+               init_program=None):
+    helper = LayerHelper('batch_norm', **locals())
+    dtype = helper.input_dtype()
+
+    input_shape = input.shape
+    if data_layout == 'NCHW':
+        channel_num = input_shape[1]
+    else:
+        if data_layout == 'NHWC':
+            channel_num = input_shape[-1]
+        else:
+            raise ValueError("unsupported data layout:" + data_layout)
+
+    def get_init_attr(value):
+        if not isinstance(value, float):
+            raise ValueError("attr value should be a float")
+        return {'type': 'fill_constant', 'value': value}
+
+    def prepend_init_op(var, init_attr):
+        assert isinstance(var, Variable)
+        op_type = init_attr['type']
+        init_attr['shape'] = var.shape
+        init_attr['data_type'] = int(var.data_type)
+        op = var.block.prepend_op(
+            type=op_type, inputs=None, outputs={'Out': [var]}, attrs=init_attr)
+        return op
+
+    def create_persistable_var(dtype, shape, init_attr=None):
+        name = unique_name(".".join([helper.name, "xxxx"]))
+        var = init_program.global_block().create_var(
+            dtype=dtype, shape=shape, name=name, persistable=True)
+        if init_attr is not None:
+            prepend_init_op(var, init_attr)
+        return program.global_block().create_var(
+            name=name, dtype=dtype, shape=shape, persistable=True)
+
+    param_shape = [channel_num]
+
+    # create parameter
+    scale = helper.create_parameter(
+        attr=helper.param_attr, shape=param_shape, dtype=dtype)
+    bias = helper.create_parameter(
+        attr=helper.param_attr, shape=param_shape, dtype=dtype)
+
+    # create input
+    mean = create_persistable_var(dtype, param_shape, get_init_attr(0.0))
+    variance = create_persistable_var(dtype, param_shape, get_init_attr(1.0))
+
+    # create output
+    # mean and mean_out share the same memory
+    mean_out = mean
+    # variance and variance_out share the same memory
+    variance_out = variance
+    saved_mean = helper.create_tmp_variable(dtype)
+    saved_variance = helper.create_tmp_variable(dtype)
+
+    batch_norm_out = helper.create_tmp_variable(dtype)
+
+    helper.append_op(
+        type="batch_norm",
+        inputs={
+            "X": input,
+            "Scale": scale,
+            "Bias": bias,
+            "Mean": mean,
+            "Variance": variance
+        },
+        outputs={
+            "Y": batch_norm_out,
+            "MeanOut": mean_out,
+            "VarianceOut": variance_out,
+            "SavedMean": saved_mean,
+            "SavedVariance": saved_variance
+        },
+        attrs={"momentum": momentum,
+               "epsilon": epsilon,
+               "is_test": is_test})
+
+    return helper.append_activation(batch_norm_out)
+
+
 class BlockGuard(object):
     """
     BlockGuard used to create sub-block in program by using Python `with`
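The batch_norm layer above wires the C++ batch_norm operator into the Python API with the same calling convention as the other layers in this file. A minimal usage sketch, mirroring the unit test added later in this patch (the 3x48x48 input and variable names are illustrative):

    import paddle.v2.framework.layers as layers
    from paddle.v2.framework.framework import Program

    program = Program()
    init_program = Program()

    images = layers.data(
        name='pixel', shape=[3, 48, 48], data_type='float32', program=program)
    # scale/bias parameters and the running mean/variance are created by the
    # layer itself; act is applied to the normalized output.
    hidden = layers.batch_norm(
        input=images,
        act='relu',
        data_layout='NCHW',
        program=program,
        init_program=init_program)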
"is_test": is_test}) + + return helper.append_activation(batch_norm_out) + + class BlockGuard(object): """ BlockGuard used to create sub-block in program by using Python `with` diff --git a/python/paddle/v2/framework/nets.py b/python/paddle/v2/framework/nets.py index 8a83ebfb963..803534fa391 100644 --- a/python/paddle/v2/framework/nets.py +++ b/python/paddle/v2/framework/nets.py @@ -7,6 +7,7 @@ def simple_img_conv_pool(input, pool_size, pool_stride, act, + pool_type='max', program=None, init_program=None): conv_out = layers.conv2d( @@ -20,7 +21,75 @@ def simple_img_conv_pool(input, pool_out = layers.pool2d( input=conv_out, pool_size=pool_size, - pool_type='max', + pool_type=pool_type, + pool_stride=pool_stride, + program=program, + init_program=init_program) + return pool_out + + +def img_conv_group(input, + conv_num_filter, + pool_size, + conv_padding=1, + conv_filter_size=3, + conv_act=None, + conv_with_batchnorm=False, + conv_batchnorm_drop_rate=None, + pool_stride=1, + pool_type=None, + program=None, + init_program=None): + """ + Image Convolution Group, Used for vgg net. + """ + tmp = input + assert isinstance(conv_num_filter, list) or \ + isinstance(conv_num_filter, tuple) + + def __extend_list__(obj): + if not hasattr(obj, '__len__'): + return [obj] * len(conv_num_filter) + else: + return obj + + conv_padding = __extend_list__(conv_padding) + conv_filter_size = __extend_list__(conv_filter_size) + conv_with_batchnorm = __extend_list__(conv_with_batchnorm) + conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate) + + for i in xrange(len(conv_num_filter)): + local_conv_act = conv_act + if conv_with_batchnorm[i]: + local_conv_act = None + + tmp = layers.conv2d( + input=tmp, + num_filters=conv_num_filter[i], + filter_size=conv_filter_size[i], + padding=conv_padding[i], + act=local_conv_act, + program=program, + init_program=init_program) + + if conv_with_batchnorm[i]: + tmp = layers.batch_norm( + input=tmp, + act=conv_act, + program=program, + init_program=init_program) + drop_rate = conv_batchnorm_drop_rate[i] + if abs(drop_rate) > 1e-5: + tmp = layers.dropout( + x=tmp, + dropout_prob=drop_rate, + program=program, + init_program=init_program) + + pool_out = layers.pool2d( + input=tmp, + pool_size=pool_size, + pool_type=pool_type, pool_stride=pool_stride, program=program, init_program=init_program) diff --git a/python/paddle/v2/framework/tests/test_image_classification_layer.py b/python/paddle/v2/framework/tests/test_image_classification_layer.py new file mode 100644 index 00000000000..908cf44b88a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_image_classification_layer.py @@ -0,0 +1,75 @@ +import unittest + +import paddle.v2.framework.layers as layers +import paddle.v2.framework.nets as nets +from paddle.v2.framework.framework import Program + + +def conv_block(input, + num_filter, + groups, + dropouts, + program=None, + init_program=None): + return nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max', + program=program, + init_program=init_program) + + +class TestLayer(unittest.TestCase): + def test_batch_norm_layer(self): + program = Program() + init_program = Program() + images = layers.data( + name='pixel', + shape=[3, 48, 48], + data_type='float32', + program=program) + layers.batch_norm( + input=images, program=program, init_program=init_program) + + #print str(program) + + def 
diff --git a/python/paddle/v2/framework/tests/test_image_classification_layer.py b/python/paddle/v2/framework/tests/test_image_classification_layer.py
new file mode 100644
index 00000000000..908cf44b88a
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_image_classification_layer.py
@@ -0,0 +1,75 @@
+import unittest
+
+import paddle.v2.framework.layers as layers
+import paddle.v2.framework.nets as nets
+from paddle.v2.framework.framework import Program
+
+
+def conv_block(input,
+               num_filter,
+               groups,
+               dropouts,
+               program=None,
+               init_program=None):
+    return nets.img_conv_group(
+        input=input,
+        pool_size=2,
+        pool_stride=2,
+        conv_num_filter=[num_filter] * groups,
+        conv_filter_size=3,
+        conv_act='relu',
+        conv_with_batchnorm=True,
+        conv_batchnorm_drop_rate=dropouts,
+        pool_type='max',
+        program=program,
+        init_program=init_program)
+
+
+class TestLayer(unittest.TestCase):
+    def test_batch_norm_layer(self):
+        program = Program()
+        init_program = Program()
+        images = layers.data(
+            name='pixel',
+            shape=[3, 48, 48],
+            data_type='float32',
+            program=program)
+        layers.batch_norm(
+            input=images, program=program, init_program=init_program)
+
+        #print str(program)
+
+    def test_dropout_layer(self):
+        program = Program()
+        init_program = Program()
+        images = layers.data(
+            name='pixel',
+            shape=[3, 48, 48],
+            data_type='float32',
+            program=program)
+        layers.dropout(
+            x=images,
+            dropout_prob=0.5,
+            program=program,
+            init_program=init_program)
+
+        #print str(program)
+
+    def test_img_conv_group(self):
+        program = Program()
+        init_program = Program()
+
+        images = layers.data(
+            name='pixel',
+            shape=[3, 48, 48],
+            data_type='float32',
+            program=program,
+            init_program=init_program)
+        conv1 = conv_block(images, 64, 2, [0.3, 0], program, init_program)
+        conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], program, init_program)
+
+        # print str(program)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_image_classification_train.py b/python/paddle/v2/framework/tests/test_image_classification_train.py
new file mode 100644
index 00000000000..4eb9051261e
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_image_classification_train.py
@@ -0,0 +1,133 @@
+import paddle.v2 as paddle
+import paddle.v2.framework.layers as layers
+import paddle.v2.framework.nets as nets
+import paddle.v2.framework.core as core
+import paddle.v2.framework.optimizer as optimizer
+
+from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.executor import Executor
+
+import numpy as np
+
+
+def vgg16_bn_drop(input, program, init_program):
+    def conv_block(input,
+                   num_filter,
+                   groups,
+                   dropouts,
+                   program=None,
+                   init_program=None):
+        return nets.img_conv_group(
+            input=input,
+            pool_size=2,
+            pool_stride=2,
+            conv_num_filter=[num_filter] * groups,
+            conv_filter_size=3,
+            conv_act='relu',
+            conv_with_batchnorm=True,
+            conv_batchnorm_drop_rate=dropouts,
+            pool_type='max',
+            program=program,
+            init_program=init_program)
+
+    conv1 = conv_block(input, 64, 2, [0.3, 0], program, init_program)
+    conv2 = conv_block(conv1, 128, 2, [0.4, 0], program, init_program)
+    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], program, init_program)
+    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], program, init_program)
+    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], program, init_program)
+
+    drop = layers.dropout(
+        x=conv5, dropout_prob=0.5, program=program, init_program=init_program)
+    fc1 = layers.fc(input=drop,
+                    size=512,
+                    act=None,
+                    program=program,
+                    init_program=init_program)
+    reshape1 = layers.reshape(
+        x=fc1,
+        shape=list(fc1.shape + (1, 1)),
+        program=program,
+        init_program=init_program)
+    bn = layers.batch_norm(
+        input=reshape1, act='relu', program=program, init_program=init_program)
+    drop2 = layers.dropout(
+        x=bn, dropout_prob=0.5, program=program, init_program=init_program)
+    fc2 = layers.fc(input=drop2,
+                    size=512,
+                    act=None,
+                    program=program,
+                    init_program=init_program)
+    return fc2
+
+
+init_program = Program()
+program = Program()
+
+classdim = 10
+data_shape = [3, 32, 32]
+
+images = layers.data(
+    name='pixel', shape=data_shape, data_type='float32', program=program)
+
+label = layers.data(
+    name='label',
+    shape=[1],
+    data_type='int64',
+    program=program,
+    init_program=init_program)
+vgg_net = vgg16_bn_drop(images, program, init_program)
+predict = layers.fc(input=vgg_net,
+                    size=classdim,
+                    act='softmax',
+                    program=program,
+                    init_program=init_program)
+cost = layers.cross_entropy(
+    input=predict, label=label, program=program, init_program=init_program)
+avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
+
+sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
+opts = sgd_optimizer.minimize(avg_cost)
+
+BATCH_SIZE = 128
+PASS_NUM = 1
+
+train_reader = paddle.batch(
+    paddle.reader.shuffle(
+        paddle.dataset.cifar.train10(), buf_size=128 * 10),
+    batch_size=BATCH_SIZE)
+
+place = core.CPUPlace()
+exe = Executor(place)
+
+exe.run(init_program, feed={}, fetch_list=[])
+
+for pass_id in range(PASS_NUM):
+    batch_id = 0
+    for data in train_reader():
+        img_data = np.array(map(lambda x: x[0].reshape(data_shape),
+                                data)).astype("float32")
+        y_data = np.array(map(lambda x: x[1], data)).astype("int64")
+        batch_size = 1
+        for i in y_data.shape:
+            batch_size = batch_size * i
+        y_data = y_data.reshape([batch_size, 1])
+
+        tensor_img = core.LoDTensor()
+        tensor_y = core.LoDTensor()
+        tensor_img.set(img_data, place)
+        tensor_y.set(y_data, place)
+
+        outs = exe.run(program,
+                       feed={"pixel": tensor_img,
+                             "label": tensor_y},
+                       fetch_list=[avg_cost])
+
+        loss = np.array(outs[0])
+        # print("pass_id:" + str(pass_id) + " batch_id:" + str(batch_id) +
+        #       " loss:" + str(loss))
+        batch_id = batch_id + 1
+
+        if batch_id > 1:
+            # this model is slow, so if we can train two mini-batches,
+            # we think it works properly.
+            exit(0)
+exit(1)
--
GitLab