From 6752b06f8cc6d67f00a6bafb39a164d1ffd39322 Mon Sep 17 00:00:00 2001
From: emailweixu
Date: Thu, 15 Feb 2018 14:14:21 -0800
Subject: [PATCH] Generating random numbers with given batch size (#8337)

* Generating random numbers with given batch size

  uniform_random_batch_size_like_op
  gaussian_random_batch_size_like_op

* More comments about random seed.

* Move test_*_random_batch_size_like_op to unittests
---
 paddle/fluid/operators/CMakeLists.txt         | 14 ++++
 paddle/fluid/operators/batch_size_like.cc     | 64 ++++++++++++++++
 paddle/fluid/operators/batch_size_like.h      | 36 +++++++++
 .../fill_constant_batch_size_like_op.cc       | 52 ++-----------
 .../gaussian_random_batch_size_like_op.cc     | 73 +++++++++++++++++++
 paddle/fluid/operators/gaussian_random_op.cc  | 10 ++-
 paddle/fluid/operators/gaussian_random_op.cu  |  6 +-
 .../uniform_random_batch_size_like_op.cc      | 72 ++++++++++++++++++
 paddle/fluid/operators/uniform_random_op.cc   |  9 ++-
 paddle/fluid/operators/uniform_random_op.cu   |  3 +
 python/paddle/v2/fluid/layers/ops.py          |  3 +
 .../v2/fluid/tests/unittests/op_test.py       | 18 ++++-
 ...test_gaussian_random_batch_size_like_op.py | 46 ++++++++++++
 .../test_uniform_random_batch_size_like_op.py | 42 +++++++++++
 .../tests/unittests/test_uniform_random_op.py | 46 ++++--------
 15 files changed, 409 insertions(+), 85 deletions(-)
 create mode 100644 paddle/fluid/operators/batch_size_like.cc
 create mode 100644 paddle/fluid/operators/batch_size_like.h
 create mode 100644 paddle/fluid/operators/gaussian_random_batch_size_like_op.cc
 create mode 100644 paddle/fluid/operators/uniform_random_batch_size_like_op.cc
 create mode 100644 python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py
 create mode 100644 python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py

diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index cadfd735d7b..8f14fd376ae 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -176,6 +176,20 @@ op_library(pool_op SRCS pool_op.cc DEPS pooling)
 op_library(conv_transpose_op SRCS conv_transpose_op.cc DEPS vol2col)
 endif()
 
+cc_library(batch_size_like SRCS batch_size_like.cc DEPS op_registry)
+
+op_library(fill_constant_batch_size_like_op
+  SRCS fill_constant_batch_size_like_op.cc fill_constant_batch_size_like_op.cu.cc
+  DEPS batch_size_like)
+
+op_library(uniform_random_batch_size_like_op
+  SRCS uniform_random_batch_size_like_op.cc
+  DEPS batch_size_like uniform_random_op)
+
+op_library(gaussian_random_batch_size_like_op
+  SRCS gaussian_random_batch_size_like_op.cc
+  DEPS batch_size_like gaussian_random_op)
+
 # FIXME(typhoonzero): save/load depends lodtensor serialization functions
 op_library(save_op DEPS lod_tensor)
 op_library(load_op DEPS lod_tensor)
diff --git a/paddle/fluid/operators/batch_size_like.cc b/paddle/fluid/operators/batch_size_like.cc
new file mode 100644
index 00000000000..4d4a6d4c472
--- /dev/null
+++ b/paddle/fluid/operators/batch_size_like.cc
@@ -0,0 +1,64 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/batch_size_like.h"
+
+namespace paddle {
+namespace operators {
+
+void BatchSizeLikeOp::InferShape(framework::InferShapeContext *ctx) const {
+  PADDLE_ENFORCE(ctx->HasInput("Input"),
+                 "Input(Input) of %s should not be null.", Type());
+  PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                 "Output(Out) of %s should not be null.", Type());
+
+  auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
+  PADDLE_ENFORCE_GT(shape.size(), 0);
+  std::vector<int64_t> shape_int64(shape.size(), 0);
+  std::transform(shape.begin(), shape.end(), shape_int64.begin(),
+                 [](int a) { return static_cast<int64_t>(a); });
+  auto output_dim = framework::make_ddim(shape_int64);
+
+  int input_dim_idx = ctx->Attrs().Get<int>("input_dim_idx");
+  PADDLE_ENFORCE_GE(input_dim_idx, 0);
+  PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx);
+
+  int output_dim_idx = ctx->Attrs().Get<int>("output_dim_idx");
+  PADDLE_ENFORCE_GE(output_dim_idx, 0);
+  PADDLE_ENFORCE_GT(static_cast<int>(shape.size()), output_dim_idx);
+
+  output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx];
+  ctx->SetOutputDim("Out", output_dim);
+}
+
+BatchSizeLikeOpMaker::BatchSizeLikeOpMaker(OpProto *proto,
+                                           OpAttrChecker *op_checker)
+    : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  AddInput("Input",
+           "(Tensor) Tensor "
+           "whose input_dim_idx'th dimension specifies the batch_size");
+  AddOutput("Out",
+            "(Tensor) Tensor of specified shape will be filled "
+            "with the specified value");
+  AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
+  AddAttr<int>("input_dim_idx",
+               "(int, default 0) The index of input's batch size dimension")
+      .SetDefault(0);
+  AddAttr<int>("output_dim_idx",
+               "(int, default 0) The index of output's batch size dimension")
+      .SetDefault(0);
+}
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h
new file mode 100644
index 00000000000..87e8f053a73
--- /dev/null
+++ b/paddle/fluid/operators/batch_size_like.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/math/math_function.h"
+
+namespace paddle {
+namespace operators {
+
+class BatchSizeLikeOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext *ctx) const override;
+};
+
+class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  BatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker);
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc
index a36248531ef..55eca71c8bd 100644
--- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc
+++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc
@@ -13,42 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/fill_constant_batch_size_like_op.h"
+#include "paddle/fluid/operators/batch_size_like.h"
 
 namespace paddle {
 namespace operators {
 
-class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("Input"),
-        "Input(Input) of FillConstantBatchSizeLikeOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(Out) of FillConstantBatchSizeLikeOp should not be null.");
-
-    auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
-    PADDLE_ENFORCE_GT(shape.size(), 0);
-    std::vector<int64_t> shape_int64(shape.size(), 0);
-    std::transform(shape.begin(), shape.end(), shape_int64.begin(),
-                   [](int a) { return static_cast<int64_t>(a); });
-    auto output_dim = framework::make_ddim(shape_int64);
-
-    int input_dim_idx = ctx->Attrs().Get<int>("input_dim_idx");
-    PADDLE_ENFORCE_GE(input_dim_idx, 0);
-    PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx);
-
-    int output_dim_idx = ctx->Attrs().Get<int>("output_dim_idx");
-    PADDLE_ENFORCE_GE(output_dim_idx, 0);
-    PADDLE_ENFORCE_GT(static_cast<int>(shape.size()), output_dim_idx);
-
-    output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx];
-    ctx->SetOutputDim("Out", output_dim);
-  }
-
+class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp {
  protected:
+  using BatchSizeLikeOp::BatchSizeLikeOp;
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
     return framework::OpKernelType(
@@ -57,28 +29,14 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel {
   }
 };
 
-class FillConstantBatchSizeLikeOpMaker
-    : public framework::OpProtoAndCheckerMaker {
+class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
  public:
   FillConstantBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+      : BatchSizeLikeOpMaker(proto, op_checker) {
     AddAttr<int>("dtype",
                  "(int, default 5 (FP32)) "
                  "Output data type")
         .SetDefault(framework::proto::DataType::FP32);
-    AddInput("Input",
-             "(Tensor) Tensor "
-             "whose dim_idx th dimension is used to specify the batch_size");
-    AddOutput("Out",
-              "(Tensor) Tensor of specified shape will be filled "
-              "with the specified value");
-    AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
-    AddAttr<int>("input_dim_idx",
-                 "(int, default 0) The index of input's batch size dimension")
-        .SetDefault(0);
-
AddAttr("output_dim_idx", - "(int, default 0) The index of output's batch size dimension") - .SetDefault(0); AddAttr("value", "(float, default 0) The value to be filled") .SetDefault(0.0f); AddComment(R"DOC( diff --git a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc new file mode 100644 index 00000000000..ac516986add --- /dev/null +++ b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc @@ -0,0 +1,73 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/batch_size_like.h" + +namespace paddle { +namespace operators { + +class GaussianRandomBatchSizeLikeOp : public BatchSizeLikeOp { + protected: + using BatchSizeLikeOp::BatchSizeLikeOp; + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + static_cast(ctx.Attr("dtype")), + ctx.GetPlace()); + } +}; + +class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { + public: + GaussianRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : BatchSizeLikeOpMaker(proto, op_checker) { + AddAttr("mean", + "(float, default 0.0) " + "mean of random tensor.") + .SetDefault(.0f); + AddAttr("std", + "(float, default 1.0) " + "std of random tensor.") + .SetDefault(1.0f); + AddAttr("seed", + "(int, default 0) " + "Random seed of generator." + "0 means use system wide seed." + "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") + .SetDefault(0); + AddAttr("dtype", + "(int, default 5(FP32)) " + "Output data type.") + .SetDefault(framework::proto::DataType::FP32); + + AddComment(R"DOC( +GaussianRandom Operator. + +Used to initialize tensors with gaussian random generator. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT( + gaussian_random_batch_size_like, + paddle::operators::GaussianRandomBatchSizeLikeOp, + paddle::operators::GaussianRandomBatchSizeLikeOpMaker); +// Kernels are registered in gaussian_random_op.cc and gaussian_random_op.cu diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index cf3a528bdd0..7fb2b2c230e 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -88,7 +88,9 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("seed", "(int, default 0) " "Random seed of generator." - "0 means use system wide seed.") + "0 means use system wide seed." + "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") .SetDefault(0); AddAttr("dtype", "(int, default 5(FP32)) " @@ -110,4 +112,8 @@ Used to initialize tensors with gaussian random generator. 
 namespace ops = paddle::operators;
 REGISTER_OP_WITHOUT_GRADIENT(gaussian_random, ops::GaussianRandomOp,
                              ops::GaussianRandomOpMaker);
-REGISTER_OP_CPU_KERNEL(gaussian_random, ops::CPUGaussianRandomKernel<float>);
+REGISTER_OP_CPU_KERNEL(gaussian_random, ops::CPUGaussianRandomKernel<float>,
+                       ops::CPUGaussianRandomKernel<double>);
+REGISTER_OP_CPU_KERNEL(gaussian_random_batch_size_like,
+                       ops::CPUGaussianRandomKernel<float>,
+                       ops::CPUGaussianRandomKernel<double>);
diff --git a/paddle/fluid/operators/gaussian_random_op.cu b/paddle/fluid/operators/gaussian_random_op.cu
index 7340590c3ef..7784856417e 100644
--- a/paddle/fluid/operators/gaussian_random_op.cu
+++ b/paddle/fluid/operators/gaussian_random_op.cu
@@ -61,4 +61,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 REGISTER_OP_CUDA_KERNEL(gaussian_random,
-                        paddle::operators::GPUGaussianRandomKernel<float>);
+                        paddle::operators::GPUGaussianRandomKernel<float>,
+                        paddle::operators::GPUGaussianRandomKernel<double>);
+REGISTER_OP_CUDA_KERNEL(gaussian_random_batch_size_like,
+                        paddle::operators::GPUGaussianRandomKernel<float>,
+                        paddle::operators::GPUGaussianRandomKernel<double>);
diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc
new file mode 100644
index 00000000000..fa31dad513d
--- /dev/null
+++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/operators/batch_size_like.h"
+
+namespace paddle {
+namespace operators {
+
+class UniformRandomBatchSizeLikeOp : public BatchSizeLikeOp {
+ protected:
+  using BatchSizeLikeOp::BatchSizeLikeOp;
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::OpKernelType(
+        static_cast<framework::proto::DataType>(ctx.Attr<int>("dtype")),
+        ctx.GetPlace());
+  }
+};
+
+class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
+ public:
+  UniformRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : BatchSizeLikeOpMaker(proto, op_checker) {
+    AddComment(R"DOC(
+Uniform random operator.
+
+This operator initializes a tensor with the same batch_size as the Input tensor
+with random values sampled from a uniform distribution.
+
+)DOC");
+    AddAttr<float>("min",
+                   "(float, default -1.0) "
+                   "Minimum value of uniform random")
+        .SetDefault(-1.0f);
+    AddAttr<float>("max",
+                   "(float, default 1.0) "
+                   "Maximum value of uniform random")
+        .SetDefault(1.0f);
+    AddAttr<int>("seed",
+                 "(int, default 0) "
+                 "Random seed used for generating samples. "
+                 "0 means use a seed generated by the system. "
+                 "Note that if seed is not 0, this operator will always "
+                 "generate the same random numbers every time.")
+        .SetDefault(0);
+    AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type")
+        .SetDefault(framework::proto::DataType::FP32);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OP_WITHOUT_GRADIENT(
+    uniform_random_batch_size_like,
+    paddle::operators::UniformRandomBatchSizeLikeOp,
+    paddle::operators::UniformRandomBatchSizeLikeOpMaker);
+// Kernels are registered in uniform_random_op.cc and uniform_random_op.cu
diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index 6c0167deab5..3a0a0d6fcaf 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -79,7 +79,7 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 Uniform random operator.
 
-This operator initializes a tensor with random values sampled from a 
+This operator initializes a tensor with random values sampled from a
 uniform distribution.
 
 )DOC");
@@ -96,7 +96,9 @@ uniform distribution.
     AddAttr<int>("seed",
                  "(int, default 0) "
                  "Random seed used for generating samples. "
-                 "0 means use a seed generated by the system.")
+                 "0 means use a seed generated by the system. "
+                 "Note that if seed is not 0, this operator will always "
+                 "generate the same random numbers every time.")
         .SetDefault(0);
     AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type")
         .SetDefault(framework::proto::DataType::FP32);
@@ -110,3 +112,6 @@ REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp,
 REGISTER_OP_CPU_KERNEL(uniform_random,
                        paddle::operators::CPUUniformRandomKernel<float>,
                        paddle::operators::CPUUniformRandomKernel<double>);
+REGISTER_OP_CPU_KERNEL(uniform_random_batch_size_like,
+                       paddle::operators::CPUUniformRandomKernel<float>,
+                       paddle::operators::CPUUniformRandomKernel<double>);
diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu
index 877d81d5c48..1232cd1eb33 100644
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -66,3 +66,6 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
 REGISTER_OP_CUDA_KERNEL(uniform_random,
                         paddle::operators::GPUUniformRandomKernel<float>,
                         paddle::operators::GPUUniformRandomKernel<double>);
+REGISTER_OP_CUDA_KERNEL(uniform_random_batch_size_like,
+                        paddle::operators::GPUUniformRandomKernel<float>,
+                        paddle::operators::GPUUniformRandomKernel<double>);
diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py
index 28265a57e6a..0b88b639629 100644
--- a/python/paddle/v2/fluid/layers/ops.py
+++ b/python/paddle/v2/fluid/layers/ops.py
@@ -66,6 +66,9 @@ __all__ = [
     'logical_xor',
     'logical_not',
     'uniform_random',
+    'uniform_random_batch_size_like',
+    'gaussian_random',
+    'gaussian_random_batch_size_like',
     'cumsum',
 ] + __activations__
 
diff --git a/python/paddle/v2/fluid/tests/unittests/op_test.py b/python/paddle/v2/fluid/tests/unittests/op_test.py
index 940e2bfb16a..4761811f0a3 100644
--- a/python/paddle/v2/fluid/tests/unittests/op_test.py
+++ b/python/paddle/v2/fluid/tests/unittests/op_test.py
@@ -248,7 +248,11 @@ class OpTest(unittest.TestCase):
 
         return feed_map
 
-    def check_output_with_place(self, place, atol):
+    def calc_output(self, place):
+        outs, _ = self._calc_output(place)
+        return outs
+
+    def _calc_output(self, place):
         op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
 
         program = Program()
@@ -281,7 +285,10 @@ class OpTest(unittest.TestCase):
             feed=feed_map,
             fetch_list=fetch_list,
             return_numpy=False)
+        return outs, fetch_list
 
+    def check_output_with_place(self, place, atol):
+        outs, fetch_list = self._calc_output(place)
         for out_name, out_dup in Operator.get_op_outputs(self.op_type):
             if out_name not in self.outputs:
                 continue
@@ -340,6 +347,15 @@ class OpTest(unittest.TestCase):
         for place in places:
             self.check_output_with_place(place, atol)
 
+    def check_output_customized(self, checker):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
+            places.append(core.CUDAPlace(0))
+        for place in places:
+            outs = self.calc_output(place)
+            outs = [np.array(out) for out in outs]
+            checker(outs)
+
     def __assert_is_close(self, numeric_grads, analytic_grads, names,
                           max_relative_error, msg_prefix):
 
diff --git a/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py b/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py
new file mode 100644
index 00000000000..1398166a74e
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestGaussianRandomBatchSizeLike(OpTest):
+    def setUp(self):
+        self.op_type = "gaussian_random_batch_size_like"
+        self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")}
+        self.attrs = {'mean': 1., 'std': 2., 'shape': [-1, 2000]}
+        self.outputs = {'Out': np.zeros((500, 2000), dtype='float32')}
+
+    def test_check_output(self):
+        self.check_output_customized(self.verify_output)
+
+    def verify_output(self, outs):
+        self.assertEqual(outs[0].shape, (500, 2000))
+        hist, _ = np.histogram(outs[0], range=(-3, 5))
+        hist = hist.astype("float32")
+        hist /= float(outs[0].size)
+        data = np.random.normal(size=(500, 2000), loc=1, scale=2)
+        hist2, _ = np.histogram(data, range=(-3, 5))
+        hist2 = hist2.astype("float32")
+        hist2 /= float(outs[0].size)
+        self.assertTrue(
+            np.allclose(
+                hist, hist2, rtol=0, atol=0.01),
+            "hist: " + str(hist) + " hist2: " + str(hist2))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py
new file mode 100644
index 00000000000..e033e86114f
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestUniformRandomBatchSizeLike(OpTest):
+    def setUp(self):
+        self.op_type = "uniform_random_batch_size_like"
+        self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")}
+        self.attrs = {'min': 1., 'max': 2., 'shape': [-1, 2000]}
+        self.outputs = {'Out': np.zeros((500, 2000), dtype='float32')}
+
+    def test_check_output(self):
+        self.check_output_customized(self.verify_output)
+
+    def verify_output(self, outs):
+        self.assertEqual(outs[0].shape, (500, 2000))
+        hist, _ = np.histogram(outs[0], range=(1, 2))
+        hist = hist.astype("float32")
+        hist /= float(outs[0].size)
+        prob = 0.1 * np.ones((10))
+        self.assertTrue(
+            np.allclose(
+                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py
index 53227716efb..75ff85a55fc 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py
@@ -13,14 +13,11 @@
 # limitations under the License.
 
 import unittest
-import numpy
+import numpy as np
+from op_test import OpTest
 
-from paddle.v2.fluid.op import Operator
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid as fluid
-
-class TestUniformRandomOp(unittest.TestCase):
+class TestUniformRandomOp(OpTest):
     def setUp(self):
         self.op_type = "uniform_random"
         self.inputs = {}
@@ -30,35 +27,20 @@ class TestUniformRandomOp(unittest.TestCase):
             "max": 10.0,
             "seed": 10
         }
-        self.outputs = ["Out"]
-
-    def test_cpu(self):
-        self.uniform_random_test(place=core.CPUPlace())
-
-    def test_gpu(self):
-        if core.is_compiled_with_cuda():
-            self.uniform_random_test(place=core.CUDAPlace(0))
-
-    def uniform_random_test(self, place):
-        program = fluid.Program()
-        block = program.global_block()
-        vout = block.create_var(name="Out")
-        op = block.append_op(
-            type=self.op_type, outputs={"Out": vout}, attrs=self.attrs)
+        self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}
 
-        op.desc.infer_var_type(block.desc)
-        op.desc.infer_shape(block.desc)
-
-        fetch_list = []
-        for var_name in self.outputs:
-            fetch_list.append(block.var(var_name))
-
-        exe = fluid.Executor(place)
-        outs = exe.run(program, fetch_list=fetch_list)
+    def test_check_output(self):
+        self.check_output_customized(self.verify_output)
 
+    def verify_output(self, outs):
         tensor = outs[0]
-
-        self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)
+        hist, _ = np.histogram(outs[0], range=(-5, 10))
+        hist = hist.astype("float32")
+        hist /= float(outs[0].size)
+        prob = 0.1 * np.ones((10))
+        self.assertTrue(
+            np.allclose(
+                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
 
 if __name__ == "__main__":
-- 
GitLab
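
Usage note: a minimal sketch of how the two new layers could be called once
this patch is applied. The layer names come from the __all__ list added to
python/paddle/v2/fluid/layers/ops.py above; the keyword-argument names below
(input, shape, min, max, mean, std) are assumptions inferred from the operator
inputs and attributes defined in this patch, since generate_layer_fn derives
the Python signatures from the operator protos and may spell them differently.

    import paddle.v2.fluid as fluid
    import paddle.v2.fluid.layers as layers

    # A placeholder whose batch dimension is unknown until runtime.
    x = layers.data(name='x', shape=[2000], dtype='float32')

    # Uniform noise; dimension 0 of the output follows x's batch size,
    # so the -1 in shape is resolved per minibatch at executor time.
    u = layers.uniform_random_batch_size_like(
        input=x, shape=[-1, 2000], min=-1.0, max=1.0)

    # Gaussian noise with the same batch-size-like shape inference.
    g = layers.gaussian_random_batch_size_like(
        input=x, shape=[-1, 2000], mean=0.0, std=1.0)

Both operators take the batch size from the Input tensor's input_dim_idx'th
dimension and write it into the output_dim_idx'th entry of shape, which is
exactly the InferShape logic factored into BatchSizeLikeOp above.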