From f909ff1a3652697f63070cf1bc8cb425d1902417 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Mon, 9 Apr 2018 15:53:00 +0800
Subject: [PATCH] update unit test

---
 paddle/fluid/operators/uniform_random_op.cc   |  5 +-
 paddle/fluid/operators/uniform_random_op.cu   | 13 +++++
 .../tests/unittests/test_uniform_random_op.py | 46 +++++++++++++++++--
 3 files changed, 56 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index a50add9739d..d8b38fb7eb5 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -29,11 +29,14 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
     if (out_var->IsType<framework::LoDTensor>()) {
       tensor = ctx.Output<framework::LoDTensor>("Out");
     } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = ctx.Attr<std::vector<int>>("shape");
       tensor = ctx.Output<framework::SelectedRows>("Out")->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
     } else {
       PADDLE_THROW("Only support LoDTensor and SelectedRows.");
     }
     T* data = tensor->mutable_data<T>(ctx.GetPlace());
+    data[0] = static_cast<T>(1000);
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
@@ -44,7 +47,6 @@
         static_cast<T>(ctx.Attr<float>("min")),
         static_cast<T>(ctx.Attr<float>("max")));
     int64_t size = tensor->numel();
-    VLOG(3) << "size = " << size;
     for (int64_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
     }
@@ -64,7 +66,6 @@ class UniformRandomOp : public framework::OperatorWithKernel {
                    "uniform_random's min must less then max");
     auto& shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
-    VLOG(3) << "shape.size() = " << shape.size();
     temp.reserve(shape.size());
     for (auto dim : shape) {
       temp.push_back(static_cast<int64_t>(dim));
diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu
index 1232cd1eb33..115c8595278 100644
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -43,7 +43,18 @@ template <typename T>
 class GPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* tensor = context.Output<framework::Tensor>("Out");
+    framework::Tensor* tensor = nullptr;
+    auto out_var = context.OutputVar("Out");
+    if (out_var->IsType<framework::LoDTensor>()) {
+      tensor = context.Output<framework::LoDTensor>("Out");
+    } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = context.Attr<std::vector<int>>("shape");
+      tensor = context.Output<framework::SelectedRows>("Out")->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
+    } else {
+      PADDLE_THROW("Only support LoDTensor and SelectedRows.");
+    }
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index 75ff85a55fc..3331e99c366 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -15,6 +15,16 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+
+
+def output_hist(out):
+    hist, _ = np.histogram(out, range=(-5, 10))
+    hist = hist.astype("float32")
+    hist /= float(out.size)
+    prob = 0.1 * np.ones((10))
+    return hist, prob
 
 
 class TestUniformRandomOp(OpTest):
@@ -33,11 +43,37 @@ class TestUniformRandomOp(OpTest):
         self.check_output_customized(self.verify_output)
 
     def verify_output(self, outs):
-        tensor = outs[0]
-        hist, _ = np.histogram(outs[0], range=(-5, 10))
-        hist = hist.astype("float32")
-        hist /= float(outs[0].size)
-        prob = 0.1 * np.ones((10))
+        hist, prob = output_hist(outs[0])
         self.assertTrue(
             np.allclose(
                 hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
+
+
+class TestUniformRandomOpSelectedRows(unittest.TestCase):
+    def get_places(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(core.CUDAPlace(0))
+        return places
+
+    def test_check_output(self):
+        for place in self.get_places():
+            self.check_with_place(place)
+
+    def check_with_place(self, place):
+        scope = core.Scope()
+        out = scope.var("X").get_selected_rows()
+
+        op = Operator(
+            "uniform_random",
+            Out="X",
+            shape=[1000, 784],
+            min=-5.0,
+            max=10.0,
+            seed=10)
+        op.run(scope, place)
+        out_tensor = out.get_tensor()
+        hist, prob = output_hist(np.array(out_tensor))
+        self.assertTrue(
+            np.allclose(
+                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
--
GitLab