Commit f909ff1a authored by Yancey1989

update unit test

Parent 972ae6e9
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -29,11 +29,14 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
     if (out_var->IsType<framework::LoDTensor>()) {
       tensor = ctx.Output<framework::LoDTensor>("Out");
     } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = ctx.Attr<std::vector<int>>("shape");
       tensor = ctx.Output<framework::SelectedRows>("Out")->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
     } else {
       PADDLE_THROW("Only support LoDTensor and SelectedRows.");
     }
     T* data = tensor->mutable_data<T>(ctx.GetPlace());
+    data[0] = static_cast<T>(1000);
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
@@ -44,7 +47,6 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
         static_cast<T>(ctx.Attr<float>("min")),
         static_cast<T>(ctx.Attr<float>("max")));
     int64_t size = tensor->numel();
-    VLOG(3) << "size = " << size;
     for (int64_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
     }
@@ -64,7 +66,6 @@ class UniformRandomOp : public framework::OperatorWithKernel {
                       "uniform_random's min must less then max");
     auto& shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
-    VLOG(3) << "shape.size() = " << shape.size();
     temp.reserve(shape.size());
     for (auto dim : shape) {
       temp.push_back(static_cast<int64_t>(dim));
......
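
The Resize added above matters because a freshly created SelectedRows carries an empty value tensor, so the kernel has to shape it from the `shape` attribute before mutable_data can allocate. A minimal sketch of exercising that branch from Python, mirroring the unit test added below (CPU place only):

import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator

scope = core.Scope()
# Creating the output as SelectedRows (not LoDTensor) routes the kernel
# into the branch above that resizes the value tensor from `shape`.
out = scope.var("Out").get_selected_rows()
op = Operator(
    "uniform_random", Out="Out", shape=[1000, 784], min=-5.0, max=10.0,
    seed=10)
op.run(scope, core.CPUPlace())
print(np.array(out.get_tensor()).shape)  # expected: (1000, 784)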
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -43,7 +43,18 @@ template <typename T>
 class GPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* tensor = context.Output<framework::Tensor>("Out");
+    framework::Tensor* tensor(nullptr);
+    auto out_var = context.OutputVar("Out");
+    if (out_var->IsType<framework::LoDTensor>()) {
+      tensor = context.Output<framework::LoDTensor>("Out");
+    } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = context.Attr<std::vector<int>>("shape");
+      tensor = context.Output<framework::SelectedRows>("Out")->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
+    } else {
+      PADDLE_THROW("Only support LoDTensor and SelectedRows.");
+    }
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
......
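
Both kernels fall back to a random seed only when the `seed` attribute is zero, which suggests (an assumption, not verified here) that a nonzero seed makes the op reproducible. A quick sketch of that assumption on the CPU place, reusing the Scope/Operator API from the test below:

import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator

def run_once():
    scope = core.Scope()
    out = scope.var("Out").get_selected_rows()
    op = Operator(
        "uniform_random", Out="Out", shape=[32, 16], min=-5.0, max=10.0,
        seed=10)
    op.run(scope, core.CPUPlace())
    return np.array(out.get_tensor())

# Assumption: with seed != 0 the engine is seeded deterministically,
# so two independent runs should produce identical values.
assert np.array_equal(run_once(), run_once())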
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -15,6 +15,16 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+
+
+def output_hist(out):
+    hist, _ = np.histogram(out, range=(-5, 10))
+    hist = hist.astype("float32")
+    hist /= float(out.size)
+    prob = 0.1 * np.ones((10))
+    return hist, prob


 class TestUniformRandomOp(OpTest):
@@ -33,11 +43,37 @@ class TestUniformRandomOp(OpTest):
         self.check_output_customized(self.verify_output)

     def verify_output(self, outs):
-        tensor = outs[0]
-        hist, _ = np.histogram(outs[0], range=(-5, 10))
-        hist = hist.astype("float32")
-        hist /= float(outs[0].size)
-        prob = 0.1 * np.ones((10))
+        hist, prob = output_hist(outs[0])
+        self.assertTrue(
+            np.allclose(
+                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
+
+
+class TestUniformRandomOpSelectedRows(unittest.TestCase):
+    def get_places(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(core.CUDAPlace(0))
+        return places
+
+    def test_check_output(self):
+        for place in self.get_places():
+            self.check_with_place(place)
+
+    def check_with_place(self, place):
+        scope = core.Scope()
+        out = scope.var("X").get_selected_rows()
+        op = Operator(
+            "uniform_random",
+            Out="X",
+            shape=[1000, 784],
+            min=-5.0,
+            max=10.0,
+            seed=10)
+        op.run(scope, place)
+        out_tensor = out.get_tensor()
+        hist, prob = output_hist(np.array(out_tensor))
         self.assertTrue(
             np.allclose(
                 hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
......
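
Both tests lean on the shared output_hist helper: ten equal-width histogram bins over [-5, 10) should each capture roughly 10% of a large uniform sample. A standalone sanity check with plain NumPy (synthetic data, independent of the op itself):

import numpy as np

def output_hist(out):
    hist, _ = np.histogram(out, range=(-5, 10))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob

# 784,000 uniform samples keep every bin well within the 0.01 tolerance
# used by the tests above.
samples = np.random.uniform(low=-5.0, high=10.0, size=(1000, 784))
hist, prob = output_hist(samples)
assert np.allclose(hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)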