Commit 291aa231 authored by Yancey1989

Merge branch 'random_selected_rows_value' of github.com:Yancey1989/Paddle into random_selected_rows_value
@@ -24,8 +24,19 @@ template <typename T>
 class CPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* tensor = ctx.Output<framework::Tensor>("Out");
+    framework::Tensor* tensor(nullptr);
+    auto out_var = ctx.OutputVar("Out");
+    if (out_var->IsType<framework::LoDTensor>()) {
+      tensor = ctx.Output<framework::LoDTensor>("Out");
+    } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = ctx.Attr<std::vector<int>>("shape");
+      tensor = ctx.Output<framework::SelectedRows>("Out")->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
+    } else {
+      PADDLE_THROW("Only support LoDTensor and SelectedRows.");
+    }
     T* data = tensor->mutable_data<T>(ctx.GetPlace());
+    data[0] = static_cast<T>(1000);
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
...
@@ -43,7 +43,18 @@ template <typename T>
 class GPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* tensor = context.Output<framework::Tensor>("Out");
+    framework::Tensor* tensor(nullptr);
+    auto out_var = context.OutputVar("Out");
+    if (out_var->IsType<framework::LoDTensor>()) {
+      tensor = context.Output<framework::LoDTensor>("Out");
+    } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = context.Attr<std::vector<int>>("shape");
+      tensor = context.Output<framework::SelectedRows>("Out")->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
+    } else {
+      PADDLE_THROW("Only support LoDTensor and SelectedRows.");
+    }
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
...
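Both kernels now choose the output buffer by inspecting the concrete type held in the "Out" variable: a LoDTensor output keeps the old behavior, while a SelectedRows output has its value tensor resized from the "shape" attribute before being filled; any other type triggers PADDLE_THROW. Below is a minimal sketch of driving that SelectedRows path from Python, using only the scope/Operator calls that also appear in the unit test added later in this commit (the variable name "Out1" and the shape/seed values are illustrative):

```python
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator

# Build a scope with a SelectedRows output and run uniform_random into it.
scope = core.Scope()
selected_rows = scope.var("Out1").get_selected_rows()

op = Operator(
    "uniform_random",
    Out="Out1",
    shape=[100, 16],  # becomes the shape of the SelectedRows value tensor
    min=-5.0,
    max=10.0,
    seed=10)
op.run(scope, core.CPUPlace())

# The kernel wrote into the value tensor of the SelectedRows variable.
value = np.array(selected_rows.get_tensor())
print(value.shape)  # (100, 16), taken from the 'shape' attribute
```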
@@ -15,6 +15,16 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+
+
+def output_hist(out):
+    hist, _ = np.histogram(out, range=(-5, 10))
+    hist = hist.astype("float32")
+    hist /= float(out.size)
+    prob = 0.1 * np.ones((10))
+    return hist, prob
+
+
 class TestUniformRandomOp(OpTest):
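The output_hist helper relies on np.histogram's default of 10 equal-width bins over range=(-5, 10); if the samples are genuinely uniform on [-5, 10), each bin should receive about one tenth of them, which is exactly the flat expectation prob = 0.1 * np.ones((10)) that the assertions below compare against with atol=0.01. A small stand-alone sanity check of that reasoning in plain NumPy (the sample shape and seed are arbitrary):

```python
import numpy as np

def output_hist(out):
    # Same normalization as in the test: fraction of samples per bin,
    # compared against the flat 0.1-per-bin expectation.
    hist, _ = np.histogram(out, range=(-5, 10))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob

samples = np.random.RandomState(0).uniform(-5.0, 10.0, size=(1000, 784))
hist, prob = output_hist(samples)
assert np.allclose(hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)
```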
@@ -33,11 +43,37 @@ class TestUniformRandomOp(OpTest):
         self.check_output_customized(self.verify_output)

     def verify_output(self, outs):
-        tensor = outs[0]
-        hist, _ = np.histogram(outs[0], range=(-5, 10))
-        hist = hist.astype("float32")
-        hist /= float(outs[0].size)
-        prob = 0.1 * np.ones((10))
+        hist, prob = output_hist(outs[0])
         self.assertTrue(
             np.allclose(
                 hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
+
+
+class TestUniformRandomOpSelectedRows(unittest.TestCase):
+    def get_places(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(core.CUDAPlace(0))
+        return places
+
+    def test_check_output(self):
+        for place in self.get_places():
+            self.check_with_place(place)
+
+    def check_with_place(self, place):
+        scope = core.Scope()
+        out = scope.var("X").get_selected_rows()
+        op = Operator(
+            "uniform_random",
+            Out="X",
+            shape=[1000, 784],
+            min=-5.0,
+            max=10.0,
+            seed=10)
+        op.run(scope, place)
+        out_tensor = out.get_tensor()
+        hist, prob = output_hist(np.array(out_tensor))
+        self.assertTrue(
+            np.allclose(
+                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
...