Commit cb7bbf42 authored by Yancey1989

revert uniform_random_op

Parent 291aa231
@@ -24,19 +24,8 @@ template <typename T>
 class CPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    framework::Tensor* tensor(nullptr);
-    auto out_var = ctx.OutputVar("Out");
-    if (out_var->IsType<framework::LoDTensor>()) {
-      tensor = ctx.Output<framework::LoDTensor>("Out");
-    } else if (out_var->IsType<framework::SelectedRows>()) {
-      auto shape = ctx.Attr<std::vector<int>>("shape");
-      tensor = ctx.Output<framework::SelectedRows>("Out")->mutable_value();
-      tensor->Resize(framework::make_ddim(shape));
-    } else {
-      PADDLE_THROW("Only support LoDTensor and SelectedRows.");
-    }
+    auto* tensor = ctx.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(ctx.GetPlace());
-    data[0] = static_cast<T>(1000);
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
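The hunk cuts off just after the seeding branch. For readers without the full file, here is a minimal sketch of how the reverted CPU kernel plausibly continues, assuming the usual std::random_device fallback for seed == 0 and a std::uniform_real_distribution fill loop; it also assumes the surrounding Paddle headers and float "min"/"max" attributes, and is an illustration rather than the verbatim upstream code:

// Sketch only: a plausible completion of the CPU kernel above. The
// seed fallback and fill loop follow the standard <random> pattern;
// they are assumptions, not verbatim upstream code.
#include <random>

template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* tensor = ctx.Output<framework::Tensor>("Out");
    T* data = tensor->mutable_data<T>(ctx.GetPlace());
    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
    std::minstd_rand engine;
    if (seed == 0) {
      seed = std::random_device()();  // assumed fallback for seed == 0
    }
    engine.seed(seed);
    std::uniform_real_distribution<T> dist(
        static_cast<T>(ctx.Attr<float>("min")),
        static_cast<T>(ctx.Attr<float>("max")));
    int64_t size = tensor->numel();
    for (int64_t i = 0; i < size; ++i) {
      data[i] = dist(engine);  // fill the output with uniform samples
    }
  }
};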
@@ -43,18 +43,7 @@ template <typename T>
 class GPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    framework::Tensor* tensor(nullptr);
-    auto out_var = context.OutputVar("Out");
-    if (out_var->IsType<framework::LoDTensor>()) {
-      tensor = context.Output<framework::LoDTensor>("Out");
-    } else if (out_var->IsType<framework::SelectedRows>()) {
-      auto shape = context.Attr<std::vector<int>>("shape");
-      tensor = context.Output<framework::SelectedRows>("Out")->mutable_value();
-      tensor->Resize(framework::make_ddim(shape));
-    } else {
-      PADDLE_THROW("Only support LoDTensor and SelectedRows.");
-    }
+    auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
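The GPU hunk likewise truncates before the random fill. CUDA uniform kernels of this era typically seed a per-element thrust engine and write the buffer with thrust::transform over a counting iterator; the sketch below follows that pattern. The functor name and the commented call site are illustrative assumptions, not verbatim upstream code:

// Sketch only: a plausible thrust-based fill for the GPU kernel above.
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>

template <typename T>
struct UniformGenerator {
  T min_, max_;
  unsigned int seed_;

  __host__ __device__ UniformGenerator(T min, T max, unsigned int seed)
      : min_(min), max_(max), seed_(seed) {}

  __host__ __device__ T operator()(const unsigned int n) const {
    thrust::minstd_rand rng;
    rng.seed(seed_);
    thrust::uniform_real_distribution<T> dist(min_, max_);
    rng.discard(n);  // decorrelate elements generated in parallel
    return dist(rng);
  }
};

// Assumed call site inside GPUUniformRandomKernel<T>::Compute, after
// the seed has been chosen:
//   T min = static_cast<T>(context.Attr<float>("min"));
//   T max = static_cast<T>(context.Attr<float>("max"));
//   thrust::counting_iterator<unsigned int> first(0);
//   thrust::transform(first, first + tensor->numel(),
//                     thrust::device_ptr<T>(data),
//                     UniformGenerator<T>(min, max, seed));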
@@ -15,16 +15,6 @@
 import unittest
 import numpy as np
 from op_test import OpTest
-import paddle.fluid.core as core
-from paddle.fluid.op import Operator
-
-
-def output_hist(out):
-    hist, _ = np.histogram(out, range=(-5, 10))
-    hist = hist.astype("float32")
-    hist /= float(out.size)
-    prob = 0.1 * np.ones((10))
-    return hist, prob
 
 
 class TestUniformRandomOp(OpTest):
@@ -43,37 +33,11 @@ class TestUniformRandomOp(OpTest):
         self.check_output_customized(self.verify_output)
 
     def verify_output(self, outs):
-        hist, prob = output_hist(outs[0])
-        self.assertTrue(
-            np.allclose(
-                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
-
-
-class TestUniformRandomOpSelectedRows(unittest.TestCase):
-    def get_places(self):
-        places = [core.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(core.CUDAPlace(0))
-        return places
-
-    def test_check_output(self):
-        for place in self.get_places():
-            self.check_with_place(place)
-
-    def check_with_place(self, place):
-        scope = core.Scope()
-        out = scope.var("X").get_selected_rows()
-        op = Operator(
-            "uniform_random",
-            Out="X",
-            shape=[1000, 784],
-            min=-5.0,
-            max=10.0,
-            seed=10)
-        op.run(scope, place)
-        out_tensor = out.get_tensor()
-        hist, prob = output_hist(np.array(out_tensor))
+        tensor = outs[0]
+        hist, _ = np.histogram(outs[0], range=(-5, 10))
+        hist = hist.astype("float32")
+        hist /= float(outs[0].size)
+        prob = 0.1 * np.ones((10))
         self.assertTrue(
             np.allclose(
                 hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
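The check retained in verify_output relies on np.histogram defaulting to ten bins over range=(-5, 10): a uniform sample lands in each 1.5-wide bin with probability 0.1, which is exactly what prob = 0.1 * np.ones((10)) encodes, and atol=0.01 absorbs sampling noise. The same check written out as standalone C++, for illustration only; the sample count and seed mirror the removed SelectedRows test:

// Standalone illustration of the test's histogram check: draw uniform
// samples on [-5, 10), bucket them into 10 equal bins, and verify each
// bin holds roughly 10% of the mass (the Python test uses atol=0.01).
#include <array>
#include <cassert>
#include <cmath>
#include <random>

int main() {
  const int kSamples = 1000 * 784;  // same element count as the old test
  std::minstd_rand engine(10);      // seed=10, as in the removed test
  std::uniform_real_distribution<float> dist(-5.0f, 10.0f);

  std::array<int, 10> hist{};
  for (int i = 0; i < kSamples; ++i) {
    float v = dist(engine);
    int bin = static_cast<int>((v + 5.0f) / 1.5f);  // bin width = 15 / 10
    if (bin == 10) bin = 9;  // guard the right edge
    ++hist[bin];
  }
  for (int count : hist) {
    float frequency = static_cast<float>(count) / kSamples;
    assert(std::fabs(frequency - 0.1f) < 0.01f);  // each bin ~= 10%
  }
  return 0;
}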