Unverified commit 2c514102, authored by chengduo, committed by GitHub

fix layers.uniform_random (#13859)

test=release/1.0.0
Parent: cddff20d
@@ -23,14 +23,14 @@ namespace operators {
 template <typename T>
 class CPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    framework::Tensor* tensor = nullptr;
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    framework::Tensor *tensor = nullptr;
     auto out_var = ctx.OutputVar("Out");
     if (out_var->IsType<framework::LoDTensor>()) {
       tensor = out_var->GetMutable<framework::LoDTensor>();
     } else if (out_var->IsType<framework::SelectedRows>()) {
       auto shape = ctx.Attr<std::vector<int>>("shape");
-      auto* selected_rows = out_var->GetMutable<framework::SelectedRows>();
+      auto *selected_rows = out_var->GetMutable<framework::SelectedRows>();
       tensor = selected_rows->mutable_value();
       tensor->Resize(framework::make_ddim(shape));
       selected_rows->mutable_rows()->reserve(shape[0]);
@@ -39,7 +39,7 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
                    "uniform_random_op's output only"
                    "supports SelectedRows and LoDTensor");
     }
-    T* data = tensor->mutable_data<T>(ctx.GetPlace());
+    T *data = tensor->mutable_data<T>(ctx.GetPlace());
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
@@ -60,14 +60,14 @@ class UniformRandomOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-  void InferShape(framework::InferShapeContext* ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of UniformRandomOp should not be null.");
     PADDLE_ENFORCE(
         ctx->Attrs().Get<float>("min") < ctx->Attrs().Get<float>("max"),
         "uniform_random's min must less then max");
-    auto& shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
     temp.reserve(shape.size());
     for (auto dim : shape) {
@@ -78,7 +78,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
+      const framework::ExecutionContext &ctx) const override {
     return framework::OpKernelType(
         static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype")),
         ctx.GetPlace());
@@ -112,17 +112,17 @@ uniform distribution. The random result is in set [min, max].
 class UniformRandomOpVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(const framework::OpDesc& op_desc,
-                  framework::BlockDesc* block) const override {
+  void operator()(const framework::OpDesc &op_desc,
+                  framework::BlockDesc *block) const override {
     auto out_var_name = op_desc.Output("Out").front();
-    if (block->FindRecursiveOrCreateVar(out_var_name).GetType() ==
-        framework::proto::VarType::SELECTED_ROWS) {
-      block->FindRecursiveOrCreateVar(out_var_name)
-          .SetType(framework::proto::VarType::SELECTED_ROWS);
-    } else {
-      block->FindRecursiveOrCreateVar(out_var_name)
-          .SetType(framework::proto::VarType::LOD_TENSOR);
+    auto var_data_type = static_cast<framework::proto::VarType::Type>(
+        boost::get<int>(op_desc.GetAttr("dtype")));
+
+    auto out_var = block->FindRecursiveOrCreateVar(out_var_name);
+    if (out_var.GetType() != framework::proto::VarType::SELECTED_ROWS) {
+      out_var.SetType(framework::proto::VarType::LOD_TENSOR);
     }
+    out_var.SetDataType(var_data_type);
   }
 };
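Together with the Python-side wrapper change below, the VarTypeInference fix makes the compile-time output Variable carry the data type requested through the "dtype" attribute, and a variable already typed as SELECTED_ROWS is no longer retyped. A minimal check of the inferred dtype, illustrative rather than part of the commit, assuming the release/1.0.0 paddle.fluid API:

import paddle.fluid as fluid

# With the fix, the inferred dtype of the output Variable should follow the
# requested dtype instead of staying at the FP32 default.
out = fluid.layers.uniform_random(
    shape=[2, 3], dtype='float64', min=-1.0, max=1.0)
print(out.dtype)  # expected: FP64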
@@ -14,6 +14,8 @@
 from __future__ import print_function
 from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr
+from .. import core
+from ..framework import convert_np_dtype_to_dtype_
 
 __activations_noattr__ = [
     'sigmoid',
@@ -58,8 +60,11 @@ _uniform_random_ = generate_layer_fn('uniform_random')
 def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
+    locals_var = locals().keys()
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
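Not part of the commit, but a minimal end-to-end usage sketch of the patched wrapper, assuming the release/1.0.0 paddle.fluid API: dtype may be passed as a string (or numpy dtype) and is normalized to a core.VarDesc.VarType via convert_np_dtype_to_dtype_ before the kwargs are forwarded to the generated op call.

import paddle.fluid as fluid

out = fluid.layers.uniform_random(
    shape=[2, 3], dtype='float32', min=-1.0, max=1.0, seed=1)

# uniform_random creates no parameters, so the main program can run directly.
exe = fluid.Executor(fluid.CPUPlace())
result, = exe.run(fluid.default_main_program(), fetch_list=[out])
print(result.shape)  # expected: (2, 3)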
@@ -78,8 +83,9 @@ _hard_shrink_ = generate_layer_fn('hard_shrink')
 def hard_shrink(x, threshold=None):
+    locals_var = locals().keys()
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
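The locals_var = locals().keys() lines here (and the identical changes to cumsum and thresholded_relu below) capture the argument names before the loop. In CPython, calling locals() inside the loop re-syncs the frame's locals dictionary and adds the loop's own names to it, so iterating locals() directly can fail with "RuntimeError: dictionary changed size during iteration"; under Python 2, dict.keys() returns a list, which makes the captured snapshot stable. A standalone sketch of the failure mode and the fix, illustrative rather than Paddle code, using an explicit list(...) copy so it also holds under Python 3:

def broken(x=1, y=None):
    kwargs = dict()
    for name in locals():      # iterates the live frame-locals dict; raises
                               # RuntimeError: dictionary changed size during iteration
        val = locals()[name]   # re-syncing locals() here adds the new local 'name'
        if val is not None:
            kwargs[name] = val
    return kwargs

def fixed(x=1, y=None):
    locals_var = list(locals().keys())  # snapshot of the argument names
    kwargs = dict()
    for name in locals_var:
        val = locals()[name]
        if val is not None:
            kwargs[name] = val
    return kwargs

print(fixed(x=3))  # {'x': 3}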
@@ -99,12 +105,12 @@ _cum_sum_ = generate_layer_fn('cumsum')
 def cumsum(x, axis=None, exclusive=None, reverse=None):
+    locals_var = locals().keys()
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
     return _cum_sum_(**kwargs)
@@ -121,8 +127,9 @@ _thresholded_relu_ = generate_layer_fn('thresholded_relu')
 def thresholded_relu(x, threshold=None):
+    locals_var = locals().keys()
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val