BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 2c514102 (unverified)
Authored Oct 14, 2018 by chengduo; committed by GitHub on Oct 14, 2018
fix layers.uniform_random (#13859)
test=release/1.0.0
Parent: cddff20d
Showing 2 changed files with 28 additions and 21 deletions:

paddle/fluid/operators/uniform_random_op.cc   +16 -16
python/paddle/fluid/layers/ops.py             +12 -5
paddle/fluid/operators/uniform_random_op.cc

@@ -23,14 +23,14 @@ namespace operators {
 template <typename T>
 class CPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     framework::Tensor* tensor = nullptr;
     auto out_var = ctx.OutputVar("Out");
     if (out_var->IsType<framework::LoDTensor>()) {
       tensor = out_var->GetMutable<framework::LoDTensor>();
     } else if (out_var->IsType<framework::SelectedRows>()) {
       auto shape = ctx.Attr<std::vector<int>>("shape");
       auto* selected_rows = out_var->GetMutable<framework::SelectedRows>();
       tensor = selected_rows->mutable_value();
       tensor->Resize(framework::make_ddim(shape));
       selected_rows->mutable_rows()->reserve(shape[0]);
@@ -39,7 +39,7 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
                      "uniform_random_op's output only"
                      "supports SelectedRows and LoDTensor");
     }
     T* data = tensor->mutable_data<T>(ctx.GetPlace());
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
@@ -60,14 +60,14 @@ class UniformRandomOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of UniformRandomOp should not be null.");
     PADDLE_ENFORCE(
         ctx->Attrs().Get<float>("min") < ctx->Attrs().Get<float>("max"),
         "uniform_random's min must less then max");
     auto& shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
     temp.reserve(shape.size());
     for (auto dim : shape) {
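The `min < max` enforcement in `InferShape` above is visible from the Python API: an empty range is rejected when the op is added to the program, not at run time. A minimal sketch of that behavior (fluid 1.x static-graph API; the exact exception type and message are not pinned down by this patch):

```python
import paddle.fluid as fluid

try:
    # min >= max trips the PADDLE_ENFORCE in InferShape above
    fluid.layers.uniform_random(shape=[2], min=1.0, max=-1.0)
except Exception as e:
    print(type(e).__name__)  # the enforce failure surfaces as a Python exception
```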
@@ -78,7 +78,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
     return framework::OpKernelType(
         static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype")),
         ctx.GetPlace());
@@ -112,17 +112,17 @@ uniform distribution. The random result is in set [min, max].
 class UniformRandomOpVarTypeInference : public framework::VarTypeInference {
  public:
   void operator()(const framework::OpDesc& op_desc,
                   framework::BlockDesc* block) const override {
     auto out_var_name = op_desc.Output("Out").front();
-    if (block->FindRecursiveOrCreateVar(out_var_name).GetType() ==
-        framework::proto::VarType::SELECTED_ROWS) {
-      block->FindRecursiveOrCreateVar(out_var_name)
-          .SetType(framework::proto::VarType::SELECTED_ROWS);
-    } else {
-      block->FindRecursiveOrCreateVar(out_var_name)
-          .SetType(framework::proto::VarType::LOD_TENSOR);
-    }
+    auto var_data_type = static_cast<framework::proto::VarType::Type>(
+        boost::get<int>(op_desc.GetAttr("dtype")));
+
+    auto out_var = block->FindRecursiveOrCreateVar(out_var_name);
+    if (out_var.GetType() != framework::proto::VarType::SELECTED_ROWS) {
+      out_var.SetType(framework::proto::VarType::LOD_TENSOR);
+    }
+    out_var.SetDataType(var_data_type);
   }
 };
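The substantive change in this file is the `UniformRandomOpVarTypeInference` rewrite: besides choosing between `SELECTED_ROWS` and `LOD_TENSOR`, it now reads the op's `dtype` attribute and stamps it onto the output variable via `SetDataType`. At the Python level the returned variable's dtype should therefore track the `dtype` argument; a hedged sketch (fluid 1.0-era API, shape chosen arbitrarily):

```python
import paddle.fluid as fluid

# After this patch, var-type inference copies the `dtype` attr onto the
# output variable instead of leaving it at the framework default.
out = fluid.layers.uniform_random(shape=[2, 3], dtype='float32',
                                  min=-1.0, max=1.0, seed=0)
print(out.dtype)  # expected to report FP32 once the op has been appended
```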
python/paddle/fluid/layers/ops.py

@@ -14,6 +14,8 @@
 from __future__ import print_function
 from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr
+from .. import core
+from ..framework import convert_np_dtype_to_dtype_

 __activations_noattr__ = [
     'sigmoid',
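These two imports feed the dtype normalization added to `uniform_random` below: `convert_np_dtype_to_dtype_` maps a numpy-style dtype spec to the `core.VarDesc.VarType` enum that op attributes expect. An illustrative sketch (assuming the fluid 1.x helper behaves as its name suggests):

```python
from paddle.fluid import core
from paddle.fluid.framework import convert_np_dtype_to_dtype_

# 'float32' (or numpy.float32) becomes the framework's enum value
assert convert_np_dtype_to_dtype_('float32') == core.VarDesc.VarType.FP32
```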
@@ -58,8 +60,11 @@ _uniform_random_ = generate_layer_fn('uniform_random')
 def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
+    locals_var = locals().keys()
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
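The removed `for name in locals():` line is the actual bug: the loop body calls `locals()` again, which re-syncs the very dict being iterated (inserting `name` and `val`), so the loop dies with `RuntimeError: dictionary changed size during iteration` on Python 2 and on CPython 3 up to 3.12. A stripped-down reproduction, independent of Paddle (function and argument names invented; note the patch's `locals().keys()` returns a list copy on Python 2, whereas Python 3 code would want `list(locals().keys())` as below):

```python
def broken(a=1, b=None):
    kwargs = dict()
    for name in locals():       # iterates the live locals() dict
        val = locals()[name]    # this call inserts 'name' and 'val' into it
        if val is not None:
            kwargs[name] = val  # dict grew -> RuntimeError on the next step
    return kwargs

def fixed(a=1, b=None):
    names = list(locals().keys())  # snapshot the argument names up front
    kwargs = dict()
    for name in names:
        val = locals()[name]
        if val is not None:
            kwargs[name] = val
    return kwargs

print(fixed(a=2))  # {'a': 2}
# broken(a=2) raises RuntimeError: dictionary changed size during iteration
```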
@@ -78,8 +83,9 @@ _hard_shrink_ = generate_layer_fn('hard_shrink')
 def hard_shrink(x, threshold=None):
+    locals_var = locals().keys()
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
@@ -99,12 +105,12 @@ _cum_sum_ = generate_layer_fn('cumsum')
 def cumsum(x, axis=None, exclusive=None, reverse=None):
+    locals_var = locals().keys()
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
     return _cum_sum_(**kwargs)
@@ -121,8 +127,9 @@ _thresholded_relu_ = generate_layer_fn('thresholded_relu')
 def thresholded_relu(x, threshold=None):
+    locals_var = locals().keys()
     kwargs = dict()
-    for name in locals():
+    for name in locals_var:
         val = locals()[name]
         if val is not None:
             kwargs[name] = val
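Taken together, each wrapper now snapshots its argument names before building `kwargs`, and `uniform_random` additionally normalizes `dtype`, so only explicitly supplied values reach the generated op. A hedged end-to-end sketch of the repaired wrapper (argument values invented):

```python
import paddle.fluid as fluid

# Arguments left as None are filtered out and fall back to the op's
# attribute defaults; everything else is forwarded.
x = fluid.layers.uniform_random(shape=[4, 8], dtype='float32', seed=10)
# roughly equivalent to invoking the generated op with
#   {'shape': [4, 8], 'dtype': VarType.FP32, 'seed': 10}
```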