机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit a0acfc6a

Authored Oct 31, 2017 by zchen0211

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop

Parents: 8d3324d7, 0318f47e

Showing 31 changed files with 360 additions and 138 deletions (+360, -138)
paddle/framework/attribute.cc                                   +3   -7
paddle/framework/attribute.h                                    +1   -1
paddle/framework/backward.cc                                   +30   -5
paddle/framework/backward_test.cc                               +7   -7
paddle/framework/block_desc.cc                                  +1   -1
paddle/framework/executor.cc                                   +13  -14
paddle/framework/executor.h                                     +2   -2
paddle/framework/op_desc.cc                                    +24   -4
paddle/framework/op_registry.cc                                 +5   -3
paddle/framework/op_registry.h                                  +1   -2
paddle/framework/op_registry_test.cc                            +6   -6
paddle/framework/operator.cc                                   +14   -0
paddle/framework/operator_test.cc                               +3   -3
paddle/framework/program_desc.h                                 +3   -1
paddle/framework/program_desc_test.cc                           +4   -4
paddle/framework/prune_test.cc                                  +5   -5
paddle/framework/shape_inference.cc                             +0   -3
paddle/framework/shape_inference.h                              +2   -3
paddle/framework/type_defs.h                                    +1   -1
paddle/framework/var_type_inference_test.cc                    +19  -17
paddle/operators/dynamic_recurrent_op_test.cc                   +1   -1
paddle/operators/gaussian_random_op.cc                          +6   -6
paddle/operators/lookup_table_op.cc                             +2   -2
paddle/operators/lookup_table_op.cu                            +13  -13
paddle/operators/lookup_table_op.h                             +14  -14
paddle/operators/sequence_conv_op.cc                            +1   -1
paddle/pybind/protobuf.cc                                       +2   -1
paddle/pybind/pybind.cc                                         +6   -9
python/paddle/v2/framework/initializer.py                      +50   -1
python/paddle/v2/framework/tests/test_gaussian_random_op.py     +1   -1
python/paddle/v2/framework/tests/test_initializer.py          +120   -0
paddle/framework/attribute.cc

@@ -19,7 +19,7 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* program) {
+Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
   switch (attr_desc.type()) {
     case framework::AttrType::BOOLEAN: {
       return attr_desc.b();
@@ -61,13 +61,9 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* program) {
       }
       return val;
     }
-    case framework::AttrType::BLOCK: {
-      PADDLE_ENFORCE(program != nullptr,
-                     "Need to specify ProgramDesc when get a block attr");
-      return program->mutable_blocks(attr_desc.block_idx());
-    }
+    default:
+      PADDLE_THROW("Unsupport attr type %d", attr_desc.type());
   }
-  PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
-  return boost::blank();
 }
paddle/framework/attribute.h

@@ -32,7 +32,7 @@ inline AttrType AttrTypeID() {
   return static_cast<AttrType>(tmp.which() - 1);
 }
 
-Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* desc);
+Attribute GetAttrValue(const OpDesc::Attr& attr_desc);
 
 class AttrReader {
  public:
paddle/framework/backward.cc

@@ -18,6 +18,7 @@
 #include <deque>
 #include <list>
 #include <memory>
+#include <unordered_set>
 
 #include "paddle/framework/block_desc.h"
 #include "paddle/framework/op_registry.h"
@@ -285,6 +286,15 @@ static bool AllGradInSet(const std::vector<std::string>& names,
   return true;
 }
 
+static std::string FwdName(const std::string& grad_name) {
+  auto pos = grad_name.find("@GRAD");
+  if (pos == std::string::npos) {
+    return "";
+  } else {
+    return grad_name.substr(0, pos);
+  }
+}
+
 static void CreateGradVarInBlock(
     size_t grad_op_start_index,
     const std::unordered_map<std::string, std::string>& param_name_map,
@@ -294,6 +304,7 @@ static void CreateGradVarInBlock(
   for (size_t op_index = grad_op_start_index; op_index < ops.size();
        ++op_index) {
     bool need_infer_shape = false;
+    std::unordered_set<std::string> new_vars;
    ForEachVarName(ops[op_index]->Outputs(),
                    [&](const std::string& grad_var_name) {
                      if (block_desc->HasVar(grad_var_name)) {
@@ -301,8 +312,7 @@ static void CreateGradVarInBlock(
                      }
                      need_infer_shape = true;
                      auto var = block_desc->Var(grad_var_name);
-                     // FIXME(qiao) infer the datatype
-                     var->SetDataType(framework::DataType::FP32);
+                     new_vars.insert(var->Name());
                      auto it = param_name_map.find(grad_var_name);
                      if (it == param_name_map.end()) {
                        return false;
@@ -316,6 +326,21 @@ static void CreateGradVarInBlock(
                    });
 
     if (need_infer_shape) {
+      ops[op_index]->InferVarType(block_desc);
+      for (auto& arg : ops[op_index]->OutputArgumentNames()) {
+        if (new_vars.find(arg) == new_vars.end()) {
+          continue;
+        }
+        auto pname = FwdName(arg);
+        auto* param = block_desc->FindVar(pname);
+        auto* grad = block_desc->FindVar(arg);
+        if (param == nullptr) {
+          LOG(WARNING) << "Cannot find forward variable of " << arg
+                       << ". Set its gradient to FP32";
+          grad->SetDataType(DataType::FP32);
+        } else {
+          grad->SetDataType(param->GetDataType());
+        }
+      }
       ops[op_index]->InferShape(*block_desc);
     }
   }
@@ -368,7 +393,7 @@ std::vector<std::unique_ptr<OpDescBind>> MakeBlockBackward(
     ProgramDescBind& program_desc, int block_idx,
     std::unordered_set<std::string>* no_grad_vars,
     std::unordered_map<std::string, std::string>* grad_to_var) {
-  BlockDescBind* cur_block = program_desc.Block(block_idx);
+  BlockDescBind* cur_block = program_desc.MutableBlock(block_idx);
   std::vector<OpDescBind*> op_descs = cur_block->AllOps();
   std::unordered_map<std::string, std::vector<size_t>> dup_out_ops;
   size_t grad_desc_idx = 0;
@@ -443,7 +468,7 @@ ParamGradInfoMap AppendBackward(
   }
 
   const int root_block_idx = 0;
-  auto root_block = program_desc.Block(root_block_idx);
+  auto root_block = program_desc.MutableBlock(root_block_idx);
 
   // insert fill one op for target
   // TODO(qiao) add some check to the target.
@@ -492,7 +517,7 @@ ParamGradInfoMap AppendBackward(
   CreateGradVarInBlock(forward_op_num, grad_to_var, root_block, &retv);
   for (size_t block_index = forward_block_num;
        block_index < program_desc.Size(); ++block_index) {
-    CreateGradVarInBlock(0, grad_to_var, program_desc.Block(block_index),
+    CreateGradVarInBlock(0, grad_to_var, program_desc.MutableBlock(block_index),
                          &retv);
   }
   return retv;
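Note: the FwdName helper introduced above simply strips the "@GRAD" suffix to recover a forward variable's name. A standalone restatement of its contract, for illustration only (the real helper is file-static in backward.cc):

    #include <cassert>
    #include <string>

    // Gradient name -> forward name; "" when the name carries no "@GRAD".
    static std::string FwdName(const std::string& grad_name) {
      auto pos = grad_name.find("@GRAD");
      return pos == std::string::npos ? "" : grad_name.substr(0, pos);
    }

    int main() {
      assert(FwdName("w1@GRAD") == "w1");  // maps back to the forward variable
      assert(FwdName("w1").empty());       // not a gradient name
      return 0;
    }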
paddle/framework/backward_test.cc

@@ -499,7 +499,7 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
 TEST(Backward, simple_single_op) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   f::OpDescBind* op = block->AppendOp();
   op->SetType("rowwise_add");
@@ -535,7 +535,7 @@ TEST(Backward, simple_single_op) {
 TEST(Backward, default_attribute) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   f::OpDescBind* op = block->AppendOp();
   op->SetType("mul");
   op->SetInput("X", {"x"});
@@ -561,7 +561,7 @@ TEST(Backward, default_attribute) {
 TEST(Backward, simple_mult_op) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   f::OpDescBind* op1 = block->AppendOp();
   op1->SetType("rowwise_add");
   op1->SetInput("X", {"x1"});
@@ -644,7 +644,7 @@ TEST(Backward, simple_mult_op) {
 TEST(Backward, intermedia_var_no_grad) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   f::OpDescBind* op1 = block->AppendOp();
   op1->SetType("rowwise_add");
   op1->SetInput("X", {"x1"});
@@ -714,7 +714,7 @@ TEST(Backward, intermedia_var_no_grad) {
 TEST(Backward, var_no_grad) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   f::OpDescBind* op1 = block->AppendOp();
   op1->SetType("mult_in_out");
   op1->SetInput("X", {"x1"});
@@ -790,7 +790,7 @@ TEST(Backward, var_no_grad) {
 TEST(Backward, shared_var) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   f::OpDescBind* op1 = block->AppendOp();
   op1->SetType("rowwise_add");
   op1->SetInput("X", {"x1"});
@@ -880,7 +880,7 @@ TEST(Backward, shared_var) {
 TEST(Backward, half_backward) {
   f::ProgramDescBind program;
-  f::BlockDescBind* block = program.Block(0);
+  f::BlockDescBind* block = program.MutableBlock(0);
   auto* op1 = block->AppendOp();
   op1->SetType("minus");
   op1->SetInput("X", {"a"});
paddle/framework/block_desc.cc

@@ -113,7 +113,7 @@ BlockDescBind *BlockDescBind::ParentBlock() const {
   if (this->desc_->parent_idx() == kNoneBlockIndex) {
     return nullptr;
   }
-  return prog_->Block(static_cast<size_t>(this->desc_->parent_idx()));
+  return prog_->MutableBlock(static_cast<size_t>(this->desc_->parent_idx()));
 }
 
 BlockDesc *BlockDescBind::Proto() {
paddle/framework/executor.cc

@@ -73,33 +73,32 @@ static void CreateTensor(Variable* var, VarDesc::VarType var_type) {
   }
 }
 
-void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
+void Executor::Run(const ProgramDescBind& pdesc, Scope* scope, int block_id) {
   // TODO(tonyyang-svail):
   //    - only runs on the first device (i.e. no interdevice communication)
   //    - will change to use multiple blocks for RNN op and Cond Op
-  PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id);
-  auto& block = pdesc.blocks(block_id);
+  PADDLE_ENFORCE_LT(block_id, pdesc.Size());
+  auto& block = pdesc.Block(block_id);
   auto& device = device_contexts_[0];
 
   Scope& local_scope = scope->NewScope();
 
-  for (auto& var : block.vars()) {
-    if (var.persistable()) {
-      auto* ptr = scope->Var(var.name());
-      CreateTensor(ptr, var.type());
-      VLOG(3) << "Create Variable " << var.name()
+  for (auto& var : block.AllVars()) {
+    if (var->Persistable()) {
+      auto* ptr = scope->Var(var->Name());
+      CreateTensor(ptr, var->GetType());
+      VLOG(3) << "Create Variable " << var->Name()
               << " global, which pointer is " << ptr;
     } else {
-      auto* ptr = local_scope.Var(var.name());
-      CreateTensor(ptr, var.type());
-      VLOG(3) << "Create Variable " << var.name()
+      auto* ptr = local_scope.Var(var->Name());
+      CreateTensor(ptr, var->GetType());
+      VLOG(3) << "Create Variable " << var->Name()
              << " locally, which pointer is " << ptr;
     }
   }
 
-  for (auto& op_desc : block.ops()) {
-    auto op = paddle::framework::OpRegistry::CreateOp(
-        op_desc, const_cast<ProgramDesc*>(&pdesc));
+  for (auto& op_desc : block.AllOps()) {
+    auto op = paddle::framework::OpRegistry::CreateOp(*op_desc);
     op->Run(local_scope, *device);
   }
paddle/framework/executor.h

@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once
 
-#include "paddle/framework/framework.pb.h"
 #include "paddle/framework/op_info.h"
+#include "paddle/framework/program_desc.h"
 #include "paddle/framework/scope.h"
 #include "paddle/framework/tensor.h"
@@ -34,7 +34,7 @@ class Executor {
    *  ProgramDesc
    *  Scope
    */
-  void Run(const ProgramDesc&, Scope*, int);
+  void Run(const ProgramDescBind&, Scope*, int);
 
  private:
   std::vector<platform::DeviceContext*> device_contexts_;
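Note: a minimal sketch of a call site for the new signature (the wrapper below is hypothetical, not part of the commit):

    #include "paddle/framework/executor.h"
    #include "paddle/framework/program_desc.h"
    #include "paddle/framework/scope.h"

    // Run block 0 of a bound program; after this change the executor checks
    // block_id against ProgramDescBind::Size() instead of the raw proto.
    void RunRootBlock(paddle::framework::Executor& executor,
                      paddle::framework::ProgramDescBind& program,
                      paddle::framework::Scope* scope) {
      executor.Run(program, scope, /*block_id=*/0);
    }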
paddle/framework/op_desc.cc

@@ -52,6 +52,22 @@ class CompileTimeInferShapeContext : public InferShapeContext {
   const std::vector<std::string> &Outputs(
       const std::string &name) const override;
 
+  void ShareLoD(const std::string &in, const std::string &out, size_t i = 0,
+                size_t j = 0) const override {
+    PADDLE_ENFORCE_LT(i, Inputs(in).size());
+    PADDLE_ENFORCE_LT(j, Outputs(out).size());
+    auto *in_var = block_.FindVarRecursive(Inputs(in)[i]);
+    auto *out_var = block_.FindVarRecursive(Outputs(out)[j]);
+    if (in_var->GetType() != VarDesc::LOD_TENSOR) {
+      VLOG(3) << "input " << in << "is not LodTensor";
+      return;
+    }
+    PADDLE_ENFORCE_EQ(in_var->GetType(), VarDesc::LOD_TENSOR,
+                      "The %d-th output of Output(%s) must be LoDTensor.", j,
+                      out);
+    in_var->SetLoDLevel(out_var->GetLodLevel());
+  }
+
  private:
   DDim GetDim(const std::string &name) const override;
@@ -98,7 +114,12 @@ OpDescBind::OpDescBind(const OpDesc &desc, ProgramDescBind *prog)
   // restore attrs_
   for (const OpDesc::Attr &attr : desc_.attrs()) {
     std::string attr_name = attr.name();
-    attrs_[attr_name] = GetAttrValue(attr, prog->Proto());
+    if (attr.type() != AttrType::BLOCK) {
+      attrs_[attr_name] = GetAttrValue(attr);
+    } else {
+      auto bid = attr.block_idx();
+      attrs_[attr_name] = prog->MutableBlock(bid);
+    }
   }
 }
@@ -172,8 +193,7 @@ void OpDescBind::SetAttr(const std::string &name, const Attribute &v) {
 }
 
 void OpDescBind::SetBlockAttr(const std::string &name, BlockDescBind &block) {
-  BlockDesc *desc = block.Proto();
-  this->attrs_[name] = desc;
+  this->attrs_[name] = &block;
   need_update_ = true;
 }
@@ -192,7 +212,7 @@ Attribute OpDescBind::GetAttr(const std::string &name) const {
 int OpDescBind::GetBlockAttr(const std::string &name) const {
   auto it = attrs_.find(name);
   PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name);
-  return boost::get<BlockDesc *>(it->second)->idx();
+  return boost::get<BlockDescBind *>(it->second)->ID();
 }
 
 const std::unordered_map<std::string, Attribute> &OpDescBind::GetAttrMap()
paddle/framework/op_registry.cc

@@ -43,13 +43,15 @@ static VariableNameMap ConvertOpDescVarsToVarNameMap(
   return ret_val;
 }
 
-std::unique_ptr<OperatorBase> OpRegistry::CreateOp(const OpDesc& op_desc,
-                                                   ProgramDesc* program) {
+std::unique_ptr<OperatorBase> OpRegistry::CreateOp(const OpDesc& op_desc) {
+  VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be"
+             "used in unit tests. Use CreateOp(const OpDescBind& op_desc) "
+             "instead.";
   VariableNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs());
   VariableNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs());
   AttributeMap attrs;
   for (auto& attr : op_desc.attrs()) {
-    attrs[attr.name()] = GetAttrValue(attr, program);
+    attrs[attr.name()] = GetAttrValue(attr);
   }
 
   return CreateOp(op_desc.type(), inputs, outputs, attrs);
paddle/framework/op_registry.h

@@ -77,8 +77,7 @@ class OpRegistry {
                                                 const VariableNameMap& outputs,
                                                 AttributeMap attrs);
 
-  static std::unique_ptr<OperatorBase> CreateOp(const OpDesc& op_desc,
-                                                ProgramDesc* program);
+  static std::unique_ptr<OperatorBase> CreateOp(const OpDesc& op_desc);
 
   static std::unique_ptr<OperatorBase> CreateOp(const OpDescBind& op_desc);
 };
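Note: a sketch of the two factory paths after this change (wrapper names are hypothetical): the proto-based overload survives for unit tests and now logs the VLOG(1) deprecation notice added in op_registry.cc, while new code constructs operators from the bound descriptor.

    #include <memory>
    #include "paddle/framework/op_registry.h"

    // Test-only path: builds from the raw OpDesc proto.
    std::unique_ptr<paddle::framework::OperatorBase> MakeOpForTest(
        const paddle::framework::OpDesc& proto_desc) {
      return paddle::framework::OpRegistry::CreateOp(proto_desc);
    }

    // Preferred path: builds from the bound descriptor.
    std::unique_ptr<paddle::framework::OperatorBase> MakeOp(
        const paddle::framework::OpDescBind& bound_desc) {
      return paddle::framework::OpRegistry::CreateOp(bound_desc);
    }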
paddle/framework/op_registry_test.cc

@@ -74,7 +74,7 @@ TEST(OpRegistry, CreateOp) {
   attr->set_type(paddle::framework::AttrType::FLOAT);
   attr->set_f(scale);
 
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
@@ -95,7 +95,7 @@ TEST(OpRegistry, IllegalAttr) {
   bool caught = false;
   try {
-    paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+    paddle::framework::OpRegistry::CreateOp(op_desc);
   } catch (paddle::platform::EnforceNotMet err) {
     caught = true;
     std::string msg = "larger_than check fail";
@@ -115,7 +115,7 @@ TEST(OpRegistry, DefaultValue) {
   ASSERT_TRUE(op_desc.IsInitialized());
 
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
@@ -131,7 +131,7 @@ TEST(OpRegistry, CustomChecker) {
   // attr 'test_attr' is not set
   bool caught = false;
   try {
-    paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+    paddle::framework::OpRegistry::CreateOp(op_desc);
   } catch (paddle::platform::EnforceNotMet err) {
     caught = true;
     std::string msg = "Attribute 'test_attr' is required!";
@@ -149,7 +149,7 @@ TEST(OpRegistry, CustomChecker) {
   attr->set_i(3);
   caught = false;
   try {
-    paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+    paddle::framework::OpRegistry::CreateOp(op_desc);
   } catch (paddle::platform::EnforceNotMet err) {
     caught = true;
     std::string msg = "'test_attr' must be even!";
@@ -166,7 +166,7 @@ TEST(OpRegistry, CustomChecker) {
   attr->set_name("test_attr");
   attr->set_type(paddle::framework::AttrType::INT);
   attr->set_i(4);
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   paddle::platform::CPUDeviceContext dev_ctx;
   paddle::framework::Scope scope;
   op->Run(scope, dev_ctx);
paddle/framework/operator.cc

@@ -351,6 +351,20 @@ class RuntimeInferShapeContext : public InferShapeContext {
     return op_.Outputs(name);
   }
 
+  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
+                size_t j = 0) const override {
+    PADDLE_ENFORCE_LT(i, Inputs(in).size());
+    PADDLE_ENFORCE_LT(j, Outputs(out).size());
+    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
+    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
+    if (!in_var->IsType<LoDTensor>()) return;
+    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
+                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
+    auto in_tensor = in_var->Get<LoDTensor>();
+    auto* out_tensor = out_var->GetMutable<LoDTensor>();
+    out_tensor->set_lod(in_tensor.lod());
+  }
+
  private:
   DDim GetDim(const std::string& name) const override {
     Variable* var = scope_.FindVar(name);
paddle/framework/operator_test.cc

@@ -83,7 +83,7 @@ TEST(OperatorBase, all) {
   paddle::platform::CPUDeviceContext device_context;
   paddle::framework::Scope scope;
 
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   scope.Var("OUT1");
   ASSERT_EQ(paddle::framework::op_run_num, 0);
   op->Run(scope, device_context);
@@ -208,7 +208,7 @@ TEST(OpKernel, all) {
   paddle::platform::CPUDeviceContext cpu_device_context;
   paddle::framework::Scope scope;
 
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0);
   op->Run(scope, cpu_device_context);
   ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
@@ -244,7 +244,7 @@ TEST(OpKernel, multi_inputs) {
   scope.Var("y0")->GetMutable<LoDTensor>();
   scope.Var("y1")->GetMutable<LoDTensor>();
 
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   op->Run(scope, cpu_device_context);
 }
paddle/framework/program_desc.h

@@ -37,7 +37,9 @@ class ProgramDescBind {
   BlockDescBind *AppendBlock(const BlockDescBind &parent);
 
-  BlockDescBind *Block(size_t idx) { return blocks_[idx].get(); }
+  BlockDescBind *MutableBlock(size_t idx) { return blocks_[idx].get(); }
+
+  const BlockDescBind &Block(size_t idx) const { return *blocks_[idx]; }
 
   size_t Size() const { return blocks_.size(); }
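Note: this accessor split is the core API change of the commit: reads go through the new const Block(), while mutation must now be spelled out via MutableBlock(). A minimal sketch of the pattern (hypothetical helper; the op setup mirrors the tests in this diff):

    #include "paddle/framework/program_desc.h"

    void TouchRootBlock(paddle::framework::ProgramDescBind& program) {
      // Read-only view: const reference, no mutation possible.
      const paddle::framework::BlockDescBind& read_only = program.Block(0);
      (void)read_only;

      // Mutation is explicit: MutableBlock returns a non-const pointer.
      paddle::framework::BlockDescBind* writable = program.MutableBlock(0);
      writable->AppendOp()->SetType("rowwise_add");
    }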
paddle/framework/program_desc_test.cc

@@ -20,7 +20,7 @@ namespace paddle {
 namespace framework {
 TEST(ProgramDesc, copy_ctor) {
   ProgramDescBind program;
-  auto* global_block = program.Block(0);
+  auto* global_block = program.MutableBlock(0);
   auto* x = global_block->Var("X");
   x->SetType(VarDesc_VarType_LOD_TENSOR);
   x->SetLoDLevel(0);
@@ -44,7 +44,7 @@ TEST(ProgramDesc, copy_ctor) {
   ProgramDescBind program_copy(program);
 
-  auto* global_block_copy = program_copy.Block(0);
+  auto* global_block_copy = program_copy.MutableBlock(0);
   ASSERT_NE(global_block, global_block_copy);
 
   auto assert_same_var = [&](const std::string &name, VarDescBind *var_before) {
@@ -82,7 +82,7 @@ TEST(ProgramDesc, copy_ctor) {
 TEST(ProgramDescBind, serialize_and_deserialize) {
   ProgramDescBind program_origin;
-  auto* global_block = program_origin.Block(0);
+  auto* global_block = program_origin.MutableBlock(0);
   auto* x = global_block->Var("X");
   x->SetType(VarDesc_VarType_LOD_TENSOR);
   x->SetLoDLevel(0);
@@ -108,7 +108,7 @@ TEST(ProgramDescBind, serialize_and_deserialize) {
   program_origin.Proto()->SerializeToString(&binary_str);
 
   ProgramDescBind program_restored(binary_str);
-  auto* global_block_restored = program_restored.Block(0);
+  auto* global_block_restored = program_restored.MutableBlock(0);
   ASSERT_NE(global_block, global_block_restored);
 
   auto assert_same_var = [&](const std::string &name, VarDescBind *var_before) {
paddle/framework/prune_test.cc

@@ -52,7 +52,7 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs,
 TEST(Prune, one_operator) {
   f::ProgramDescBind program;
-  f::BlockDescBind *block = program.Block(0);
+  f::BlockDescBind *block = program.MutableBlock(0);
 
   AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block);
@@ -69,7 +69,7 @@ TEST(Prune, one_operator) {
 TEST(Prune, forward) {
   f::ProgramDescBind program;
-  f::BlockDescBind *block = program.Block(0);
+  f::BlockDescBind *block = program.MutableBlock(0);
 
   AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block);
   AddOp("one_one", {{"input", {"b"}}}, {{"output", {"c"}}}, {}, block);
@@ -88,7 +88,7 @@ TEST(Prune, forward) {
 TEST(Prune, multi_input_op) {
   f::ProgramDescBind program;
-  f::BlockDescBind *block = program.Block(0);
+  f::BlockDescBind *block = program.MutableBlock(0);
 
   AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, {}, block);
   AddOp("one_one", {{"input", {"a1"}}}, {{"output", {"b1"}}}, {}, block);
@@ -106,7 +106,7 @@ TEST(Prune, multi_input_op) {
 TEST(Prune, multi_output_op) {
   f::ProgramDescBind program;
-  f::BlockDescBind *block = program.Block(0);
+  f::BlockDescBind *block = program.MutableBlock(0);
 
   AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block);
   AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block);
@@ -122,7 +122,7 @@ TEST(Prune, multi_output_op) {
 TEST(Prune, multi_target) {
   f::ProgramDescBind program;
-  f::BlockDescBind *block = program.Block(0);
+  f::BlockDescBind *block = program.MutableBlock(0);
 
   AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block);
   AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block);
paddle/framework/shape_inference.cc

@@ -28,9 +28,6 @@ void InferShapeContext::SetOutputsDim(
   SetDims(names, dims);
 }
 
-void InferShapeContext::ShareLoD(const std::string &in, const std::string &out,
-                                 size_t i, size_t j) const {}
-
 std::vector<framework::DDim> InferShapeContext::GetDims(
     const std::vector<std::string> &names) const {
   std::vector<framework::DDim> ret;
paddle/framework/shape_inference.h

@@ -43,9 +43,8 @@ class InferShapeContext {
   virtual const std::vector<std::string> &Outputs(
       const std::string &name) const = 0;
 
-  // TODO(qiao) implement this function
-  void ShareLoD(const std::string &in, const std::string &out, size_t i = 0,
-                size_t j = 0) const;
+  virtual void ShareLoD(const std::string &in, const std::string &out,
+                        size_t i = 0, size_t j = 0) const = 0;
 
  protected:
   virtual framework::DDim GetDim(const std::string &name) const = 0;
paddle/framework/type_defs.h

@@ -36,7 +36,7 @@ using VariableNameMap = std::map<std::string, std::vector<std::string>>;
 using Attribute =
     boost::variant<boost::blank, int, float, std::string, std::vector<int>,
                    std::vector<float>, std::vector<std::string>, bool,
-                   std::vector<bool>, BlockDesc*>;
+                   std::vector<bool>, BlockDescBind*>;
 
 using AttributeMap = std::unordered_map<std::string, Attribute>;
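Note: storing BlockDescBind* in the Attribute variant is what lets block attributes resolve against the bound program rather than the serialized proto; GetBlockAttr in op_desc.cc above now reads ID() instead of the proto's idx(). A hedged sketch of the access pattern:

    #include "paddle/framework/block_desc.h"
    #include "paddle/framework/type_defs.h"

    // The variant now holds the bound descriptor, so the block id comes from
    // BlockDescBind::ID().
    int BlockAttrId(const paddle::framework::Attribute& attr) {
      return boost::get<paddle::framework::BlockDescBind*>(attr)->ID();
    }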
paddle/framework/var_type_inference_test.cc

@@ -63,41 +63,43 @@ namespace framework {
 TEST(InferVarType, sum_op) {
   ProgramDescBind prog;
-  auto *op = prog.Block(0)->AppendOp();
+  auto *op = prog.MutableBlock(0)->AppendOp();
   op->SetType("sum");
   op->SetInput("X", {"test_a", "test_b", "test_c"});
   op->SetOutput("Out", {"test_out"});
 
-  prog.Block(0)->Var("test_a")->SetType(VarDesc::SELECTED_ROWS);
-  prog.Block(0)->Var("test_b")->SetType(VarDesc::SELECTED_ROWS);
-  prog.Block(0)->Var("test_c")->SetType(VarDesc::SELECTED_ROWS);
-  prog.Block(0)->Var("test_out");
+  prog.MutableBlock(0)->Var("test_a")->SetType(VarDesc::SELECTED_ROWS);
+  prog.MutableBlock(0)->Var("test_b")->SetType(VarDesc::SELECTED_ROWS);
+  prog.MutableBlock(0)->Var("test_c")->SetType(VarDesc::SELECTED_ROWS);
+  prog.MutableBlock(0)->Var("test_out");
 
-  op->InferVarType(prog.Block(0));
+  op->InferVarType(prog.MutableBlock(0));
 
-  ASSERT_EQ(VarDesc::SELECTED_ROWS, prog.Block(0)->Var("test_out")->GetType());
+  ASSERT_EQ(VarDesc::SELECTED_ROWS,
+            prog.MutableBlock(0)->Var("test_out")->GetType());
 
-  prog.Block(0)->Var("test_b")->SetType(VarDesc::LOD_TENSOR);
-  op->InferVarType(prog.Block(0));
-  ASSERT_EQ(VarDesc::LOD_TENSOR, prog.Block(0)->Var("test_out")->GetType());
+  prog.MutableBlock(0)->Var("test_b")->SetType(VarDesc::LOD_TENSOR);
+  op->InferVarType(prog.MutableBlock(0));
+  ASSERT_EQ(VarDesc::LOD_TENSOR,
+            prog.MutableBlock(0)->Var("test_out")->GetType());
 }
 
 TEST(InferVarType, sum_op_without_infer_var_type) {
   ProgramDescBind prog;
-  auto *op = prog.Block(0)->AppendOp();
+  auto *op = prog.MutableBlock(0)->AppendOp();
   op->SetType("sum_without_infer_var_type");
   op->SetInput("X", {"test2_a", "test2_b", "test2_c"});
   op->SetOutput("Out", {"test2_out"});
 
-  prog.Block(0)->Var("test2_a")->SetType(VarDesc::SELECTED_ROWS);
-  prog.Block(0)->Var("test2_b")->SetType(VarDesc::SELECTED_ROWS);
-  prog.Block(0)->Var("test2_c")->SetType(VarDesc::SELECTED_ROWS);
-  prog.Block(0)->Var("test2_out");
+  prog.MutableBlock(0)->Var("test2_a")->SetType(VarDesc::SELECTED_ROWS);
+  prog.MutableBlock(0)->Var("test2_b")->SetType(VarDesc::SELECTED_ROWS);
+  prog.MutableBlock(0)->Var("test2_c")->SetType(VarDesc::SELECTED_ROWS);
+  prog.MutableBlock(0)->Var("test2_out");
 
-  op->InferVarType(prog.Block(0));
+  op->InferVarType(prog.MutableBlock(0));
 
   ASSERT_EQ(VarDesc_VarType_LOD_TENSOR,
-            prog.Block(0)->Var("test2_out")->GetType());
+            prog.MutableBlock(0)->Var("test2_out")->GetType());
 }
 
 }  // namespace framework
paddle/operators/dynamic_recurrent_op_test.cc

@@ -51,7 +51,7 @@ class RNNAlgorithmTestHelper : public ::testing::Test {
     CreateGlobalVariables();
 
     auto op_desc = CreateOpDesc();
-    op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
+    op = paddle::framework::OpRegistry::CreateOp(op_desc);
     dop = &(dynamic_cast<DynamicRecurrentOp*>(op.get())->rnn);
     InitCacheManually();
     InitStepNet();
paddle/operators/gaussian_random_op.cc

@@ -45,14 +45,14 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of GaussianRandomOp should not be null.");
-    auto dims = ctx->Attrs().Get<std::vector<int>>("dims");
+    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
-    temp.reserve(dims.size());
-    for (auto dim : dims) {
+    temp.reserve(shape.size());
+    for (auto dim : shape) {
       temp.push_back(static_cast<int64_t>(dim));
     }
-    PADDLE_ENFORCE(dims.size() > 0UL,
-                   "dims can be one int or array. dims must be set.");
+    PADDLE_ENFORCE(shape.size() > 0UL,
+                   "shape can be one int or array. shape must be set.");
     ctx->SetOutputDim("Out", framework::make_ddim(temp));
   }
@@ -74,7 +74,7 @@ GaussianRandom operator.
 Use to initialize tensor with gaussian random generator.
 )DOC");
 
-    AddAttr<std::vector<int>>("dims", "The dimension of random tensor.");
+    AddAttr<std::vector<int>>("shape", "The dimension of random tensor.");
     AddAttr<float>("mean", "mean of random tensor.").SetDefault(.0f);
     AddAttr<float>("std", "std of random tensor.").SetDefault(1.0f);
     AddAttr<int>("seed",
paddle/operators/lookup_table_op.cc

@@ -43,7 +43,7 @@ class LookupTableOp : public framework::OperatorWithKernel {
  protected:
   framework::DataType IndicateDataType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("W")->type());
+    return framework::ToDataType(ctx.Input<LoDTensor>("W")->type());
   }
 };
@@ -93,7 +93,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
  protected:
   framework::DataType IndicateDataType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("W")->type());
+    return framework::ToDataType(ctx.Input<LoDTensor>("W")->type());
   }
 };
paddle/operators/lookup_table_op.cu

@@ -61,16 +61,16 @@ template <typename T>
 class LookupTableCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto table_t = context.Input<Tensor>("W");
-    auto ids_t = context.Input<Tensor>("Ids");
-    auto output_t = context.Output<Tensor>("Out");
+    auto* table_t = context.Input<LoDTensor>("W");
+    auto* ids_t = context.Input<LoDTensor>("Ids");
+    auto* output_t = context.Output<LoDTensor>("Out");
 
     size_t N = table_t->dims()[0];
     size_t D = table_t->dims()[1];
     size_t K = ids_t->numel();
-    auto ids = ids_t->data<int64_t>();
-    auto table = table_t->data<T>();
-    auto output = output_t->mutable_data<T>(context.GetPlace());
+    auto* ids = ids_t->data<int64_t>();
+    auto* table = table_t->data<T>();
+    auto* output = output_t->mutable_data<T>(context.GetPlace());
 
     dim3 threads(128, 8);
     dim3 grids(8, 1);
@@ -87,9 +87,9 @@ class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     bool is_sparse = context.Attr<bool>("is_sparse");
     if (is_sparse) {
-      auto* ids = context.Input<Tensor>("Ids");
-      auto* table = context.Input<Tensor>("W");
-      auto* d_output = context.Input<Tensor>(framework::GradVarName("Out"));
+      auto* ids = context.Input<LoDTensor>("Ids");
+      auto* table = context.Input<LoDTensor>("W");
+      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
       auto* d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
 
       auto* ids_data = ids->data<int64_t>();
@@ -116,12 +116,12 @@ class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
       auto* d_output_data = d_output->data<T>();
       PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims());
       memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
-                   d_output->numel(), stream);
+                   d_output->numel() * sizeof(T), stream);
     } else {
-      auto ids_t = context.Input<Tensor>("Ids");
-      auto d_output_t = context.Input<Tensor>(framework::GradVarName("Out"));
-      auto d_table_t = context.Output<Tensor>(framework::GradVarName("W"));
+      auto ids_t = context.Input<LoDTensor>("Ids");
+      auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
+      auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
 
       int N = d_table_t->dims()[0];
       int D = d_table_t->dims()[1];
paddle/operators/lookup_table_op.h

@@ -19,22 +19,22 @@
 namespace paddle {
 namespace operators {
 
 using Tensor = framework::Tensor;
+using LoDTensor = framework::LoDTensor;
 using SelectedRows = framework::SelectedRows;
 
 template <typename T>
 class LookupTableKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto table_t = context.Input<Tensor>("W");      // float tensor
-    auto ids_t = context.Input<Tensor>("Ids");      // int tensor
-    auto output_t = context.Output<Tensor>("Out");  // float tensor
+    auto* table_t = context.Input<LoDTensor>("W");      // float tensor
+    auto* ids_t = context.Input<LoDTensor>("Ids");      // int tensor
+    auto* output_t = context.Output<LoDTensor>("Out");  // float tensor
 
     int N = table_t->dims()[0];
     int D = table_t->dims()[1];
-    auto ids = ids_t->data<int64_t>();
-    auto table = table_t->data<T>();
-    auto output = output_t->mutable_data<T>(context.GetPlace());
+    auto* ids = ids_t->data<int64_t>();
+    auto* table = table_t->data<T>();
+    auto* output = output_t->mutable_data<T>(context.GetPlace());
     for (int64_t i = 0; i < ids_t->numel(); ++i) {
       PADDLE_ENFORCE_LT(ids[i], N);
       PADDLE_ENFORCE_GE(ids[i], 0);
@@ -49,9 +49,9 @@ class LookupTableGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     bool is_sparse = context.Attr<bool>("is_sparse");
     if (is_sparse) {
-      auto* ids = context.Input<Tensor>("Ids");
-      auto* table = context.Input<Tensor>("W");
-      auto* d_output = context.Input<Tensor>(framework::GradVarName("Out"));
+      auto* ids = context.Input<LoDTensor>("Ids");
+      auto* table = context.Input<LoDTensor>("W");
+      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
       auto* d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
 
       auto* ids_data = ids->data<int64_t>();
@@ -76,10 +76,10 @@ class LookupTableGradKernel : public framework::OpKernel<T> {
       PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims());
       memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel());
     } else {
-      auto* ids = context.Input<Tensor>("Ids");
-      auto* d_output = context.Input<Tensor>(framework::GradVarName("Out"));
-      auto* d_table = context.Output<Tensor>(framework::GradVarName("W"));
-      auto* table = context.Input<Tensor>("W");
+      auto* ids = context.Input<LoDTensor>("Ids");
+      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
+      auto* d_table = context.Output<LoDTensor>(framework::GradVarName("W"));
+      auto* table = context.Input<LoDTensor>("W");
 
       auto* ids_data = ids->data<int64_t>();
       auto ids_dim = ids->dims();
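Note: the Tensor → LoDTensor switch matters because a LoDTensor carries level-of-detail (sequence) offsets in addition to the raw data. A small sketch of propagating that structure, mirroring the set_lod/lod pair used by the runtime ShareLoD in operator.cc (header path and helper name are assumptions):

    #include "paddle/framework/lod_tensor.h"

    // Copy sequence boundaries from src to dst; the element data is handled
    // by the kernel itself.
    void CopyLoD(const paddle::framework::LoDTensor& src,
                 paddle::framework::LoDTensor* dst) {
      dst->set_lod(src.lod());
    }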
paddle/operators/sequence_conv_op.cc

@@ -89,7 +89,7 @@ class SequenceConvGradOp : public framework::OperatorWithKernel {
     }
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
-      ctx->ShareLoD(framework::GradVarName("X"), "X");
+      ctx->ShareLoD("X", framework::GradVarName("X"));
     }
     if (ctx->HasOutput(framework::GradVarName("Filter"))) {
       ctx->SetOutputDim(framework::GradVarName("Filter"),
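Note: the one-line fix above hinges on ShareLoD's parameter order, which is (in, out): LoD flows from the first name to the second, so the gradient of X must be the destination. Sketch (helper name and includes are assumptions):

    #include "paddle/framework/operator.h"
    #include "paddle/framework/shape_inference.h"

    // Propagate X's sequence structure onto Grad(X): source first, target second.
    void ShareXLoDToGrad(paddle::framework::InferShapeContext* ctx) {
      ctx->ShareLoD("X", paddle::framework::GradVarName("X"));
    }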
paddle/pybind/protobuf.cc

@@ -129,7 +129,8 @@ void BindProgramDesc(py::module &m) {
             }
             return retv;
           })
-      .def("block", &ProgramDescBind::Block, py::return_value_policy::reference)
+      .def("block", &ProgramDescBind::MutableBlock,
+           py::return_value_policy::reference)
       .def("num_blocks", &ProgramDescBind::Size)
       .def("serialize_to_string",
            [](ProgramDescBind &program_desc) -> py::bytes {
paddle/pybind/pybind.cc

@@ -275,7 +275,7 @@ All parameter, weight, gradient are variables in Paddle.
            const std::vector<std::array<size_t, 2>> &targets) {
           ProgramDescBind prog_with_targets(origin);
           for (const auto &t : targets) {
-            prog_with_targets.Block(t[0])->Op(t[1])->MarkAsTarget();
+            prog_with_targets.MutableBlock(t[0])->Op(t[1])->MarkAsTarget();
           }
           ProgramDesc pruned_desc;
           Prune(*prog_with_targets.Proto(), &pruned_desc);
@@ -335,7 +335,7 @@ All parameter, weight, gradient are variables in Paddle.
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
-           return OpRegistry::CreateOp(desc, nullptr);
+           return OpRegistry::CreateOp(desc);
          })
       .def("backward",
            [](const OperatorBase &forwardOp,
@@ -439,7 +439,7 @@ All parameter, weight, gradient are variables in Paddle.
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
-           auto rnn_op = OpRegistry::CreateOp(desc, nullptr);
+           auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
       .def("set_stepnet",
           [](operators::RecurrentOp &self,
@@ -457,7 +457,7 @@ All parameter, weight, gradient are variables in Paddle.
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
-           auto rnn_op = OpRegistry::CreateOp(desc, nullptr);
+           auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::DynamicRecurrentOp *>(rnn_op.release());
          })
@@ -484,7 +484,7 @@ All parameter, weight, gradient are variables in Paddle.
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
-           auto cond_op = OpRegistry::CreateOp(desc, nullptr);
+           auto cond_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::CondOp *>(cond_op.release());
          })
       .def("set_truenet",
@@ -498,10 +498,7 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<framework::Executor>(m, "Executor")
       .def(py::init<std::vector<platform::Place> &>())
-      .def("run", [](Executor &self, ProgramDescBind *program_bind,
-                     Scope *scope, int block_id) {
-        self.Run(*program_bind->Proto(), scope, block_id);
-      });
+      .def("run", &Executor::Run);
 
   m.def("unique_integer", UniqueIntegerGenerator);
   m.def("init_gflags", InitGflags);
python/paddle/v2/framework/initializer.py

@@ -62,7 +62,7 @@ class ConstantInitializer(Initializer):
 class UniformInitializer(Initializer):
-    """Implements for random uniform distribution initializer
+    """Implements the random uniform distribution initializer
     """
 
     def __init__(self, low=-1.0, high=1.0, seed=0):
@@ -75,6 +75,7 @@ class UniformInitializer(Initializer):
         """
         assert low is not None
         assert high is not None
+        assert high >= low
         assert seed is not None
         super(UniformInitializer, self).__init__()
         self._low = low
@@ -107,3 +108,51 @@ class UniformInitializer(Initializer):
             })
         var.op = op
         return op
+
+
+class NormalInitializer(Initializer):
+    """Implements the random Normal(Gaussian) distribution initializer
+    """
+
+    def __init__(self, loc=0.0, scale=1.0, seed=0):
+        """Constructor for NormalInitializer
+
+        Args:
+            loc: mean of the normal distribution
+            scale: standard deviation of the normal distribution
+            seed: random seed
+        """
+        assert loc is not None
+        assert scale is not None
+        assert seed is not None
+        super(NormalInitializer, self).__init__()
+        self._mean = loc
+        self._std_dev = scale
+        self._seed = seed
+
+    def __call__(self, var, block):
+        """Add normal distribution initialization ops for a variable
+
+        Args:
+            var: Variable that needs to be initialized
+            block: The block in which initialization ops
+                   should be added
+
+        Returns:
+            the initialization op
+        """
+        assert isinstance(var, framework.Variable)
+        assert isinstance(block, framework.Block)
+        # Initialization Ops should be prepended and not appended
+        op = block.prepend_op(
+            type="gaussian_random",
+            outputs={"Out": var},
+            attrs={
+                "shape": var.shape,
+                "data_type": int(var.data_type),
+                "mean": self._mean,
+                "std": self._std_dev,
+                "seed": self._seed
+            })
+        var.op = op
+        return op
python/paddle/v2/framework/tests/test_gaussian_random_op.py

@@ -19,7 +19,7 @@ class TestGaussianRandomOp(unittest.TestCase):
         op = Operator(
             "gaussian_random",
             Out='Out',
-            dims=[1000, 784],
+            shape=[1000, 784],
             mean=.0,
             std=1.,
             seed=10)
python/paddle/v2/framework/tests/test_initializer.py (new file, mode 100644)

import unittest

import paddle.v2.framework.framework as framework
import paddle.v2.framework.initializer as initializer

DELTA = 0.00001


class TestConstantInitializer(unittest.TestCase):
    def test_constant_initializer_default_value(self):
        """Test the constant initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="param",
            initializer=initializer.ConstantInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)

    def test_constant_initializer(self):
        """Test constant initializer with supplied value
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="param",
            initializer=initializer.ConstantInitializer(2.3))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)


class TestUniformInitializer(unittest.TestCase):
    def test_uniform_initializer_default_value(self):
        """Test the uniform initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="param",
            initializer=initializer.UniformInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_initializer(self):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="param",
            initializer=initializer.UniformInitializer(-4.2, 3.1, 123))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)


class TestNormalInitializer(unittest.TestCase):
    def test_normal_initializer_default_value(self):
        """Test the normal initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="param",
            initializer=initializer.NormalInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_initializer(self):
        """Test normal initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="param",
            initializer=initializer.NormalInitializer(2.3, 1.9, 123))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)


if __name__ == '__main__':
    unittest.main()