PaddlePaddle / Paddle
Commit 880b2e80
Authored on Jan 04, 2018 by tensor-tang

    Merge remote-tracking branch 'upstream/develop' into context

Parents: 5bf5650d, 89bbc4f6
Showing 6 changed files with 108 additions and 169 deletions (+108 -169)
paddle/framework/op_desc.cc                  +8   -4
paddle/framework/op_registry.h               +2   -2
paddle/gserver/tests/test_LayerGrad.cpp      +4   -2
paddle/operators/CMakeLists.txt              +20  -96
python/paddle/v2/fluid/layers/nn.py          +72  -63
python/paddle/v2/fluid/tests/test_layers.py  +2   -2
paddle/framework/op_desc.cc

@@ -260,7 +260,13 @@ struct SetAttrDescVisitor : public boost::static_visitor<void> {
   void operator()(int v) const { attr_->set_i(v); }
   void operator()(float v) const { attr_->set_f(v); }
   void operator()(const std::string &v) const { attr_->set_s(v); }
-  void operator()(bool b) const { attr_->set_b(b); }
+
+  // Please refer to https://github.com/PaddlePaddle/Paddle/issues/7162
+  template <class T,
+            class = typename std::enable_if<std::is_same<bool, T>::value>::type>
+  void operator()(T b) const {
+    attr_->set_b(b);
+  }
   void operator()(const std::vector<int> &v) const {
     VectorToRepeated(v, attr_->mutable_ints());

@@ -274,9 +280,7 @@ struct SetAttrDescVisitor : public boost::static_visitor<void> {
   void operator()(const std::vector<bool> &v) const {
     VectorToRepeated(v, attr_->mutable_bools());
   }
-  void operator()(proto::BlockDesc *desc) const {
-    attr_->set_block_idx(desc->idx());
-  }
+  void operator()(BlockDesc *desc) const { attr_->set_block_idx(desc->ID()); }
   void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); }
 };
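Note: the change above swaps the plain bool overload for a template that only participates in overload resolution when T is exactly bool. Issue #7162 is only linked, not quoted, in the diff; a classic instance of the pitfall that such an exact-type guard avoids can be sketched standalone (illustrative names VisitLoose/VisitStrict, ordinary C++ conversion rules assumed):

    #include <iostream>
    #include <string>
    #include <type_traits>

    // Unconstrained pair: mirrors the visitor before this commit.
    void VisitLoose(bool b) { std::cout << "loose bool: " << b << "\n"; }
    void VisitLoose(const std::string &s) { std::cout << "loose string: " << s << "\n"; }

    // Constrained pair: the template takes part in overload resolution only
    // when T deduces to exactly bool, as in the new SetAttrDescVisitor.
    template <class T,
              class = typename std::enable_if<std::is_same<bool, T>::value>::type>
    void VisitStrict(T b) { std::cout << "strict bool: " << b << "\n"; }
    void VisitStrict(const std::string &s) { std::cout << "strict string: " << s << "\n"; }

    int main() {
      // const char* -> bool is a standard conversion while const char* ->
      // std::string is user-defined, so the loose pair silently picks bool.
      VisitLoose("attr");   // prints "loose bool: 1"
      // With the enable_if guard, T deduces to const char*, the template
      // drops out via SFINAE, and the string overload is chosen as intended.
      VisitStrict("attr");  // prints "strict string: attr"
      VisitStrict(true);    // prints "strict bool: 1"
      return 0;
    }

Because T must deduce to bool exactly, no implicit standard conversion (pointer, integer) can drag an unrelated value into the bool branch.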
paddle/framework/op_registry.h

@@ -37,8 +37,8 @@ class Registrar {
  public:
   // In our design, various kinds of classes, e.g., operators and kernels,
   // have their corresponding registry and registrar. The action of
-  // registration is in the constructor of a global registrar variable, which,
-  // however, are not used in the code that calls package framework, and would
+  // registration is in the constructor of a global registrar variable, which
+  // are not used in the code that calls package framework, and would
   // be removed from the generated binary file by the linker. To avoid such
   // removal, we add Touch to all registrar classes and make USE_OP macros to
   // call this method. So, as long as the callee code calls USE_OP, the global
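Note: the comment above describes the Touch trick against linker dead-stripping. A minimal sketch of that pattern, with hypothetical names (the real registry and USE_OP macros in op_registry.h are more involved):

    #include <iostream>

    // Hypothetical registrar; the real one lives in paddle/framework/op_registry.h.
    struct Registrar {
      // Registration happens as a side effect of constructing a global object.
      Registrar() { std::cout << "operator registered\n"; }
      // Touch is intentionally empty: its only purpose is to give callers
      // something to reference, so the linker cannot strip the object file
      // that defines the global registrar below.
      void Touch() {}
    };

    static Registrar my_op_registrar;  // hypothetical global registrar

    // Roughly what a USE_OP-style macro expands to on the caller's side:
    // calling this forces a reference to my_op_registrar at link time.
    inline int TouchMyOpRegistrar() {
      my_op_registrar.Touch();
      return 0;
    }

    int main() { return TouchMyOpRegistrar(); }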
paddle/gserver/tests/test_LayerGrad.cpp

@@ -1472,7 +1472,8 @@ TEST(Layer, RecurrentLayer) {
     for (auto reversed : {false, true}) {
       config.layerConfig.set_reversed(reversed);
       config.testState = !reversed;
-      testLayerGrad(config, "recurrent", 50, /* trans= */ false, useGpu);
+      testLayerGrad(config, "recurrent", 50, /* trans= */ false, useGpu, false,
+                    1.0);
     }
   }
 }

@@ -1494,7 +1495,8 @@ TEST(Layer, LstmLayer) {
     for (auto reversed : {false, true}) {
       config.layerConfig.set_reversed(reversed);
       config.testState = !reversed;
-      testLayerGrad(config, "lstmemory", 100, /* trans= */ false, useGpu);
+      testLayerGrad(config, "lstmemory", 100, /* trans= */ false, useGpu,
+                    false, 0.02);
     }
   }
   for (auto useGpu : {true}) {
paddle/operators/CMakeLists.txt

@@ -61,106 +61,28 @@ function(op_library TARGET)
       ${op_common_deps})
   endif()

-  # net_op doesn't need pybind
-  if ("${TARGET}" STREQUAL "net_op")
-    set(pybind_flag 1)
-  endif()
-
-  if ("${TARGET}" STREQUAL "compare_op")
-    set(pybind_flag 1)
-    file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(equal);\n")
-  endif()
-
-  # conv_op contains several operators
-  if ("${TARGET}" STREQUAL "conv_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(conv2d);\n")
-  endif()
-
-  # conv_cudnn_op contains several operators
-  if ("${TARGET}" STREQUAL "conv_cudnn_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(conv2d_cudnn);\n")
-  endif()
-
-  # pool_op contains several operators
-  if ("${TARGET}" STREQUAL "pool_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(pool2d);\n")
-  endif()
-
-  # pool_cudnn_op contains several operators
-  if ("${TARGET}" STREQUAL "pool_cudnn_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(pool2d_cudnn);\n")
-  endif()
-
-  if ("${TARGET}" STREQUAL "logical_op")
-    set(pybind_flag 1)
-    file(APPEND ${pybind_file} "USE_OP(logical_and);\n")
-  endif()
-
-  # pool_with_index_op contains several operators
-  if ("${TARGET}" STREQUAL "pool_with_index_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n")
-  endif()
-
-  # conv_transpose_op contains several operators
-  if ("${TARGET}" STREQUAL "conv_transpose_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(conv2d_transpose);\n")
-  endif()
-
-  # conv_transpose_cudnn_op contains two operators
-  if ("${TARGET}" STREQUAL "conv_transpose_cudnn_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(conv2d_transpose_cudnn);\n")
-  endif()
-
-  # save_restore_op contains several operators
-  if ("${TARGET}" STREQUAL "save_restore_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(save);\n")
-  endif()
-
-  # activation_op contains several operators
-  if ("${TARGET}" STREQUAL "activation_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(sigmoid);\n")
-  endif()
-
-  # nccl_op contains several operators
-  if ("${TARGET}" STREQUAL "nccl_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(ncclAllReduce);\n")
-  endif()
-
-  # reduce_op contains several operators
-  if ("${TARGET}" STREQUAL "reduce_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n")
-  endif()
-
-  if ("${TARGET}" STREQUAL "tensor_array_read_write_op")
-    set(pybind_flag 1)
-    file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(read_from_array);\nUSE_NO_KERNEL_OP(write_to_array);\n")
-  endif()
+  # Define operators that don't need pybind here.
+  foreach(manual_pybind_op "net_op" "compare_op" "logical_op" "nccl_op"
+    "tensor_array_read_write_op")
+    if ("${TARGET}" STREQUAL "${manual_pybind_op}")
+      set(pybind_flag 1)
+    endif()
+  endforeach()
+
+  # The registration of USE_OP, please refer to paddle/framework/op_registry.h.
+  # Note that it's enough to just adding one operator to pybind in a *_op.cc file.
+  # And for detail pybind information, please see generated paddle/pybind/pybind.h.
+  file(READ ${TARGET}.cc TARGET_CONTENT)
+  string(REGEX MATCH "REGISTER_OP\\(.*REGISTER_OP\\(" multi_register "${TARGET_CONTENT}")
+  string(REGEX MATCH "REGISTER_OP\\([a-z0-9_]*," one_register "${multi_register}")
+  if (one_register STREQUAL "")
+    string(REPLACE "_op" "" TARGET "${TARGET}")
+  else ()
+    string(REPLACE "REGISTER_OP(" "" TARGET "${one_register}")
+    string(REPLACE "," "" TARGET "${TARGET}")
+  endif()

   # pybind USE_NO_KERNEL_OP
   # HACK: if REGISTER_OP_CPU_KERNEL presents the operator must have kernel
-  file(READ ${TARGET}.cc TARGET_CONTENT)
   string(REGEX MATCH "REGISTER_OP_CPU_KERNEL" regex_result "${TARGET_CONTENT}")
   string(REPLACE "_op" "" TARGET "${TARGET}")
   if (${pybind_flag} EQUAL 0 AND regex_result STREQUAL "")

@@ -171,7 +93,6 @@ function(op_library TARGET)
   # pybind USE_CPU_ONLY_OP
   list(LENGTH cu_srcs cu_srcs_len)
   list(LENGTH cu_cc_srcs cu_cc_srcs_len)
   if (${pybind_flag} EQUAL 0 AND ${cu_srcs_len} EQUAL 0 AND ${cu_cc_srcs_len} EQUAL 0)
     file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n")
     set(pybind_flag 1)

@@ -188,6 +109,7 @@ add_subdirectory(nccl)
 if(WITH_GPU)
   op_library(nccl_op DEPS nccl_common)
+  file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(ncclAllReduce);\n")
 else()
   set(DEPS_OPS ${DEPS_OPS} nccl_op)
 endif()

@@ -238,6 +160,8 @@ list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
 foreach(src ${GENERAL_OPS})
   op_library(${src})
 endforeach()
+
+file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n")
 set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
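Note: the refactor replaces a dozen hand-written special cases with one generic rule: read the target's .cc file, and if it registers more than one operator, extract the first registered name for the generated USE_OP line; otherwise just strip the "_op" suffix from the target name. A rough C++ rendering of that extraction, for illustration only (CMake's regex dialect differs from ECMAScript, hence [\s\S] in place of "."):

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
      // Stand-in for the contents of a multi-operator file such as conv_op.cc.
      const std::string target_content =
          "REGISTER_OP(conv2d, ConvOp, /*...*/);\n"
          "REGISTER_OP(conv3d, ConvOp, /*...*/);\n";

      // Mirrors string(REGEX MATCH "REGISTER_OP\\(.*REGISTER_OP\\(" ...): it
      // only matches when the file registers at least two operators.
      const std::regex multi_re("REGISTER_OP\\([\\s\\S]*REGISTER_OP\\(");
      if (std::regex_search(target_content, multi_re)) {
        // Mirrors string(REGEX MATCH "REGISTER_OP\\([a-z0-9_]*," ...): grab
        // the first registered name and emit the USE_OP line for it.
        const std::regex one_re("REGISTER_OP\\(([a-z0-9_]*),");
        std::smatch one;
        if (std::regex_search(target_content, one, one_re)) {
          std::cout << "USE_OP(" << one[1] << ");\n";  // prints USE_OP(conv2d);
        }
      } else {
        // Single-register file: the CMake code just strips the "_op" suffix.
        std::cout << "USE_OP(<TARGET without _op>);\n";
      }
      return 0;
    }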
python/paddle/v2/fluid/layers/nn.py

@@ -1168,25 +1168,26 @@ def lstm_unit(x_t,
     .. math::

-        i_t & = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i)
+        i_t & = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + b_i)

-        f_t & = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + W_{c_f}c_{t-1} + b_f)
+        f_t & = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + b_f)

         c_t & = f_tc_{t-1} + i_t tanh (W_{x_c}x_t + W_{h_c}h_{t-1} + b_c)

-        o_t & = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + W_{c_o}c_t + b_o)
+        o_t & = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + b_o)

         h_t & = o_t tanh(c_t)

-    The inputs of lstm unit includes :math:`x_t`, :math:`h_{t-1}` and
-    :math:`c_{t-1}`. The implementation separates the linear transformation
-    and non-linear transformation apart. Here, we take :math:`i_t` as an
-    example. The linear transformation is applied by calling a `fc` layer and
-    the equation is:
+    The inputs of lstm unit include :math:`x_t`, :math:`h_{t-1}` and
+    :math:`c_{t-1}`. The 2nd dimensions of :math:`h_{t-1}` and :math:`c_{t-1}`
+    should be same. The implementation separates the linear transformation and
+    non-linear transformation apart. Here, we take :math:`i_t` as an example.
+    The linear transformation is applied by calling a `fc` layer and the
+    equation is:

     .. math::

-        L_{i_t} = W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i
+        L_{i_t} = W_{x_i}x_{t} + W_{h_i}h_{t-1} + b_i

     The non-linear transformation is applied by calling `lstm_unit_op` and the
     equation is:

@@ -1198,9 +1199,12 @@ def lstm_unit(x_t,
     This layer has two outputs including :math:`h_t` and :math:`o_t`.

     Args:
-        x_t (Variable): The input value of current step.
-        hidden_t_prev (Variable): The hidden value of lstm unit.
-        cell_t_prev (Variable): The cell value of lstm unit.
+        x_t (Variable): The input value of current step, a 2-D tensor with
+            shape M x N, M for batch size and N for input size.
+        hidden_t_prev (Variable): The hidden value of lstm unit, a 2-D tensor
+            with shape M x S, M for batch size and S for size of lstm unit.
+        cell_t_prev (Variable): The cell value of lstm unit, a 2-D tensor with
+            shape M x S, M for batch size and S for size of lstm unit.
         forget_bias (float): The forget bias of lstm unit.
         param_attr (ParamAttr): The attributes of parameter weights, used to set
             initializer, name etc.

@@ -1213,14 +1217,15 @@ def lstm_unit(x_t,
     Raises:
         ValueError: The ranks of **x_t**, **hidden_t_prev** and **cell_t_prev**\
                 not be 2 or the 1st dimensions of **x_t**, **hidden_t_prev**\
-                and **cell_t_prev** not be the same.
+                and **cell_t_prev** not be the same or the 2nd dimensions of\
+                **hidden_t_prev** and **cell_t_prev** not be the same.

     Examples:

         .. code-block:: python

             x_t = fluid.layers.fc(input=x_t_data, size=10)
-            prev_hidden = fluid.layers.fc(input=prev_hidden_data, size=20)
+            prev_hidden = fluid.layers.fc(input=prev_hidden_data, size=30)
             prev_cell = fluid.layers.fc(input=prev_cell_data, size=30)
             hidden_value, cell_value = fluid.layers.lstm_unit(x_t=x_t,
                                                    hidden_t_prev=prev_hidden,

@@ -1239,7 +1244,11 @@ def lstm_unit(x_t,
     if x_t.shape[0] != hidden_t_prev.shape[0] or x_t.shape[
             0] != cell_t_prev.shape[0]:
-        raise ValueError("The 1s dimension of x_t, hidden_t_prev and "
+        raise ValueError("The 1st dimensions of x_t, hidden_t_prev and "
                          "cell_t_prev must be the same.")

+    if hidden_t_prev.shape[1] != cell_t_prev.shape[1]:
+        raise ValueError("The 2nd dimensions of hidden_t_prev and "
+                         "cell_t_prev must be the same.")
+
     if bias_attr is None:
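Note: reading the updated docstring equations, the non-linear half that lstm_unit_op applies can be sketched for a single scalar cell as below. This is a sketch only: the placement of forget_bias on the forget-gate logit is an assumption suggested by the argument's name, not stated in the diff.

    #include <cmath>
    #include <cstdio>

    static float sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); }

    // One scalar "cell" of the updated lstm_unit equations. The l_* inputs are
    // outputs of the linear (fc) part, e.g. l_i = W_{x_i}x_t + W_{h_i}h_{t-1} + b_i.
    void lstm_unit_step(float l_i, float l_f, float l_c, float l_o,
                        float c_prev, float forget_bias,
                        float* c_t, float* h_t) {
      const float i = sigmoid(l_i);
      const float f = sigmoid(l_f + forget_bias);  // forget_bias placement: assumed
      const float o = sigmoid(l_o);
      *c_t = f * c_prev + i * std::tanh(l_c);  // c_t = f_t c_{t-1} + i_t tanh(...)
      *h_t = o * std::tanh(*c_t);              // h_t = o_t tanh(c_t)
    }

    int main() {
      float c_t = 0.0f, h_t = 0.0f;
      lstm_unit_step(0.5f, 0.5f, 0.5f, 0.5f, /*c_prev=*/0.1f,
                     /*forget_bias=*/0.0f, &c_t, &h_t);
      std::printf("c_t=%f h_t=%f\n", c_t, h_t);
      return 0;
    }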
python/paddle/v2/fluid/tests/test_layers.py

@@ -177,8 +177,8 @@ class TestBook(unittest.TestCase):
             name='x_t_data', shape=[10, 10], dtype='float32')
         x_t = layers.fc(input=x_t_data, size=10)
         prev_hidden_data = layers.data(
-            name='prev_hidden_data', shape=[10, 20], dtype='float32')
-        prev_hidden = layers.fc(input=prev_hidden_data, size=20)
+            name='prev_hidden_data', shape=[10, 30], dtype='float32')
+        prev_hidden = layers.fc(input=prev_hidden_data, size=30)
         prev_cell_data = layers.data(
             name='prev_cell', shape=[10, 30], dtype='float32')
         prev_cell = layers.fc(input=prev_cell_data, size=30)