PaddlePaddle / PaddleDetection · Commit 17622b48
Commit 17622b48, authored Sep 23, 2017 by ranqiu

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into r-doc

Parents: 44c59adb, a2393fc1
Showing 17 changed files with 95 additions and 61 deletions (+95 -61).
paddle/framework/framework.proto (+1 -1)
paddle/framework/lod_tensor.cc (+6 -10)
paddle/framework/lod_tensor.h (+4 -4)
paddle/framework/lod_tensor_test.cc (+6 -6)
paddle/framework/operator.cc (+2 -2)
paddle/framework/tensor.h (+5 -8)
paddle/framework/tensor_impl.h (+1 -2)
paddle/operators/elementwise_add_op.h (+2 -0)
paddle/operators/elementwise_div_op.h (+2 -0)
paddle/operators/elementwise_sub_op.h (+1 -0)
paddle/operators/recurrent_op.cc (+6 -7)
paddle/operators/recurrent_op.h (+4 -1)
paddle/operators/rnn/recurrent_op_utils.cc (+5 -6)
paddle/operators/rnn/recurrent_op_utils.h (+1 -1)
paddle/pybind/pybind.cc (+9 -10)
paddle/pybind/tensor_py.h (+1 -1)
python/paddle/v2/framework/tests/test_recurrent_op.py (+39 -2)
paddle/framework/framework.proto

```diff
@@ -106,7 +106,7 @@ enum DataType {
 message LoDTensorDesc {
   required DataType data_type = 1;
-  repeated int32 dims = 2;  // [UNK, 640, 480] is saved as [-1, 640, 480]
+  repeated int64 dims = 2;  // [UNK, 640, 480] is saved as [-1, 640, 480]
   optional int32 lod_level = 3 [ default = 0 ];
 }
```
paddle/framework/lod_tensor.cc

```diff
@@ -72,20 +72,16 @@ bool operator==(const LoD& a, const LoD& b) {
   return true;
 }
 
-void LoDTensor::SliceLevels(size_t level_begin, size_t level_end) {
+void LoDTensor::ShrinkLevels(size_t level_begin, size_t level_end) {
   auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
   lod_ = new_lod;
 }
 
-void LoDTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) {
-  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
-                 NumLevels());
-  PADDLE_ENFORCE(elem_begin < NumElements(level),
-                 "element begin [%d] out of range [%d]", elem_begin,
-                 NumElements(level));
-  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
-                 "element end [%d] out of range [%d]", elem_end,
-                 NumElements(level));
+void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin,
+                              size_t elem_end) {
+  PADDLE_ENFORCE_LT(level, NumLevels());
+  PADDLE_ENFORCE_LT(elem_begin, NumElements(level));
+  PADDLE_ENFORCE_LT(elem_end, NumElements(level) + 1);
   auto new_lod = framework::SliceInLevel(lod_, level, elem_begin, elem_end);
   lod_ = new_lod;
```
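Replacing the three message-carrying PADDLE_ENFORCE calls with PADDLE_ENFORCE_LT tightens each bounds check to one line, since a comparison macro can report both operands itself. A minimal sketch of that pattern (illustrative macro, not Paddle's actual definition):

```cpp
#include <cstddef>
#include <stdexcept>
#include <string>

// Illustrative "less-than" enforce: checks a < b and reports both values on
// failure, so the call site needs no hand-written message.
#define ENFORCE_LT(a, b)                                                      \
  do {                                                                        \
    if (!((a) < (b)))                                                         \
      throw std::runtime_error(std::string("enforce failed: ") + #a " < " #b  \
                               + " (" + std::to_string(a) + " vs " +          \
                               std::to_string(b) + ")");                      \
  } while (0)

// Mirrors the three checks in ShrinkInLevel above.
void CheckShrinkBounds(size_t level, size_t elem_begin, size_t elem_end,
                       size_t num_levels, size_t num_elements) {
  ENFORCE_LT(level, num_levels);           // the level must exist
  ENFORCE_LT(elem_begin, num_elements);    // begin lies inside the level
  ENFORCE_LT(elem_end, num_elements + 1);  // end may be one past the last
}
```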
paddle/framework/lod_tensor.h

```diff
@@ -89,15 +89,15 @@ class LoDTensor : public Tensor {
   }
 
   /*
-   * Slice of levels[level_begin:level_end]
+   * Shrink levels[level_begin:level_end]
    */
-  void SliceLevels(size_t level_begin, size_t level_end);
+  void ShrinkLevels(size_t level_begin, size_t level_end);
 
   /*
-   * Slice of elements of a level, [elem_begin: elem_end]
+   * Shrink elements of a level, [elem_begin: elem_end]
    * @note: low performance in slice lod_.
    */
-  void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end);
+  void ShrinkInLevel(size_t level, size_t elem_begin, size_t elem_end);
 
  private:
   LoD lod_;
```
paddle/framework/lod_tensor_test.cc

```diff
@@ -56,11 +56,11 @@ TEST_F(LoDTensorTester, NumElements) {
   ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
 }
 
-TEST_F(LoDTensorTester, SliceLevels) {
+TEST_F(LoDTensorTester, ShrinkLevels) {
   // slice 1 level
   for (size_t level = 0; level < 3UL; ++level) {
     LoDTensor new_lod_tensor = lod_tensor_;
-    new_lod_tensor.SliceLevels(level, level + 1);
+    new_lod_tensor.ShrinkLevels(level, level + 1);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
     ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
@@ -68,7 +68,7 @@ TEST_F(LoDTensorTester, SliceLevels) {
   // slice 2 level
   for (size_t level = 0; level < 2UL; ++level) {
     LoDTensor new_lod_tensor = lod_tensor_;
-    new_lod_tensor.SliceLevels(level, level + 2);
+    new_lod_tensor.ShrinkLevels(level, level + 2);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
     ASSERT_EQ(new_lod_tensor.NumElements(1),
@@ -77,10 +77,10 @@ TEST_F(LoDTensorTester, SliceLevels) {
   }
 }
 
-TEST_F(LoDTensorTester, SliceInLevel) {
+TEST_F(LoDTensorTester, ShrinkInLevel) {
   size_t level = 0;
   LoDTensor new_lod_tensor = lod_tensor_;
-  new_lod_tensor.SliceInLevel(level, 0, 2);
+  new_lod_tensor.ShrinkInLevel(level, 0, 2);
   EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
   EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
   EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
@@ -89,7 +89,7 @@ TEST_F(LoDTensorTester, SliceInLevel) {
   level = 1;
   new_lod_tensor = lod_tensor_;
-  new_lod_tensor.SliceInLevel(level, 0, 2);
+  new_lod_tensor.ShrinkInLevel(level, 0, 2);
   ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
```
paddle/framework/operator.cc

```diff
@@ -60,8 +60,8 @@ std::string OperatorBase::Output(const std::string& name) const {
 const std::vector<std::string>& OperatorBase::Outputs(
     const std::string& name) const {
   auto it = outputs_.find(name);
-  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_,
-                 name);
+  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output called %s",
+                 type_, name);
   return it->second;
 }
```
paddle/framework/tensor.h

```diff
@@ -29,16 +29,19 @@ limitations under the License. */
 namespace paddle {
-namespace framework {
+namespace pybind {
 namespace details {
 template <bool less, size_t i, typename... args>
 struct CastToPyBufferImpl;
 }
+}  // namespace pybind
+
+namespace framework {
 
 class Tensor {
  public:
   template <bool less, size_t i, typename... args>
-  friend struct details::CastToPyBufferImpl;
+  friend struct pybind::details::CastToPyBufferImpl;
 
   template <typename T, size_t D, int MajorType, typename IndexType>
   friend struct EigenTensor;
@@ -165,12 +168,6 @@ class Tensor {
   /*! points to dimensions of memory block. */
   DDim dims_;
 
-  /**
-   * A cache of the number of elements in a tensor.
-   * Would be 0 for an uninitialized tensor.
-   */
-  int64_t numel_;
-
   /**
    * @brief A PlaceHolder may be shared by more than one tensor.
    *
```
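The first hunk moves the CastToPyBufferImpl forward declaration out of framework and into a new pybind namespace, and the friend declaration inside Tensor is requalified to match. A compact sketch of this forward-declare-then-befriend pattern across namespaces, with hypothetical names:

```cpp
// Forward declaration in the binding namespace; the definition lives with
// the binding code (hypothetical names, mirroring pybind::details above).
namespace bindings {
namespace details {
template <typename T>
struct Exporter;
}  // namespace details
}  // namespace bindings

namespace core {
class Buffer {
 public:
  explicit Buffer(int n) : size_(n) {}
  // Befriend the template declared in another namespace so it may read
  // private state when building the Python buffer view.
  template <typename T>
  friend struct bindings::details::Exporter;

 private:
  int size_;
};
}  // namespace core

namespace bindings {
namespace details {
template <typename T>
struct Exporter {
  static int SizeOf(const core::Buffer& b) { return b.size_; }
};
}  // namespace details
}  // namespace bindings

int main() {
  core::Buffer buf(3);
  return bindings::details::Exporter<float>::SizeOf(buf) == 3 ? 0 : 1;
}
```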
paddle/framework/tensor_impl.h

```diff
@@ -147,13 +147,12 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
 inline Tensor& Tensor::Resize(const DDim& dims) {
   dims_ = dims;
-  numel_ = product(dims_);
   return *this;
 }
 
 inline const DDim& Tensor::dims() const { return dims_; }
 
-inline int64_t Tensor::numel() const { return numel_; }
+inline int64_t Tensor::numel() const { return product(dims_); }
 
 template <typename T>
 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
```
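With the cached numel_ field gone, numel() recomputes product(dims_) on demand; Resize can no longer leave a stale count behind, at the cost of a few multiplies per query. A sketch of the derive-don't-cache choice with a stand-in shape type:

```cpp
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Stand-in shape holder: the element count is derived from dims_ on demand
// instead of being cached, so Resize has exactly one thing to update.
class Shape {
 public:
  Shape& Resize(std::vector<int64_t> dims) {
    dims_ = std::move(dims);  // no second field to keep in sync
    return *this;
  }
  int64_t numel() const {
    return std::accumulate(dims_.begin(), dims_.end(), int64_t{1},
                           std::multiplies<int64_t>());
  }

 private:
  std::vector<int64_t> dims_;
};

int main() {
  Shape s;
  s.Resize({2, 3, 4});
  return s.numel() == 24 ? 0 : 1;  // 2 * 3 * 4 elements
}
```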
paddle/operators/elementwise_add_op.h

```diff
@@ -12,6 +12,8 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#pragma once
+
 #include "paddle/operators/elementwise_op.h"
 
 namespace paddle {
```
paddle/operators/elementwise_div_op.h

```diff
@@ -12,6 +12,8 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#pragma once
+
 #include "paddle/operators/elementwise_op.h"
 
 namespace paddle {
```
paddle/operators/elementwise_sub_op.h

```diff
@@ -12,6 +12,7 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#pragma once
 #include "paddle/operators/elementwise_op.h"
 
 namespace paddle {
```
paddle/operators/recurrent_op.cc

```diff
@@ -80,7 +80,6 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
   // Now all variables in scope must be created outside of op.
   PADDLE_ENFORCE_NOT_NULL(stepnet_);
-  PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(),
-                 "stepnet_ op has no outputs");
+  PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs");
 
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
@@ -129,8 +128,8 @@ const rnn::ArgumentName RecurrentOp::kArgName{
     "memories", "pre_memories", "boot_memories"};
 
 const rnn::ArgumentName RecurrentGradientOp::kArgName{
-    "step_net", "step_scopes", "outlink@grad", "inlink@grad",
-    "memories", "pre_memories", "boot_memories@grad"};
+    "step_net", "step_scopes@GRAD", "outlinks@GRAD", "inlinks@GRAD",
+    "memories", "pre_memories", "boot_memories@GRAD"};
 
 RecurrentOp::RecurrentOp(const std::string& type,
                          const framework::VariableNameMap& inputs,
@@ -226,13 +225,13 @@ RecurrentGradientOp::RecurrentGradientOp(
     const framework::VariableNameMap& outputs,
     const framework::AttributeMap& attrs)
     : OperatorBase(type, inputs, outputs, attrs) {
-  rnn::InitArgument(kArgName, &arg_, *this);
+  rnn::InitArgument(kArgName, &arg_, *this, true /*is grad*/);
   alg_.Init(&arg_, &stepnet_);
 }
 
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP_WITHOUT_GRADIENT(
-    recurrent, paddle::operators::RecurrentOp,
-    paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
+REGISTER_OP(recurrent, paddle::operators::RecurrentOp,
+            paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker,
+            recurrent_grad, paddle::operators::RecurrentGradientOp);
```
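The renamed gradient arguments follow Paddle's convention of deriving a gradient variable's name from the forward name plus an @GRAD suffix. A small illustration of the rule (the helper here is a sketch, not necessarily the framework's own API):

```cpp
#include <iostream>
#include <string>

// Sketch of the naming rule behind "step_scopes@GRAD" and friends above:
// a gradient variable is the forward variable's name plus "@GRAD".
std::string GradVarName(const std::string& name) { return name + "@GRAD"; }

int main() {
  std::cout << GradVarName("step_scopes") << "\n";    // step_scopes@GRAD
  std::cout << GradVarName("boot_memories") << "\n";  // boot_memories@GRAD
}
```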
paddle/operators/recurrent_op.h

```diff
@@ -22,7 +22,7 @@ namespace paddle {
 namespace operators {
 
 // The sequence format in RecurrentOp is Tensor<seq_len, batch_size, dim> now.
-// TODO(Yan Chunwei):
+// TODO(Superjom)
 // 1. No-padding computing for sequences with indifinite length in one batch.
 // 2. Hierarchical RNN for sequence with sub-sequence.
 // 3. Internal Memory.
@@ -177,6 +177,9 @@ class RecurrentGradientOp : public framework::OperatorBase {
   static const rnn::ArgumentName kArgName;
 
+  /*
+   * set a stepnet that is created according to a RecurrentOp's stepnet.
+   */
   void set_stepnet(std::unique_ptr<OperatorBase> net) {
     stepnet_ = std::move(net);
   }
```
paddle/operators/rnn/recurrent_op_utils.cc

```diff
@@ -109,15 +109,14 @@ void LinkMemories(const std::vector<Scope*>& scopes,
 }
 
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const framework::OperatorBase& op) {
-  arg->step_scopes = op.Output(name.step_scopes);
-
+                  const framework::OperatorBase& op, bool is_grad) {
+  arg->step_scopes =
+      is_grad ? op.Input(name.step_scopes) : op.Output(name.step_scopes);
   arg->inlinks = op.Inputs(name.inlinks);
-
   arg->outlinks = op.Outputs(name.outlinks);
-
-  auto boot_memories = op.Inputs(name.boot_memories);
+  auto boot_memories =
+      is_grad ? op.Outputs(name.boot_memories) : op.Inputs(name.boot_memories);
 
   // attributes
   auto memories = op.Attr<std::vector<std::string>>(name.memories);
   auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);
```
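The is_grad flag lets a single InitArgument serve both operators: what the forward RecurrentOp produces (step scopes, boot memories) the gradient op consumes, so Input and Output swap roles. A condensed sketch of the dispatch with simplified stand-in types:

```cpp
#include <string>

// Simplified stand-in for an operator with named inputs and outputs.
struct Op {
  std::string Input(const std::string& n) const { return "input:" + n; }
  std::string Output(const std::string& n) const { return "output:" + n; }
};

// In the gradient pass the step scopes arrive as an input (they were the
// forward op's output), so the flag decides which accessor to consult.
std::string ResolveStepScopes(const Op& op, bool is_grad) {
  return is_grad ? op.Input("step_scopes") : op.Output("step_scopes");
}
```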
paddle/operators/rnn/recurrent_op_utils.h

```diff
@@ -78,7 +78,7 @@ void LinkMemories(const std::vector<Scope*>& step_scopes,
                  const int offset, bool infer_shape_mode);
 
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const framework::OperatorBase& op);
+                  const framework::OperatorBase& op, bool is_grad = false);
 
 }  // namespace rnn
 }  // namespace operators
```
paddle/pybind/pybind.cc

```diff
@@ -34,12 +34,7 @@ limitations under the License. */
 namespace py = pybind11;
 
 namespace paddle {
-namespace framework {
-
-using Tensor = framework::Tensor;
-using LoDTensor = framework::LoDTensor;
-using LoD = framework::LoD;
-
+namespace pybind {
 static size_t UniqueIntegerGenerator() {
   static std::atomic<size_t> generator;
   return generator.fetch_add(1);
@@ -56,6 +51,10 @@ bool IsCompileGPU() {
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");
 
+  // using framework in this function. Since it is inside a function, it will
+  // not cause namespace pollution.
+  using namespace paddle::framework;  // NOLINT
+
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor& self) -> py::buffer_info { return CastToPyBuffer(self); })
@@ -107,7 +106,7 @@ PYBIND11_PLUGIN(core) {
 #ifdef PADDLE_ONLY_CPU
             new (&instance) LoDTensor(lod);
 #else
-            paddle::framework::LoD new_lod;
+            LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             new (&instance) LoDTensor(new_lod);
@@ -118,7 +117,7 @@ PYBIND11_PLUGIN(core) {
 #ifdef PADDLE_ONLY_CPU
             self.set_lod(lod);
 #else
-            paddle::framework::LoD new_lod;
+            LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             self.set_lod(new_lod);
@@ -132,7 +131,7 @@ PYBIND11_PLUGIN(core) {
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
-                           [](paddle::framework::Vector<size_t> item) ->
+                           [](Vector<size_t> item) ->
                                std::vector<size_t> {
                              std::vector<size_t> v;
                              v.reserve(item.size());
@@ -317,5 +316,5 @@ All parameter, weight, gradient are variables in Paddle.
   return m.ptr();
 }
-}  // namespace framework
+}  // namespace pybind
 }  // namespace paddle
```
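These hunks hinge on moving the bindings into a pybind namespace and pulling framework's names in with a function-local using-directive, which is why the fully qualified paddle::framework:: prefixes can drop away. For context, a self-contained sketch of the buffer-protocol pattern that def_buffer/CastToPyBuffer implement, using a hypothetical Vec type rather than Paddle's Tensor (and pybind11's modern PYBIND11_MODULE spelling):

```cpp
#include <pybind11/pybind11.h>

#include <vector>

namespace demo {
struct Vec {
  std::vector<float> data;
};
}  // namespace demo

PYBIND11_MODULE(demo_core, m) {
  // Function-local using-directive, same idea as the NOLINT'd one above:
  // short names inside, no namespace pollution outside.
  using namespace demo;  // NOLINT

  pybind11::class_<Vec>(m, "Vec", pybind11::buffer_protocol())
      .def(pybind11::init([](size_t n) { return Vec{std::vector<float>(n)}; }))
      // Expose the raw storage to Python (numpy can wrap it zero-copy).
      .def_buffer([](Vec& self) -> pybind11::buffer_info {
        return pybind11::buffer_info(
            self.data.data(),                              // pointer
            sizeof(float),                                 // item size
            pybind11::format_descriptor<float>::format(),  // dtype
            1,                                             // ndim
            {self.data.size()},                            // shape
            {sizeof(float)});                              // strides
      });
}
```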
paddle/pybind/tensor_py.h

```diff
@@ -23,7 +23,7 @@ namespace py = pybind11;
 namespace paddle {
-namespace framework {
+namespace pybind {
 
 namespace details {
```
python/paddle/v2/framework/tests/test_recurrent_op.py

```diff
@@ -3,6 +3,7 @@ import paddle.v2.framework.core as core
 import unittest
 import numpy as np
 from paddle.v2.framework.op import Operator, RecurrentOp
+from op_test import get_numeric_gradient
 
 
 def py_sigmoid(x):
@@ -47,7 +48,7 @@ class PySimpleRNN(object):
         else:
             pre_mem = self.h_boot
         xW = np.matmul(x, self.W)
-        hU = np.matmul(mem, self.U)
+        hU = np.matmul(pre_mem, self.U)
 
         sum = xW + hU
         self.mems[step_id] = py_sigmoid(sum)
@@ -68,7 +69,7 @@ def create_tensor(scope, name, shape, np_data):
     return tensor
 
 
-class TestRecurrentOp(unittest.TestCase):
+class RecurrentOpTest(unittest.TestCase):
     '''
     Test RNNOp
@@ -158,6 +159,42 @@ class TestRecurrentOp(unittest.TestCase):
         print
         print 'py_output', py_output
         self.assertEqual(pd_output.shape, py_output.shape)
+        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())
+
+
+class RecurrentGradientOpTest(unittest.TestCase):
+    def create_forward_op(self):
+        self.forward_op = RecurrentOp(
+            # inputs
+            inlinks=["x"],
+            boot_memories=["h_boot"],
+            step_net="stepnet",
+            # outputs
+            outlinks=["h"],
+            step_scopes="step_scopes",
+            # attributes
+            pre_memories=["h@pre"],
+            memories=["h@alias"])
+
+        # create a stepnet for RNN
+        stepnet = core.Net.create()
+        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
+        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sig_op = Operator("sigmoid", X="sum", Y="h@alias")
+
+        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
+            stepnet.append_op(op)
+        stepnet.complete_add_op(True)
+        self.forward_op.set_stepnet(stepnet)
+
+    def create_gradient_op(self):
+        a = set()
+        backward_op = core.RecurrentOp.backward(self.forward_op, a)
+
+    def test_grad(self):
+        self.create_forward_op()
+        self.create_gradient_op()
 
 
 if __name__ == '__main__':
```
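The one-line fix in PySimpleRNN is the behavioral change here: the hidden projection must be computed from the previous step's memory, not the current buffer. In formula form, the step the test models is

$$h_t = \operatorname{sigmoid}(x_t W + h_{t-1} U),$$

so `hU` must come from `pre_mem` (that is, $h_{t-1}$), which is exactly what the replacement line does.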