Commit bc1c3e3e (unverified)
Authored by zyfncg on Apr 18, 2022; committed via GitHub on Apr 18, 2022.
Create Tensor by paddle::empty in custom operator (#41840)

* create tensor by empty in custom op
* fix some bugs

Parent: 694ac20f
Showing 13 changed files with 38 additions and 50 deletions (+38, -50).
paddle/fluid/eager/auto_code_generator/final_state_generator/codegen_utils.py  +1 -1
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py  +1 -1
paddle/fluid/pybind/eager_utils.cc  +4 -6
paddle/fluid/pybind/eager_utils.h  +4 -6
paddle/phi/common/place.h  +0 -3
paddle/phi/tests/api/test_data_transform.cc  +8 -11
paddle/phi/tests/api/test_scale_benchmark.cc  +1 -1
python/paddle/fluid/tests/custom_op/context_pool_test_op.cc  +2 -4
python/paddle/fluid/tests/custom_op/custom_concat_op.cc  +4 -4
python/paddle/fluid/tests/custom_op/custom_conj_op.cc  +1 -1
python/paddle/fluid/tests/custom_op/custom_relu_op.cc  +4 -4
python/paddle/fluid/tests/custom_op/custom_relu_op.cu  +4 -4
python/paddle/fluid/tests/custom_op/custom_tanh_op.cc  +4 -4
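The custom-operator changes below all follow one pattern: output tensors that were previously constructed with the PlaceType-based paddle::Tensor constructor are now allocated with paddle::empty, and the paddle::experimental::Place / paddle::experimental::DataType spellings become paddle::Place / paddle::DataType. A minimal before/after sketch (MyOpForward is a hypothetical operator, not one of the files in this commit):

// Hypothetical custom-op body illustrating the migration made by this commit.
std::vector<paddle::Tensor> MyOpForward(const paddle::Tensor& x) {
  // Old style (removed by this commit):
  //   auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
  // New style: allocate an uninitialized output with the input's shape,
  // dtype, and place.
  auto out = paddle::empty(x.shape(), x.dtype(), x.place());
  // ... kernel dispatch that fills `out` is omitted here ...
  return {out};
}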
paddle/fluid/eager/auto_code_generator/final_state_generator/codegen_utils.py
@@ -45,7 +45,7 @@ yaml_types_mapping = {
     'int' : 'int', 'int32_t' : 'int32_t', 'int64_t' : 'int64_t',  'size_t' : 'size_t', \
     'float' : 'float', 'double' : 'double', 'bool' : 'bool', \
     'str' : 'std::string', \
-    'Place' : 'paddle::experimental::Place', 'DataLayout' : 'paddle::experimental::DataLayout', 'DataType' : 'paddle::experimental::DataType', \
+    'Place' : 'paddle::Place', 'DataLayout' : 'paddle::experimental::DataLayout', 'DataType' : 'paddle::experimental::DataType', \
     'int64_t[]' : 'std::vector<int64_t>', 'int[]' : 'std::vector<int>',
     'Tensor' : 'Tensor', 'Tensor[]' : 'std::vector<Tensor>',
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
@@ -46,7 +46,7 @@ atype_to_parsing_function = {
     "std::vector<std::string>": "CastPyArg2Strings",
     "paddle::experimental::Scalar": "CastPyArg2Scalar",
     "paddle::experimental::IntArray": "CastPyArg2IntArray",
-    "paddle::experimental::Place": "CastPyArg2Place",
+    "paddle::Place": "CastPyArg2Place",
     "paddle::experimental::DataType": "CastPyArg2DataType",
 }
paddle/fluid/pybind/eager_utils.cc
@@ -1151,15 +1151,13 @@ std::vector<paddle::framework::Scope*> GetScopePtrListFromArgs(
   return result;
 }
 
-paddle::experimental::Place CastPyArg2Place(PyObject* obj,
-                                            const std::string& op_type,
-                                            ssize_t arg_pos) {
+paddle::Place CastPyArg2Place(PyObject* obj, const std::string& op_type,
+                              ssize_t arg_pos) {
   return CastPyArg2Place(obj, arg_pos);
 }
 
-paddle::experimental::DataType CastPyArg2DataType(PyObject* obj,
-                                                  const std::string& op_type,
-                                                  ssize_t arg_pos) {
+paddle::DataType CastPyArg2DataType(PyObject* obj, const std::string& op_type,
+                                    ssize_t arg_pos) {
   if (obj == Py_None) {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
paddle/fluid/pybind/eager_utils.h
@@ -162,13 +162,11 @@ paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
                                                   const std::string& op_type,
                                                   ssize_t arg_pos);
 
-paddle::experimental::Place CastPyArg2Place(PyObject* obj,
-                                            const std::string& op_type,
-                                            ssize_t arg_pos);
+paddle::Place CastPyArg2Place(PyObject* obj, const std::string& op_type,
+                              ssize_t arg_pos);
 
-paddle::experimental::DataType CastPyArg2DataType(PyObject* obj,
-                                                  const std::string& op_type,
-                                                  ssize_t arg_pos);
+paddle::DataType CastPyArg2DataType(PyObject* obj, const std::string& op_type,
+                                    ssize_t arg_pos);
 
 paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
     const std::string& op_type, const std::string& arg_name, PyObject* args,
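A hedged sketch of how these casting helpers are typically invoked from a Python binding (the function name my_full_binding, the op name "full", and the argument positions are illustrative, not taken from this commit):

// Illustrative fragment of a hand-written binding that parses a DataType and
// a Place from a Python argument tuple using the declarations above.
static PyObject* my_full_binding(PyObject* self, PyObject* args) {
  paddle::DataType dtype =
      CastPyArg2DataType(PyTuple_GET_ITEM(args, 2), "full", 2);
  paddle::Place place = CastPyArg2Place(PyTuple_GET_ITEM(args, 3), "full", 3);
  // ... call the C++ API with dtype/place and wrap the result ...
  Py_RETURN_NONE;
}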
paddle/phi/common/place.h
@@ -213,9 +213,6 @@ std::ostream& operator<<(std::ostream&, const Place&);
 namespace paddle {
 namespace experimental {
 using AllocationType = phi::AllocationType;
-using Place = phi::Place;
-using CPUPlace = phi::CPUPlace;
-using GPUPlace = phi::GPUPlace;
 using GPUPinnedPlace = phi::GPUPinnedPlace;
 using XPUPlace = phi::XPUPlace;
 using NPUPlace = phi::NPUPlace;
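Consistent with this removal, the tests and custom operators below now spell places without the experimental:: prefix. A minimal sketch of constructing places the new way (the GPU device index is illustrative):

// Sketch: paddle::Place and the concrete place aliases are used directly
// instead of their paddle::experimental:: counterparts.
paddle::Place cpu_place = paddle::CPUPlace();
paddle::Place gpu_place = paddle::GPUPlace(0);  // device 0, illustrative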
paddle/phi/tests/api/test_data_transform.cc
@@ -37,13 +37,11 @@ namespace tests {
 // TODO(chenweihang): Remove this test after the API is used in the dygraph
 TEST(API, data_transform_same_place) {
   // 1. create tensor
-  auto x = paddle::experimental::full({3, 3},
-                                      1.0,
-                                      experimental::DataType::COMPLEX128,
-                                      experimental::CPUPlace());
+  auto x = paddle::experimental::full(
+      {3, 3}, 1.0, DataType::COMPLEX128, CPUPlace());
-  auto y = paddle::experimental::full(
-      {3, 3}, 2.0, experimental::DataType::FLOAT32, experimental::CPUPlace());
+  auto y = paddle::experimental::full(
+      {3, 3}, 2.0, DataType::FLOAT32, CPUPlace());
   std::vector<phi::dtype::complex<double>> sum(9, 6.0);

@@ -75,10 +73,10 @@ TEST(API, data_transform_same_place) {
 TEST(Tensor, data_transform_diff_place) {
   // 1. create tensor
   auto x = paddle::experimental::full(
-      {3, 3}, 1.0, experimental::DataType::FLOAT64, experimental::CPUPlace());
+      {3, 3}, 1.0, experimental::DataType::FLOAT64, CPUPlace());
   auto y = paddle::experimental::full(
-      {3, 3}, 2.0, experimental::DataType::FLOAT64, experimental::GPUPlace());
+      {3, 3}, 2.0, experimental::DataType::FLOAT64, GPUPlace());
   std::vector<float> sum(9, 6.0);

@@ -93,10 +91,9 @@ TEST(Tensor, data_transform_diff_place) {
   ASSERT_EQ(out.dtype(), phi::DataType::FLOAT64);
   ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
   ASSERT_EQ(out.initialized(), true);
-  ASSERT_EQ(out.impl()->place(),
-            phi::TransToPhiPlace(experimental::Backend::GPU));
+  ASSERT_EQ(out.impl()->place(), phi::TransToPhiPlace(phi::Backend::GPU));
-  auto ref_out = experimental::copy_to(out, experimental::CPUPlace(), true);
+  auto ref_out = experimental::copy_to(out, CPUPlace(), true);
   auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(ref_out.impl());
   for (size_t i = 0; i < 9; i++) {
paddle/phi/tests/api/test_scale_benchmark.cc
@@ -30,7 +30,7 @@ namespace tests {
 TEST(API, scale) {
   auto x = experimental::full(
-      {3, 4}, 1.0, experimental::DataType::FLOAT32, experimental::CPUPlace());
+      {3, 4}, 1.0, experimental::DataType::FLOAT32, CPUPlace());
   const size_t cycles = 300;
   phi::tests::Timer timer;
python/paddle/fluid/tests/custom_op/context_pool_test_op.cc
@@ -22,8 +22,7 @@
 std::vector<paddle::Tensor> ContextPoolTest(const paddle::Tensor& x) {
   // 1. test cpu context
-  paddle::experimental::Place cpu_place(
-      paddle::experimental::AllocationType::CPU);
+  paddle::Place cpu_place(paddle::experimental::AllocationType::CPU);
   auto* cpu_ctx =
       paddle::experimental::DeviceContextPool::Instance()
           .Get<paddle::experimental::AllocationType::CPU>(cpu_place);

@@ -34,8 +33,7 @@ std::vector<paddle::Tensor> ContextPoolTest(const paddle::Tensor& x) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   // 2. test gpu context
-  paddle::experimental::Place gpu_place(
-      paddle::experimental::AllocationType::GPU);
+  paddle::Place gpu_place(paddle::experimental::AllocationType::GPU);
   auto* gpu_ctx =
       paddle::experimental::DeviceContextPool::Instance()
           .Get<paddle::experimental::AllocationType::GPU>(gpu_place);
python/paddle/fluid/tests/custom_op/custom_concat_op.cc
@@ -75,7 +75,7 @@ std::vector<paddle::Tensor> ConcatForwardDynamicAxis(
   auto out_shape = ComputeOutShape(in_shapes, axis);

   // create output
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, out_shape);
+  auto out = paddle::empty(out_shape, inputs[0].type(), paddle::CPUPlace());

   // calc
   PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(

@@ -106,7 +106,7 @@ std::vector<paddle::Tensor> ConcatBackwardDynamicAxis(
   // create outputs
   std::vector<paddle::Tensor> grad_inputs;
   for (auto& t : inputs) {
-    auto grad = paddle::Tensor(paddle::PlaceType::kCPU, t.shape());
+    auto grad = paddle::empty(t.shape(), t.dtype(), t.place());
     grad_inputs.emplace_back(grad);
   }

@@ -161,7 +161,7 @@ std::vector<paddle::Tensor> ConcatForwardStaticAxis(
   auto out_shape = ComputeOutShape(in_shapes, final_axis);

   // create output
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, out_shape);
+  auto out = paddle::empty(out_shape, inputs[0].type(), paddle::CPUPlace());

   // calc
   PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(

@@ -190,7 +190,7 @@ std::vector<paddle::Tensor> ConcatBackwardStaticAxis(
   // create outputs
   std::vector<paddle::Tensor> grad_inputs;
   for (auto& t : inputs) {
-    auto grad = paddle::Tensor(paddle::PlaceType::kCPU, t.shape());
+    auto grad = paddle::empty(t.shape(), t.dtype(), t.place());
     grad_inputs.emplace_back(grad);
   }
python/paddle/fluid/tests/custom_op/custom_conj_op.cc
@@ -71,7 +71,7 @@ void ConjCPUKernel(const data_t* x_data, int64_t numel, data_t* out_data) {
 std::vector<paddle::Tensor> ConjFunction(const paddle::Tensor& x) {
   CHECK_INPUT(x);

-  paddle::Tensor out(x.place(), x.shape());
+  paddle::Tensor out = paddle::empty(x.shape(), x.dtype(), x.place());

   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "ConjCPUKernel", ([&] {
python/paddle/fluid/tests/custom_op/custom_relu_op.cc
@@ -54,7 +54,7 @@ void relu_cpu_double_backward_kernel(const data_t* out_data,
 }

 std::vector<paddle::Tensor> relu_cpu_forward(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty(x.shape(), x.dtype(), x.place());

   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "relu_cpu_forward", ([&] {

@@ -68,7 +68,7 @@ std::vector<paddle::Tensor> relu_cpu_forward(const paddle::Tensor& x) {
 std::vector<paddle::Tensor> relu_cpu_backward(const paddle::Tensor& x,
                                               const paddle::Tensor& out,
                                               const paddle::Tensor& grad_out) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto grad_x = paddle::empty(x.shape(), x.dtype(), x.place());

   PD_DISPATCH_FLOATING_TYPES(
       out.type(), "relu_cpu_backward", ([&] {
         relu_cpu_backward_kernel<data_t>(

@@ -85,7 +85,7 @@ std::vector<paddle::Tensor> relu_cpu_double_backward(
     const paddle::Tensor& out, const paddle::Tensor& ddx) {
   CHECK_CPU_INPUT(out);
   CHECK_CPU_INPUT(ddx);
-  auto ddout = paddle::Tensor(paddle::PlaceType::kCPU, out.shape());
+  auto ddout = paddle::empty(out.shape(), out.dtype(), out.place());

   PD_DISPATCH_FLOATING_TYPES(
       out.type(), "relu_cpu_double_backward", ([&] {
         relu_cpu_double_backward_kernel<data_t>(

@@ -165,7 +165,7 @@ PD_BUILD_DOUBLE_GRAD_OP(custom_relu)
 std::vector<paddle::Tensor> relu_cpu_backward_without_x(
     const paddle::Tensor& out, const paddle::Tensor& grad_out) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, out.shape());
+  auto grad_x = paddle::empty(out.shape(), out.dtype(), out.place());

   PD_DISPATCH_FLOATING_TYPES(
       out.type(), "relu_cpu_backward", ([&] {
         relu_cpu_backward_kernel<data_t>(
python/paddle/fluid/tests/custom_op/custom_relu_op.cu
@@ -54,7 +54,7 @@ __global__ void relu_cuda_double_backward_kernel(const data_t* out_data,
 std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
   CHECK_GPU_INPUT(x);
-  auto out = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
+  auto out = paddle::empty(x.shape(), x.dtype(), x.place());

   int numel = x.size();
   int block = 512;

@@ -74,7 +74,7 @@ std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
   CHECK_GPU_INPUT(x);
   CHECK_GPU_INPUT(out);
   CHECK_GPU_INPUT(grad_out);
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
+  auto grad_x = paddle::empty(x.shape(), x.dtype(), x.place());

   int numel = out.size();
   int block = 512;

@@ -95,7 +95,7 @@ std::vector<paddle::Tensor> relu_cuda_double_backward(
     const paddle::Tensor& out, const paddle::Tensor& ddx) {
   CHECK_GPU_INPUT(out);
   CHECK_GPU_INPUT(ddx);
-  auto ddout = paddle::Tensor(paddle::PlaceType::kGPU, out.shape());
+  auto ddout = paddle::empty(out.shape(), out.dtype(), out.place());

   int64_t numel = out.size();
   int64_t block = 512;

@@ -117,7 +117,7 @@ std::vector<paddle::Tensor> relu_cuda_double_backward(
 std::vector<paddle::Tensor> relu_cuda_backward_without_x(
     const paddle::Tensor& out, const paddle::Tensor& grad_out) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU, out.shape());
+  auto grad_x = paddle::empty(out.shape(), out.dtype(), out.place());

   int numel = out.size();
   int block = 512;
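Note that after this change the CPU (.cc) and CUDA (.cu) kernels allocate their outputs with an identical line, since paddle::empty takes the device from the input tensor's place, whereas the old constructor had to hard-code PlaceType::kCPU or PlaceType::kGPU. The shared allocation line, for reference:

// Works unchanged in both the CPU and the CUDA custom kernels:
auto out = paddle::empty(x.shape(), x.dtype(), x.place());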
python/paddle/fluid/tests/custom_op/custom_tanh_op.cc
@@ -68,7 +68,7 @@ void tanh_cpu_double_backward_kernel(const data_t* out_data,
 std::vector<paddle::Tensor> TanhForward(const paddle::Tensor& x) {
   CHECK_CPU_INPUT(x);
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty(x.shape(), x.dtype(), x.place());

   PD_DISPATCH_FLOATING_TYPES(
       x.dtype(), "tanh_cpu_forward", ([&] {

@@ -82,7 +82,7 @@ std::vector<paddle::Tensor> TanhForward(const paddle::Tensor& x) {
 std::vector<paddle::Tensor> TanhBackward(const paddle::Tensor& out,
                                          const paddle::Tensor& grad_out) {
   CHECK_CPU_INPUT(out);
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, out.shape());
+  auto grad_x = paddle::empty(out.shape(), out.dtype(), out.place());

   PD_DISPATCH_FLOATING_TYPES(
       out.dtype(), "tanh_cpu_backward", ([&] {
         tanh_cpu_backward_kernel<data_t>(

@@ -101,8 +101,8 @@ std::vector<paddle::Tensor> TanhDoubleBackward(const paddle::Tensor& out,
   CHECK_CPU_INPUT(out);
   CHECK_CPU_INPUT(ddx);
   CHECK_CPU_INPUT(dout);
-  auto dout_new = paddle::Tensor(paddle::PlaceType::kCPU, out.shape());
-  auto ddout = paddle::Tensor(paddle::PlaceType::kCPU, out.shape());
+  auto dout_new = paddle::empty(out.shape(), out.dtype(), out.place());
+  auto ddout = paddle::empty(out.shape(), out.dtype(), out.place());

   PD_DISPATCH_FLOATING_TYPES(
       out.dtype(), "tanh_cpu_double_backward", ([&] {
         tanh_cpu_double_backward_kernel<data_t>(