Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit 85c203b1 (unverified)
Authored by whs on Jun 01, 2018; committed via GitHub on Jun 01, 2018
Parent: bfecb572

Make bilinear_interp_op support attrs from input. (#11041)

* Make bilinear_interp_op support attrs from input.
* Fix python api.
Showing 5 changed files with 111 additions and 15 deletions.
paddle/fluid/operators/bilinear_interp_op.cc                      +23  -0
paddle/fluid/operators/bilinear_interp_op.cu                      +23  -2
paddle/fluid/operators/bilinear_interp_op.h                       +18  -4
python/paddle/fluid/layers/nn.py                                  +13  -6
python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py    +34  -3
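At a glance: out_h and out_w remain static operator attributes, but the op now also accepts a dispensable 1-D int32 input named OutSize whose two entries override the attributes at run time. A minimal NumPy sketch of that override rule, mirroring the updated unit test (the variable names are illustrative):

import numpy as np

# Static attributes, fixed when the program is built.
out_h, out_w = 2, 2

# Optional runtime input; when it is fed, its values win over the attrs.
out_size = np.array([3, 3]).astype("int32")  # (out_h, out_w)
if out_size is not None:
    out_h, out_w = int(out_size[0]), int(out_size[1])

assert (out_h, out_w) == (3, 3)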
--- a/paddle/fluid/operators/bilinear_interp_op.cc
+++ b/paddle/fluid/operators/bilinear_interp_op.cc
@@ -34,9 +34,22 @@ class BilinearInterpOp : public framework::OperatorWithKernel {
     int out_w = ctx->Attrs().Get<int>("out_w");
     PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4");
 
+    if (ctx->HasInput("OutSize")) {
+      auto out_size_dim = ctx->GetInputDim("OutSize");
+      PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
+                        "OutSize's dimension size must be 1");
+      PADDLE_ENFORCE_EQ(out_size_dim[0], 2, "OutSize's dim[0] must be 2");
+    }
     std::vector<int64_t> dim_out({dim_x[0], dim_x[1], out_h, out_w});
     ctx->SetOutputDim("Out", framework::make_ddim(dim_out));
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+  }
 };
 
 class BilinearInterpOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -45,6 +58,10 @@ class BilinearInterpOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X",
              "(Tensor) The input tensor of bilinear interpolation, "
              "This is a 4-D tensor with shape of (N x C x h x w)");
+    AddInput("OutSize",
+             "(Tensor) This is a 1-D tensor with two number. "
+             "The first number is height and the second number is width.")
+        .AsDispensable();
     AddOutput("Out",
               "(Tensor) The dimension of output is (N x C x out_h x out_w]");
@@ -78,6 +95,12 @@ class BilinearInterpOpGrad : public framework::OperatorWithKernel {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
     }
   }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+  }
 };
 
 }  // namespace operators
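Two details worth noting in this file: InferShape still builds dim_out from the static attributes (the kernels below reallocate the output once the runtime size is known), and both the forward and gradient ops gain a GetExpectedKernelType override that keys the kernel data type to X, so the int32 OutSize input does not influence kernel selection. A small sketch of what the new checks accept, in NumPy terms (hypothetical values):

import numpy as np

out_size = np.array([3, 3]).astype("int32")
assert out_size.ndim == 1       # "OutSize's dimension size must be 1"
assert out_size.shape[0] == 2   # "OutSize's dim[0] must be 2"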
--- a/paddle/fluid/operators/bilinear_interp_op.cu
+++ b/paddle/fluid/operators/bilinear_interp_op.cu
@@ -102,10 +102,21 @@ class BilinearInterpOpCUDAKernel : public framework::OpKernel<T> {
     auto* input_t = ctx.Input<Tensor>("X");      // float tensor
     auto* output_t = ctx.Output<Tensor>("Out");  // float tensor
     auto* input = input_t->data<T>();
-    auto* output = output_t->mutable_data<T>(ctx.GetPlace());
 
     int out_h = ctx.Attr<int>("out_h");
     int out_w = ctx.Attr<int>("out_w");
+    auto out_dims = output_t->dims();
+    auto out_size_t = ctx.Input<Tensor>("OutSize");
+    if (out_size_t != nullptr) {
+      Tensor sizes;
+      framework::TensorCopy(*out_size_t, platform::CPUPlace(), &sizes);
+      auto size_data = sizes.data<int>();
+      out_h = size_data[0];
+      out_w = size_data[1];
+    }
+    auto* output = output_t->mutable_data<T>(
+        {out_dims[0], out_dims[1], out_h, out_w}, ctx.GetPlace());
 
     int batch_size = input_t->dims()[0];
     int channels = input_t->dims()[1];
     int in_h = input_t->dims()[2];
@@ -139,8 +150,8 @@ class BilinearInterpGradOpCUDAKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* d_input_t = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* d_output_t = ctx.Input<Tensor>(framework::GradVarName("Out"));
-    auto* d_input = d_input_t->mutable_data<T>(ctx.GetPlace());
     auto* d_output = d_output_t->data<T>();
+    auto* d_input = d_input_t->mutable_data<T>(ctx.GetPlace());
 
     auto& device_ctx =
         ctx.template device_context<platform::CUDADeviceContext>();
@@ -149,6 +160,16 @@ class BilinearInterpGradOpCUDAKernel : public framework::OpKernel<T> {
     int out_h = ctx.Attr<int>("out_h");
     int out_w = ctx.Attr<int>("out_w");
+
+    auto out_size_t = ctx.Input<Tensor>("OutSize");
+    if (out_size_t != nullptr) {
+      Tensor sizes;
+      framework::TensorCopy(*out_size_t, platform::CPUPlace(), &sizes);
+      auto size_data = sizes.data<int>();
+      out_h = size_data[0];
+      out_w = size_data[1];
+    }
+
     int batch_size = d_input_t->dims()[0];
     int channels = d_input_t->dims()[1];
     int in_h = d_input_t->dims()[2];
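Design note: on the CUDA path OutSize lives in device memory, so the kernels first copy it to the host with framework::TensorCopy(*out_size_t, platform::CPUPlace(), &sizes) and read the two ints from the CPU-side copy; the CPU kernels in bilinear_interp_op.h below read them directly via data<int>(). Note also that mutable_data<T>() is now called only after the final (out_h, out_w) is resolved, so the output buffer is allocated with the runtime shape rather than the attribute shape.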
--- a/paddle/fluid/operators/bilinear_interp_op.h
+++ b/paddle/fluid/operators/bilinear_interp_op.h
@@ -24,11 +24,18 @@ class BilinearInterpKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* input_t = ctx.Input<Tensor>("X");      // float tensor
     auto* output_t = ctx.Output<Tensor>("Out");  // float tensor
+    auto out_dims = output_t->dims();
     auto* input = input_t->data<T>();
-    auto* output = output_t->mutable_data<T>(ctx.GetPlace());
 
     int out_h = ctx.Attr<int>("out_h");
     int out_w = ctx.Attr<int>("out_w");
+    auto out_size_t = ctx.Input<Tensor>("OutSize");
+    if (out_size_t != nullptr) {
+      auto out_size_data = out_size_t->data<int>();
+      out_h = out_size_data[0];
+      out_w = out_size_data[1];
+    }
+    auto* output = output_t->mutable_data<T>(
+        {out_dims[0], out_dims[1], out_h, out_w}, ctx.GetPlace());
 
     int batch_size = input_t->dims()[0];
     int channels = input_t->dims()[1];
     int in_h = input_t->dims()[2];
@@ -83,9 +90,8 @@ class BilinearInterpGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* d_input_t = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* d_output_t = ctx.Input<Tensor>(framework::GradVarName("Out"));
-    auto* d_input = d_input_t->mutable_data<T>(ctx.GetPlace());
     auto* d_output = d_output_t->data<T>();
+    auto* d_input = d_input_t->mutable_data<T>(ctx.GetPlace());
 
     auto& device_ctx =
         ctx.template device_context<platform::CPUDeviceContext>();
     math::SetConstant<platform::CPUDeviceContext, T> zero;
@@ -93,6 +99,14 @@ class BilinearInterpGradKernel : public framework::OpKernel<T> {
     int out_h = ctx.Attr<int>("out_h");
     int out_w = ctx.Attr<int>("out_w");
+
+    auto out_size_t = ctx.Input<Tensor>("OutSize");
+    if (out_size_t != nullptr) {
+      auto out_size_data = out_size_t->data<int>();
+      out_h = out_size_data[0];
+      out_w = out_size_data[1];
+    }
+
     int batch_size = d_input_t->dims()[0];
     int channels = d_input_t->dims()[1];
     int in_h = d_input_t->dims()[2];
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3944,7 +3944,7 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None):
         input (Variable): The input tensor of bilinear interpolation,
                           This is a 4-D tensor of the shape
                           (num_batches, channels, in_h, in_w).
-        out_shape(list|tuple|None): Output shape of bilinear interpolation
+        out_shape(list|tuple|Variable|None): Output shape of bilinear interpolation
                           layer, the shape is (out_h, out_w).
                           Default: None
         scale(int|None): The multiplier for the input height or width.
@@ -3971,13 +3971,20 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None):
     def _is_list_or_turple_(data):
         return (isinstance(data, list) or isinstance(data, tuple))
 
+    out_h = 0
+    out_w = 0
+    inputs = {"X": input}
     if out_shape is not None:
-        if not (_is_list_or_turple_(out_shape) and len(out_shape) == 2):
+        if not (_is_list_or_turple_(out_shape) and
+                len(out_shape) == 2) and (out_shape is not Variable):
             raise ValueError('out_shape should be a list or tuple ',
                              'with length 2, (out_h, out_w).')
-        out_shape = list(map(int, out_shape))
-        out_h = out_shape[0]
-        out_w = out_shape[1]
+        if _is_list_or_turple_(out_shape):
+            out_shape = list(map(int, out_shape))
+            out_h = out_shape[0]
+            out_w = out_shape[1]
+        else:
+            inputs['OutSize'] = out_shape
     else:
         out_h = int(input.shape[2] * scale)
         out_w = int(input.shape[3] * scale)
@@ -3985,7 +3992,7 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None):
     out = helper.create_tmp_variable(dtype)
     helper.append_op(
         type="bilinear_interp",
-        inputs={"X": input},
+        inputs=inputs,
         outputs={"Out": out},
         attrs={"out_h": out_h,
                "out_w": out_w})
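A hedged sketch of the Python-side usage that the updated docstring describes (2018-era fluid API; the names x and size are illustrative, not from the commit):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[3, 9, 6], dtype='float32')

# Static output size, as before:
y_static = fluid.layers.upsampling_bilinear2d(x, out_shape=[12, 12])

# Output size as a 1-D int32 tensor, wired to the op's dispensable
# 'OutSize' input so it can be decided at run time:
size = fluid.layers.data(
    name='size', shape=[2], dtype='int32', append_batch_size=False)
y_dynamic = fluid.layers.upsampling_bilinear2d(x, out_shape=size)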
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
@@ -17,7 +17,10 @@ import numpy as np
 from op_test import OpTest
 
 
-def bilinear_interp_np(input, out_h, out_w):
+def bilinear_interp_np(input, out_h, out_w, out_size):
+    if out_size is not None:
+        out_h = out_size[0]
+        out_w = out_size[1]
     batch_size, channel, in_h, in_w = input.shape
     if out_h > 1:
         ratio_h = (in_h - 1.0) / (out_h - 1.0)
@@ -49,12 +52,15 @@ def bilinear_interp_np(input, out_h, out_w):
 
 class TestBilinearInterpOp(OpTest):
     def setUp(self):
+        self.out_size = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
        input_np = np.random.random(self.input_shape).astype("float32")
-        output_np = bilinear_interp_np(input_np, self.out_h, self.out_w)
+        output_np = bilinear_interp_np(input_np, self.out_h, self.out_w,
+                                       self.out_size)
 
         self.inputs = {'X': input_np}
+        if self.out_size is not None:
+            self.inputs['OutSize'] = self.out_size
         self.attrs = {'out_h': self.out_h, 'out_w': self.out_w}
         self.outputs = {'Out': output_np}
@@ -68,6 +74,7 @@ class TestBilinearInterpOp(OpTest):
         self.input_shape = [2, 3, 4, 4]
         self.out_h = 2
         self.out_w = 2
+        self.out_size = np.array([3, 3]).astype("int32")
 
 
 class TestCase1(TestBilinearInterpOp):
@@ -91,5 +98,29 @@ class TestCase3(TestBilinearInterpOp):
         self.out_w = 128
 
 
+class TestCase4(TestBilinearInterpOp):
+    def init_test_case(self):
+        self.input_shape = [4, 1, 7, 8]
+        self.out_h = 1
+        self.out_w = 1
+        self.out_size = np.array([2, 2]).astype("int32")
+
+
+class TestCase5(TestBilinearInterpOp):
+    def init_test_case(self):
+        self.input_shape = [3, 3, 9, 6]
+        self.out_h = 12
+        self.out_w = 12
+        self.out_size = np.array([11, 11]).astype("int32")
+
+
+class TestCase6(TestBilinearInterpOp):
+    def init_test_case(self):
+        self.input_shape = [1, 1, 128, 64]
+        self.out_h = 64
+        self.out_w = 128
+        self.out_size = np.array([65, 129]).astype("int32")
+
+
 if __name__ == "__main__":
     unittest.main()
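Since the diff shows only fragments of bilinear_interp_np, here is a self-contained NumPy sketch of the align-corners-style interpolation it implements, consistent with the ratio_h = (in_h - 1.0) / (out_h - 1.0) line visible above (a reconstruction, not the test file's exact code):

import numpy as np

def bilinear_interp_ref(x, out_h, out_w, out_size=None):
    """Reference bilinear interpolation over NCHW input, align-corners style."""
    if out_size is not None:
        out_h, out_w = int(out_size[0]), int(out_size[1])
    n, c, in_h, in_w = x.shape
    ratio_h = (in_h - 1.0) / (out_h - 1.0) if out_h > 1 else 0.0
    ratio_w = (in_w - 1.0) / (out_w - 1.0) if out_w > 1 else 0.0
    out = np.zeros((n, c, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        h = int(ratio_h * i)                # top source row
        hid = 1 if h < in_h - 1 else 0      # step to the bottom row, clamped
        h1lambda = ratio_h * i - h          # weight of the bottom row
        h2lambda = 1.0 - h1lambda           # weight of the top row
        for j in range(out_w):
            w = int(ratio_w * j)            # left source column
            wid = 1 if w < in_w - 1 else 0  # step to the right column, clamped
            w1lambda = ratio_w * j - w      # weight of the right column
            w2lambda = 1.0 - w1lambda       # weight of the left column
            out[:, :, i, j] = (
                h2lambda * (w2lambda * x[:, :, h, w] +
                            w1lambda * x[:, :, h, w + wid]) +
                h1lambda * (w2lambda * x[:, :, h + hid, w] +
                            w1lambda * x[:, :, h + hid, w + wid]))
    return out

# Example: upsample a 2x2 map to 3x3; the four corners are preserved.
x = np.arange(4, dtype="float32").reshape(1, 1, 2, 2)
y = bilinear_interp_ref(x, 3, 3)
assert y[0, 0, 0, 0] == x[0, 0, 0, 0] and y[0, 0, -1, -1] == x[0, 0, -1, -1]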