Crayon鑫 / Paddle (forked from PaddlePaddle/Paddle)
Commit 5ceb7d12
Authored Aug 06, 2017 by Yi Wang

    Merge branch 'develop' of https://github.com/paddlepaddle/paddle into eigen_warning

Parents: b36f3ae7, 9eeb8fde
Showing 16 changed files with 119 additions and 91 deletions (+119 -91).
CMakeLists.txt                                              +2  -2
cmake/configure.cmake                                       +0  -2
cmake/external/gflags.cmake                                 +8  -1
paddle/framework/operator.cc                                +2  -2
paddle/framework/operator.h                                 +4  -4
paddle/function/nnpack/NNPACKConvOp.cpp                     +53 -47
paddle/gserver/layers/ExpandConvLayer.cpp                   +1  -2
python/paddle/v2/framework/tests/op_test_util.py            +13 -8
python/paddle/v2/framework/tests/test_add_two_op.py         +5  -3
python/paddle/v2/framework/tests/test_cross_entropy_op.py   +6  -4
python/paddle/v2/framework/tests/test_mean_op.py            +2  -2
python/paddle/v2/framework/tests/test_mul_op.py             +5  -3
python/paddle/v2/framework/tests/test_rowwise_add_op.py     +5  -3
python/paddle/v2/framework/tests/test_sgd_op.py             +7  -4
python/paddle/v2/framework/tests/test_sigmoid_op.py         +2  -2
python/paddle/v2/framework/tests/test_softmax_op.py         +4  -2
CMakeLists.txt

@@ -36,8 +36,8 @@ include(simd)
 ################################ Configurations #######################################
 option(WITH_GPU     "Compile PaddlePaddle with NVIDIA GPU"          ${CUDA_FOUND})
 option(WITH_AVX     "Compile PaddlePaddle with AVX intrinsics"      ${AVX_FOUND})
-option(WITH_MKLDNN  "Compile PaddlePaddle with mkl-dnn support."    OFF)
-option(WITH_MKLML   "Compile PaddlePaddle with mklml package."      OFF)
+option(WITH_MKLDNN  "Compile PaddlePaddle with mkl-dnn support."    ${AVX_FOUND})
+option(WITH_MKLML   "Compile PaddlePaddle with mklml package."      ${AVX_FOUND})
 option(WITH_DSO     "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing"        ON)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api"       ON)
cmake/configure.cmake

@@ -74,8 +74,6 @@ if(WITH_MKLDNN)
     set(OPENMP_FLAGS "-fopenmp")
     set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
     set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
-    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -L${MKLDNN_IOMP_DIR} -liomp5 -Wl,--as-needed")
-    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${MKLDNN_IOMP_DIR} -liomp5 -Wl,--as-needed")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
 else()
cmake/external/gflags.cmake

@@ -28,7 +28,14 @@ INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
 ExternalProject_Add(
     extern_gflags
     ${EXTERNAL_PROJECT_LOG_ARGS}
-    GIT_REPOSITORY  "https://github.com/gflags/gflags.git"
+    # TODO(yiwang): The annoying warnings mentioned in
+    # https://github.com/PaddlePaddle/Paddle/issues/3277 are caused by
+    # gflags.  I fired a PR https://github.com/gflags/gflags/pull/230
+    # to fix it.  Before it gets accepted by the gflags team, we use
+    # my personal fork, which contains above fix, temporarily.  Let's
+    # change this back to the official Github repo once my PR is
+    # merged.
+    GIT_REPOSITORY  "https://github.com/wangkuiyi/gflags.git"
     PREFIX          ${GFLAGS_SOURCES_DIR}
     UPDATE_COMMAND  ""
     CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
paddle/framework/operator.cc

@@ -22,14 +22,14 @@ namespace framework {
 template <>
 Eigen::DefaultDevice& ExecutionContext::GetEigenDevice<
     platform::CPUPlace, Eigen::DefaultDevice>() const {
-  return *device_context_.get_eigen_device<Eigen::DefaultDevice>();
+  return *device_context_->get_eigen_device<Eigen::DefaultDevice>();
 }
 
 #ifndef PADDLE_ONLY_CPU
 template <>
 Eigen::GpuDevice& ExecutionContext::GetEigenDevice<platform::GPUPlace,
                                                    Eigen::GpuDevice>() const {
-  return *device_context_.get_eigen_device<Eigen::GpuDevice>();
+  return *device_context_->get_eigen_device<Eigen::GpuDevice>();
 }
 #endif
paddle/framework/operator.h

@@ -252,7 +252,7 @@ struct EigenDeviceConverter<platform::GPUPlace> {
 class ExecutionContext : public OperatorContext {
  public:
   ExecutionContext(const OperatorBase* op, const Scope& scope,
-                   const platform::DeviceContext& device_context)
+                   const platform::DeviceContext* device_context)
       : OperatorContext(op, scope), device_context_(device_context) {}
 
   template <typename PlaceType,
@@ -260,9 +260,9 @@ class ExecutionContext : public OperatorContext {
             typename EigenDeviceConverter<PlaceType>::EigenDeviceType>
   DeviceType& GetEigenDevice() const;
 
-  platform::Place GetPlace() const { return device_context_.GetPlace(); }
+  platform::Place GetPlace() const { return device_context_->GetPlace(); }
 
-  const platform::DeviceContext& device_context_;
+  const platform::DeviceContext* device_context_;
 };
 
 class OpKernel {
@@ -311,7 +311,7 @@ class OperatorWithKernel : public OperatorBase {
   void Run(const Scope& scope,
            const platform::DeviceContext& dev_ctx) const final {
     auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx));
-    opKernel->Compute(ExecutionContext(this, scope, dev_ctx));
+    opKernel->Compute(ExecutionContext(this, scope, &dev_ctx));
   }
 
   static std::unordered_map<std::string /* op_type */, OpKernelMap>&
paddle/function/nnpack/NNPACKConvOp.cpp

@@ -49,9 +49,7 @@ class NNPACKConvFunction : public ConvFunctionBase {
 public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
-    CHECK_EQ(groups_, (size_t)1);
     algorithm_ = get_nnp_convolution_algorithm(config.get<std::string>("algo"));
-    // algorithm_ = nnp_convolution_algorithm_auto;
     transform_strategy_ = nnp_convolution_transform_strategy_compute;
     nnp_status status = nnp_initialize();
     CHECK_EQ(status, nnp_status_success);
@@ -67,8 +65,7 @@ public:
     }
   }
 
-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& input = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& output = outputs[0].shape();
@@ -91,8 +88,8 @@ public:
     size_t filterHeight = getFilterHeight(filter);
     size_t filterWidth = getFilterWidth(filter);
     size_t outputChannels = output[1];
-    // size_t outputHeight = output[2];
-    // size_t outputWidth = output[3];
+    size_t outputHeight = output[2];
+    size_t outputWidth = output[3];
 
     nnp_size inputSize = {.width = inputWidth, .height = inputHeight};
     nnp_padding padding = {.top = (size_t)paddingH(),
@@ -171,49 +168,58 @@ public:
       }
     }
 
+    size_t inputOffset = inputChannels / groups_ * inputHeight * inputWidth;
+    size_t outputOffset = outputChannels / groups_ * outputHeight * outputWidth;
+    size_t filterOffset = filter.getElements() / groups_;
+
     if (batchSize == 1) {
-      nnp_status status =
-          nnp_convolution_inference(algorithm_,
-                                    transform_strategy_,
-                                    inputChannels,
-                                    outputChannels,
-                                    inputSize,
-                                    padding,
-                                    kernelSize,
-                                    outputSubsampling,
-                                    inputData,
-                                    filterData,
-                                    nullptr, /* bias */
-                                    outputData,
-                                    bufferPtr,
-                                    sizePtr,
-                                    nnp_activation_identity,
-                                    nullptr,
-                                    threadpool_, /* threadpool */
-                                    nullptr);
-      CHECK_EQ(status, nnp_status_success);
+      for (size_t g = 0; g < groups_; g++) {
+        nnp_status status =
+            nnp_convolution_inference(algorithm_,
+                                      transform_strategy_,
+                                      inputChannels / groups_,
+                                      outputChannels / groups_,
+                                      inputSize,
+                                      padding,
+                                      kernelSize,
+                                      outputSubsampling,
+                                      inputData + inputOffset * g,
+                                      filterData + filterOffset * g,
+                                      nullptr, /* bias */
+                                      outputData + outputOffset * g,
+                                      bufferPtr,
+                                      sizePtr,
+                                      nnp_activation_identity,
+                                      nullptr,
+                                      threadpool_, /* threadpool */
+                                      nullptr);
+        CHECK_EQ(status, nnp_status_success);
+      }
     } else {
-      // only supports stride = 1
-      CHECK_EQ(strideH(), 1);
-      CHECK_EQ(strideW(), 1);
-      nnp_status status = nnp_convolution_output(algorithm_,
-                                                 batchSize,
-                                                 inputChannels,
-                                                 outputChannels,
-                                                 inputSize,
-                                                 padding,
-                                                 kernelSize,
-                                                 inputData,
-                                                 filterData,
-                                                 nullptr, /* bias */
-                                                 outputData,
-                                                 bufferPtr,
-                                                 sizePtr,
-                                                 nnp_activation_identity,
-                                                 nullptr,
-                                                 threadpool_, /* threadpool */
-                                                 nullptr);
-      CHECK_EQ(status, nnp_status_success);
+      for (size_t g = 0; g < groups_; g++) {
+        // only supports stride = 1
+        CHECK_EQ(strideH(), 1);
+        CHECK_EQ(strideW(), 1);
+        nnp_status status =
+            nnp_convolution_output(algorithm_,
+                                   batchSize,
+                                   inputChannels / groups_,
+                                   outputChannels / groups_,
+                                   inputSize,
+                                   padding,
+                                   kernelSize,
+                                   inputData + inputOffset * g,
+                                   filterData + filterOffset * g,
+                                   nullptr, /* bias */
+                                   outputData + outputOffset * g,
+                                   bufferPtr,
+                                   sizePtr,
+                                   nnp_activation_identity,
+                                   nullptr,
+                                   threadpool_, /* threadpool */
+                                   nullptr);
+        CHECK_EQ(status, nnp_status_success);
+      }
     }
   }
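The grouped path above carves each tensor into per-group channel slices with plain offset arithmetic: group g advances the input, filter, and output pointers by one group's worth of elements. A minimal sketch of that arithmetic (in Python, with made-up sizes; the names mirror the diff):

    groups = 2
    input_channels, input_height, input_width = 4, 8, 8
    output_channels, output_height, output_width = 6, 8, 8
    # Hypothetical 3x3 kernels; stands in for filter.getElements() above.
    filter_elements = output_channels * (input_channels // groups) * 3 * 3

    input_offset = input_channels // groups * input_height * input_width
    output_offset = output_channels // groups * output_height * output_width
    filter_offset = filter_elements // groups

    for g in range(groups):
        # Matches inputData + inputOffset * g, filterData + filterOffset * g,
        # and outputData + outputOffset * g in the loops above.
        print("group %d: input += %d, filter += %d, output += %d"
              % (g, input_offset * g, filter_offset * g, output_offset * g))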
paddle/gserver/layers/ExpandConvLayer.cpp

@@ -57,8 +57,7 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
     convGradFilterType = "GemmConvGradFilter";
   }
 
-  if (FLAGS_use_nnpack) {
-    CHECK_EQ(isDeconv_, false);
+  if (FLAGS_use_nnpack && !isDeconv_) {
     createFunction(forward_,
                    "NNPACKConv",
                    FuncConfig()
python/paddle/v2/framework/tests/op_test_util.py

@@ -33,23 +33,28 @@ class OpTestMeta(type):
 
             for place in places:
                 for in_name in func.all_input_args:
-                    if hasattr(self, in_name):
+                    if hasattr(self, "inputs") and in_name in self.inputs:
                         kwargs[in_name] = in_name
                         var = scope.new_var(in_name).get_tensor()
-                        arr = getattr(self, in_name)
+                        arr = self.inputs[in_name]
                         var.set_dims(arr.shape)
                         var.set(arr, place)
                     else:
                         kwargs[in_name] = "@EMPTY@"
 
                 for out_name in func.all_output_args:
-                    if hasattr(self, out_name):
-                        kwargs[out_name] = out_name
-                        scope.new_var(out_name).get_tensor()
+                    if not hasattr(self, "outputs"):
+                        raise ValueError(
+                            "The test op must set self.outputs dict.")
+                    if out_name not in self.outputs:
+                        raise ValueError("The %s is not in self.outputs dict." %
+                                         (out_name))
+                    kwargs[out_name] = out_name
+                    scope.new_var(out_name).get_tensor()
 
                 for attr_name in func.all_attr_args:
-                    if hasattr(self, attr_name):
-                        kwargs[attr_name] = getattr(self, attr_name)
+                    if hasattr(self, "attrs") and attr_name in self.attrs:
+                        kwargs[attr_name] = self.attrs[attr_name]
 
                 op = func(**kwargs)
@@ -60,7 +65,7 @@ class OpTestMeta(type):
 
                 for out_name in func.all_output_args:
                     actual = numpy.array(scope.find_var(out_name).get_tensor())
-                    expect = getattr(self, out_name)
+                    expect = self.outputs[out_name]
                     numpy.isclose(actual, expect)
 
         obj.test_all = test_all
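With this change a test case no longer sets one attribute per tensor; it declares everything in self.inputs / self.outputs dicts, plus an optional self.attrs dict for operator attributes, as the updated test files below all do. A minimal sketch of a test written against the new protocol (the class name is made up; add_two is the operator exercised by test_add_two_op.py):

    import unittest

    import numpy

    from op_test_util import OpTestMeta


    class ExampleAddTwoTest(unittest.TestCase):
        __metaclass__ = OpTestMeta

        def setUp(self):
            self.type = "add_two"
            x = numpy.random.random((4, 4)).astype("float32")
            y = numpy.random.random((4, 4)).astype("float32")
            self.inputs = {'X': x, 'Y': y}  # copied into scope variables
            self.outputs = {'Out': x + y}   # compared via numpy.isclose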
python/paddle/v2/framework/tests/test_add_two_op.py

@@ -12,9 +12,11 @@ class TestAddOp(unittest.TestCase):
     def setUp(self):
         self.type = "add_two"
-        self.X = numpy.random.random((102, 105)).astype("float32")
-        self.Y = numpy.random.random((102, 105)).astype("float32")
-        self.Out = self.X + self.Y
+        self.inputs = {
+            'X': numpy.random.random((102, 105)).astype("float32"),
+            'Y': numpy.random.random((102, 105)).astype("float32")
+        }
+        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
 
 
 class TestAddGradOp(unittest.TestCase):
python/paddle/v2/framework/tests/test_cross_entropy_op.py

@@ -7,15 +7,17 @@ class TestSGD(unittest.TestCase):
     __metaclass__ = OpTestMeta
 
     def setUp(self):
         # TODO this unit test is not passed
         self.type = "onehot_cross_entropy"
         batch_size = 100
         class_num = 10
-        self.X = numpy.random.random((batch_size, class_num)).astype("float32")
-        self.label = 5 * numpy.ones(batch_size).astype("int32")
+        X = numpy.random.random((batch_size, class_num)).astype("float32")
+        label = 5 * numpy.ones(batch_size).astype("int32")
+        self.inputs = {'X': X, 'label': label}
         Y = []
         for i in range(0, batch_size):
-            Y.append(-numpy.log(self.X[i][self.label[i]]))
-        self.Y = numpy.array(Y).astype("float32")
+            Y.append(-numpy.log(X[i][label[i]]))
+        self.outputs = {'Y': numpy.array(Y).astype("float32")}
 
 
 # TODO(superjom) add gradient check
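The Python loop above gathers one probability per row, X[i][label[i]], and negates its log. For reference, a vectorized sketch of the same computation (not part of the diff):

    import numpy

    batch_size, class_num = 100, 10
    X = numpy.random.random((batch_size, class_num)).astype("float32")
    label = 5 * numpy.ones(batch_size).astype("int32")
    # One-hot cross entropy: Y[i] = -log(X[i, label[i]]) for every row at once.
    Y = -numpy.log(X[numpy.arange(batch_size), label])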
python/paddle/v2/framework/tests/test_mean_op.py

@@ -8,8 +8,8 @@ class TestMeanOp(unittest.TestCase):
     def setUp(self):
         self.type = "mean"
-        self.X = np.random.random((32, 784)).astype("float32")
-        self.Out = np.mean(self.X)
+        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs['X'])}
 
 
 if __name__ == '__main__':
python/paddle/v2/framework/tests/test_mul_op.py

@@ -8,9 +8,11 @@ class TestMulOp(unittest.TestCase):
     def setUp(self):
         self.type = "mul"
-        self.X = np.random.random((32, 84)).astype("float32")
-        self.Y = np.random.random((84, 100)).astype("float32")
-        self.Out = np.dot(self.X, self.Y)
+        self.inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'Y': np.random.random((84, 100)).astype("float32")
+        }
+        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
 
 
 if __name__ == '__main__':
python/paddle/v2/framework/tests/test_rowwise_add_op.py

@@ -8,9 +8,11 @@ class TestRowwiseAddOp(unittest.TestCase):
     def setUp(self):
         self.type = "rowwise_add"
-        self.X = np.random.random((32, 84)).astype("float32")
-        self.b = np.random.random(84).astype("float32")
-        self.Out = np.add(self.X, self.b)
+        self.inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'b': np.random.random(84).astype("float32")
+        }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
 
 
 if __name__ == '__main__':
python/paddle/v2/framework/tests/test_sgd_op.py

@@ -8,10 +8,13 @@ class TestSGD(unittest.TestCase):
     def setUp(self):
         self.type = "sgd"
-        self.param = numpy.random.random((102, 105)).astype("float32")
-        self.grad = numpy.random.random((102, 105)).astype("float32")
-        self.learning_rate = 0.1
-        self.param_out = self.param - self.learning_rate * self.grad
+        w = numpy.random.random((102, 105)).astype("float32")
+        g = numpy.random.random((102, 105)).astype("float32")
+        lr = 0.1
+        self.inputs = {'param': w, 'grad': g}
+        self.attrs = {'learning_rate': lr}
+        self.outputs = {'param_out': w - lr * g}
 
 
 if __name__ == "__main__":
python/paddle/v2/framework/tests/test_sigmoid_op.py

@@ -8,8 +8,8 @@ class TestSigmoidOp(unittest.TestCase):
     def setUp(self):
         self.type = "sigmoid"
-        self.X = np.random.random((32, 100)).astype("float32")
-        self.Y = 1 / (1 + np.exp(-self.X))
+        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
 
 
 if __name__ == '__main__':
python/paddle/v2/framework/tests/test_softmax_op.py

@@ -19,8 +19,10 @@ class TestSoftmaxOp(unittest.TestCase):
     def setUp(self):
         self.type = "softmax"
-        self.X = np.random.random((32, 100)).astype("float32")
-        self.Y = np.apply_along_axis(stable_softmax, 1, self.X)
+        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.outputs = {
+            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+        }
 
 
 class TestSoftmaxGradOp(unittest.TestCase):