BaiXuePrincess / Paddle — forked from PaddlePaddle / Paddle
Commit 797a7184 (unverified) · authored on Apr 06, 2018 by Yi Wang · committed via GitHub on Apr 06, 2018
Unify Fluid code to Google C++ style (#9685)
Parent: d00bd9eb

Showing 20 changed files with 52 additions and 109 deletions (+52, -109). The C++ hunks in this commit are whitespace-only changes (line re-wrapping and access-specifier indentation) from switching to the unified Google style, so each affected region is shown once below, reconstructed in readable form.
paddle/fluid/.clang-format (+0 -0)
paddle/fluid/inference/io.cc (+3 -6)
paddle/fluid/inference/io.h (+1 -2)
paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc (+2 -2)
paddle/fluid/inference/tests/book/test_inference_image_classification.cc (+6 -8)
paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc (+8 -24)
paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc (+6 -8)
paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc (+4 -4)
paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc (+1 -3)
paddle/fluid/inference/tests/test_helper.h (+8 -17)
paddle/fluid/memory/.clang-format (+0 -5)
paddle/fluid/operators/.clang-format (+0 -5)
paddle/fluid/platform/.clang-format (+0 -5)
paddle/fluid/pybind/.clang-format (+0 -5)
paddle/fluid/recordio/chunk.cc (+4 -4)
paddle/fluid/recordio/chunk.h (+2 -2)
paddle/fluid/recordio/header.h (+2 -2)
paddle/fluid/recordio/scanner.h (+2 -2)
paddle/fluid/recordio/writer.h (+3 -4)
paddle/fluid/string/.clang-format (+0 -1)
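The commit removes the per-directory .clang-format overrides (the files deleted above) so that everything under paddle/fluid/ picks up a single Google-style configuration. As a rough illustration of the dominant change in the hunks below, here is a small snippet, not taken from the Paddle sources and with hypothetical names, formatted the way that style wraps function parameters:

```cpp
#include <string>
#include <vector>

// Illustrative only; the names here are hypothetical, not Paddle APIs.
// Google style packs parameters onto as few lines as fit within 80 columns
// and aligns continuation lines under the opening parenthesis, which is why
// many multi-line signatures below collapse into fewer lines.
void AppendRecord(std::vector<std::string>& records, const std::string& record,
                  size_t repeat_count, bool clear_existing) {
  if (clear_existing) records.clear();
  for (size_t i = 0; i < repeat_count; ++i) {
    records.push_back(record);
  }
}
```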
paddle/fluid/framework/.clang-format → paddle/fluid/.clang-format (file moved)
paddle/fluid/inference/io.cc

    @@ -41,8 +41,7 @@ bool IsPersistable(const framework::VarDesc* var) {
      return false;
    }

    void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                          const framework::ProgramDesc& main_program,
                          const std::string& dirname,
                          const std::string& param_filename) {

    @@ -108,10 +107,8 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor& executor,
    }

    std::unique_ptr<framework::ProgramDesc> Load(
        framework::Executor& executor, framework::Scope& scope,
        const std::string& prog_filename, const std::string& param_filename) {
      std::string model_filename = prog_filename;
      std::string program_desc_str;
      ReadBinaryFile(model_filename, program_desc_str);
paddle/fluid/inference/io.h

    @@ -24,8 +24,7 @@ limitations under the License. */
    namespace paddle {
    namespace inference {

    void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                          const framework::ProgramDesc& main_program,
                          const std::string& dirname,
                          const std::string& param_filename);
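For context, the Load overload above returns the deserialized ProgramDesc for a model saved in the combined format (one program file plus one parameter file). A hedged usage sketch based only on the signatures visible in this diff; the helper name LoadCombinedModel is hypothetical, and the two file names are the defaults used in the test_helper.h hunk further down:

```cpp
#include <memory>
#include <string>

#include "paddle/fluid/inference/io.h"  // declares LoadPersistables and Load

// Hypothetical helper: load a combined-format inference model from `dirname`.
std::unique_ptr<paddle::framework::ProgramDesc> LoadCombinedModel(
    const std::string& dirname, paddle::framework::Executor& executor,
    paddle::framework::Scope& scope) {
  // Mirrors the call made in TestInference (see test_helper.h below).
  return paddle::inference::Load(executor, scope,
                                 dirname + "/__model_combined__",
                                 dirname + "/__params_combined__");
}
```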
paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc

    @@ -30,8 +30,8 @@ TEST(inference, fit_a_line) {
      // The second dim of the input tensor should be 13
      // The input data should be >= 0
      int64_t batch_size = 10;
      SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
                         static_cast<float>(10));
      std::vector<paddle::framework::LoDTensor*> cpu_feeds;
      cpu_feeds.push_back(&input);
paddle/fluid/inference/tests/book/test_inference_image_classification.cc

    @@ -35,10 +35,8 @@ TEST(inference, image_classification) {
      paddle::framework::LoDTensor input;
      // Use normilized image pixels as input data,
      // which should be in the range [0.0, 1.0].
      SetupTensor<float>(input, {FLAGS_batch_size, 3, 32, 32},
                         static_cast<float>(0), static_cast<float>(1));
      std::vector<paddle::framework::LoDTensor*> cpu_feeds;
      cpu_feeds.push_back(&input);

    @@ -48,8 +46,8 @@ TEST(inference, image_classification) {
      // Run inference on CPU
      LOG(INFO) << "--- CPU Runs: ---";
      TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
                                                FLAGS_repeat);
      LOG(INFO) << output1.dims();

    #ifdef PADDLE_WITH_CUDA

    @@ -59,8 +57,8 @@ TEST(inference, image_classification) {
      // Run inference on CUDA GPU
      LOG(INFO) << "--- GPU Runs: ---";
      TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
                                                 FLAGS_repeat);
      LOG(INFO) << output2.dims();

      CheckError<float>(output1, output2);
paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc

    @@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) {
      int64_t predicate_dict_len = 3162;
      int64_t mark_dict_len = 2;

      SetupLoDTensor(word, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));
      SetupLoDTensor(predicate, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(predicate_dict_len - 1));
      SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));
      SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));
      SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));
      SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));
      SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));
      SetupLoDTensor(mark, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(mark_dict_len - 1));

      std::vector<paddle::framework::LoDTensor*> cpu_feeds;
paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc

    @@ -35,10 +35,8 @@ TEST(inference, recognize_digits) {
      paddle::framework::LoDTensor input;
      // Use normilized image pixels as input data,
      // which should be in the range [-1.0, 1.0].
      SetupTensor<float>(input, {FLAGS_batch_size, 1, 28, 28},
                         static_cast<float>(-1), static_cast<float>(1));
      std::vector<paddle::framework::LoDTensor*> cpu_feeds;
      cpu_feeds.push_back(&input);

    @@ -49,8 +47,8 @@ TEST(inference, recognize_digits) {
      // Run inference on CPU
      LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---";
      TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
                                                FLAGS_repeat, is_combined);
      LOG(INFO) << output1.dims();

    #ifdef PADDLE_WITH_CUDA

    @@ -60,8 +58,8 @@ TEST(inference, recognize_digits) {
      // Run inference on CUDA GPU
      LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---";
      TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
                                                 FLAGS_repeat, is_combined);
      LOG(INFO) << output2.dims();

      CheckError<float>(output1, output2);
paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc

    @@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) {
      paddle::framework::LoDTensor word_data, trg_word;
      paddle::framework::LoD lod{{0, 4, 10}};

      SetupLoDTensor(word_data, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(1));
      SetupLoDTensor(trg_word, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(1));

      std::vector<paddle::framework::LoDTensor*> cpu_feeds;
      cpu_feeds.push_back(&word_data);
paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc

    @@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) {
      paddle::framework::LoD lod{{0, 4, 10}};
      int64_t word_dict_len = 5147;

      SetupLoDTensor(words, lod, static_cast<int64_t>(0),
                     static_cast<int64_t>(word_dict_len - 1));

      std::vector<paddle::framework::LoDTensor*> cpu_feeds;
paddle/fluid/inference/tests/test_helper.h

    @@ -19,9 +19,7 @@ limitations under the License. */
    template <typename T>
    void SetupTensor(paddle::framework::LoDTensor& input,
                     paddle::framework::DDim dims, T lower, T upper) {
      srand(time(0));
      T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
      for (int i = 0; i < input.numel(); ++i) {

    @@ -33,8 +31,7 @@ void SetupTensor(paddle::framework::LoDTensor& input,
    template <typename T>
    void SetupTensor(paddle::framework::LoDTensor& input,
                     paddle::framework::DDim dims, std::vector<T>& data) {
      CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
      T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
      memcpy(input_ptr, data.data(), input.numel() * sizeof(T));

    @@ -42,9 +39,7 @@ void SetupTensor(paddle::framework::LoDTensor& input,
    template <typename T>
    void SetupLoDTensor(paddle::framework::LoDTensor& input,
                        paddle::framework::LoD& lod, T lower, T upper) {
      input.set_lod(lod);
      int dim = lod[0][lod[0].size() - 1];
      SetupTensor<T>(input, {dim, 1}, lower, upper);

    @@ -52,8 +47,7 @@ void SetupLoDTensor(paddle::framework::LoDTensor& input,
    template <typename T>
    void SetupLoDTensor(paddle::framework::LoDTensor& input,
                        paddle::framework::DDim dims, paddle::framework::LoD lod,
                        std::vector<T>& data) {
      const size_t level = lod.size() - 1;
      CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));

    @@ -92,8 +86,7 @@ template <typename Place>
    void TestInference(const std::string& dirname,
                       const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                       std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
                       const int repeat = 1, const bool is_combined = false) {
      // 1. Define place, executor, scope
      auto place = Place();
      auto executor = paddle::framework::Executor(place);

    @@ -132,11 +125,9 @@ void TestInference(const std::string& dirname,
        // `fluid.io.save_inference_model`.
        std::string prog_filename = "__model_combined__";
        std::string param_filename = "__params_combined__";
        inference_program = paddle::inference::Load(
            executor, *scope, dirname + "/" + prog_filename,
            dirname + "/" + param_filename);
      } else {
        // Parameters are saved in separate files sited in the specified
        // `dirname`.
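SetupTensor above seeds rand() via srand(time(0)) and fills the tensor's buffer with values in [lower, upper]; the loop body itself is collapsed in this diff. Here is a self-contained sketch of that fill pattern using a plain std::vector instead of Paddle's LoDTensor; FillUniform is an illustrative stand-in, not a Paddle API:

```cpp
#include <cstdlib>
#include <ctime>
#include <vector>

// Illustrative stand-in for SetupTensor's random fill: `numel` values drawn
// uniformly from [lower, upper] using the same srand/rand approach.
template <typename T>
std::vector<T> FillUniform(size_t numel, T lower, T upper) {
  std::srand(static_cast<unsigned>(std::time(nullptr)));
  std::vector<T> data(numel);
  for (size_t i = 0; i < numel; ++i) {
    T unit = static_cast<T>(std::rand()) / static_cast<T>(RAND_MAX);  // in [0, 1]
    data[i] = lower + unit * (upper - lower);
  }
  return data;
}
```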
paddle/fluid/memory/.clang-format — deleted (mode 100644)

    ---
    Language: Cpp
    BasedOnStyle: Google
    Standard: Cpp11
    ...
paddle/fluid/operators/.clang-format — deleted (mode 100644)

    ---
    Language: Cpp
    BasedOnStyle: Google
    Standard: Cpp11
    ...
paddle/fluid/platform/.clang-format — deleted (mode 100644)

    ---
    Language: Cpp
    BasedOnStyle: Google
    Standard: Cpp11
    ...
paddle/fluid/pybind/.clang-format — deleted (mode 100644)

    ---
    Language: Cpp
    BasedOnStyle: Google
    Standard: Cpp11
    ...
paddle/fluid/recordio/chunk.cc

    @@ -58,8 +58,8 @@ static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) {
     * Copy stream in to another stream
     */
    static void PipeStream(std::istream& in, std::ostream& os) {
      ReadStreamByBuf(in, 0,
                      [&os](const char* buf, size_t len) { os.write(buf, len); });
    }

    /**

    @@ -68,8 +68,8 @@ static void PipeStream(std::istream& in, std::ostream& os) {
    static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) {
      uint32_t crc = static_cast<uint32_t>(crc32(0, nullptr, 0));
      ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) {
        crc = static_cast<uint32_t>(crc32(crc, reinterpret_cast<const Bytef*>(buf),
                                          static_cast<uInt>(len)));
      });
      return crc;
    }
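Both PipeStream and Crc32Stream above delegate the actual reading to ReadStreamByBuf, which walks an istream in fixed-size buffers and hands each buffer to a callback, with a byte limit of 0 meaning "read to EOF". A self-contained sketch of that pattern follows; the buffer size and the name ReadByBuffer are illustrative, not the RecordIO implementation:

```cpp
#include <functional>
#include <iostream>
#include <sstream>

// Reads `in` in fixed-size chunks and passes each chunk to `callback`,
// stopping after `limit` bytes (0 means "until EOF").
static void ReadByBuffer(std::istream& in, size_t limit,
                         const std::function<void(const char*, size_t)>& callback) {
  char buf[4096];
  size_t remaining = limit;
  while (in) {
    size_t want = sizeof(buf);
    if (limit != 0) {
      if (remaining == 0) break;
      if (remaining < want) want = remaining;
    }
    in.read(buf, static_cast<std::streamsize>(want));
    std::streamsize got = in.gcount();
    if (got <= 0) break;
    callback(buf, static_cast<size_t>(got));
    if (limit != 0) remaining -= static_cast<size_t>(got);
  }
}

int main() {
  std::istringstream src("hello recordio");
  std::ostringstream dst;
  // Same shape as PipeStream: copy one stream into another via the callback.
  ReadByBuffer(src, 0,
               [&dst](const char* buf, size_t len) { dst.write(buf, len); });
  std::cout << dst.str() << "\n";
  return 0;
}
```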
paddle/fluid/recordio/chunk.h

    @@ -24,7 +24,7 @@ namespace recordio {
    // A Chunk contains the Header and optionally compressed records.
    class Chunk {
     public:
      Chunk() : num_bytes_(0) {}
      void Add(const std::string& buf) {
        num_bytes_ += buf.size();

    @@ -46,7 +46,7 @@ public:
      bool Empty() const { return records_.empty(); }

     private:
      std::vector<std::string> records_;
      // sum of record lengths in bytes.
      size_t num_bytes_;
paddle/fluid/recordio/header.h

    @@ -37,7 +37,7 @@ enum class Compressor : uint32_t {
    // Header is the metadata of Chunk
    class Header {
     public:
      Header();
      Header(uint32_t num, uint32_t sum, Compressor ct, uint32_t cs);

    @@ -51,7 +51,7 @@ public:
      Compressor CompressType() const { return compressor_; }
      uint32_t CompressSize() const { return compress_size_; }

     private:
      uint32_t num_records_;
      uint32_t checksum_;
      Compressor compressor_;
paddle/fluid/recordio/scanner.h

    @@ -21,7 +21,7 @@ namespace paddle {
    namespace recordio {

    class Scanner {
     public:
      explicit Scanner(std::unique_ptr<std::istream>&& stream);
      explicit Scanner(const std::string& filename);

    @@ -32,7 +32,7 @@ public:
      bool HasNext() const;

     private:
      std::unique_ptr<std::istream> stream_;
      Chunk cur_chunk_;
      size_t offset_;
paddle/fluid/recordio/writer.h

    @@ -18,9 +18,8 @@ namespace paddle {
    namespace recordio {

    class Writer {
     public:
      Writer(std::ostream* sout, Compressor compressor,
             size_t max_num_records_in_chunk = 1000)
          : stream_(*sout),
            max_num_records_in_chunk_(max_num_records_in_chunk),

    @@ -32,7 +31,7 @@ public:
      ~Writer();

     private:
      std::ostream& stream_;
      size_t max_num_records_in_chunk_;
      Chunk cur_chunk_;
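The recordio header changes above are all the same small edit: under the unified Google configuration, access specifiers are indented by one space inside the class body instead of sitting flush with the class keyword. A small compilable class modeled on the Chunk shown above (ChunkLike is an illustrative name, not one of the RecordIO types) showing the resulting layout:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Illustrative only; mirrors the shape of Chunk but is not the RecordIO class.
class ChunkLike {
 public:  // one-space indentation, as in the reformatted headers above
  ChunkLike() : num_bytes_(0) {}

  void Add(const std::string& buf) {
    num_bytes_ += buf.size();
    records_.push_back(buf);
  }

  bool Empty() const { return records_.empty(); }

 private:
  std::vector<std::string> records_;
  size_t num_bytes_;  // sum of record lengths in bytes
};
```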
paddle/fluid/string/.clang-format — deleted (symlink, mode 120000)

    ../framework/.clang-format
    \ No newline at end of file