Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
xxadev
tensorflow
提交
809f9e7d
T
tensorflow
项目概览
xxadev
/
tensorflow
与 Fork 源项目一致
从无法访问的项目Fork
通知
3
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
tensorflow
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
809f9e7d
编写于
3月 17, 2017
作者:
B
Benoit Steiner
提交者:
TensorFlower Gardener
3月 17, 2017
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Moved the function that converts metagraphdef into grappler item to its own
file Change: 150498236
上级
0212e49a
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
270 additions
and
206 deletions
+270
-206
tensorflow/core/grappler/BUILD
tensorflow/core/grappler/BUILD
+6
-1
tensorflow/core/grappler/grappler_item.cc
tensorflow/core/grappler/grappler_item.cc
+0
-187
tensorflow/core/grappler/grappler_item.h
tensorflow/core/grappler/grappler_item.h
+0
-18
tensorflow/core/grappler/grappler_item_builder.cc
tensorflow/core/grappler/grappler_item_builder.cc
+217
-0
tensorflow/core/grappler/grappler_item_builder.h
tensorflow/core/grappler/grappler_item_builder.h
+47
-0
未找到文件。
tensorflow/core/grappler/BUILD
浏览文件 @
809f9e7d
...
...
@@ -64,9 +64,13 @@ cc_library(
cc_library
(
name
=
"grappler_item"
,
srcs
=
[
"grappler_item.cc"
,
],
hdrs
=
[
"grappler_item.h"
],
visibility
=
[
"//visibility:public"
],
deps
=
[
":utils"
,
"//tensorflow/core:framework"
,
"//tensorflow/core:protos_all_cc"
,
],
...
...
@@ -75,8 +79,9 @@ cc_library(
cc_library
(
name
=
"grappler_item_builder"
,
srcs
=
[
"grappler_item.cc"
,
"grappler_item
_builder
.cc"
,
],
hdrs
=
[
"grappler_item_builder.h"
],
visibility
=
[
"//visibility:public"
],
deps
=
[
":grappler_item"
,
...
...
tensorflow/core/grappler/grappler_item.cc
浏览文件 @
809f9e7d
...
...
@@ -19,199 +19,12 @@ limitations under the License.
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace
tensorflow
{
namespace
grappler
{
namespace
{
// Fills `tensor` with deterministic synthetic values so a fake feed can be
// constructed for a placeholder node:
//   - DT_FLOAT:  cycles through 0.0, 0.1, ..., 0.6, then repeats.
//   - DT_INT64:  cycles through 0, 1, ..., 6, then repeats.
//   - any other dtype: the underlying buffer is zero-filled byte-wise.
// The values are arbitrary but reproducible, which keeps downstream
// measurements deterministic.
void InitializeTensor(DataType type, Tensor* tensor) {
  const int period = 7;
  if (type == DT_FLOAT) {
    auto flat = tensor->flat<float>();
    // Populate numbers 0, 0.1, 0.2, ..., 0.5, 0.6, 0, 0.1, 0.2, ...
    for (int i = 0; i < flat.size(); i++) {
      flat(i) = static_cast<float>(i % period) / 10.0f;
    }
  } else if (type == DT_INT64) {
    auto flat = tensor->flat<int64>();
    // Populate numbers 0, 1, 2, ..., 5, 6, 0, 1, 2, ...
    for (int i = 0; i < flat.size(); i++) {
      flat(i) = i % period;
    }
  } else {
    // tensor_data() exposes a const view of the buffer; the const_cast is the
    // established way to zero the storage in place for arbitrary dtypes.
    memset(const_cast<char*>(tensor->tensor_data().data()), 0,
           tensor->tensor_data().size());
  }
}
}
// namespace
// static
std
::
unique_ptr
<
GrapplerItem
>
GrapplerItem
::
FromMetaGraphDef
(
const
string
&
id
,
const
MetaGraphDef
&
meta_graph
,
const
ItemConfig
&
cfg
)
{
if
(
id
.
empty
())
{
LOG
(
ERROR
)
<<
"id must be non-empty."
;
return
nullptr
;
}
std
::
unique_ptr
<
GrapplerItem
>
new_item
(
new
GrapplerItem
());
new_item
->
id
=
id
;
new_item
->
graph
=
meta_graph
.
graph_def
();
// Attempt to detect the fetch node(s).
if
(
meta_graph
.
collection_def
().
count
(
"train_op"
)
>
0
)
{
const
CollectionDef
&
nodes
=
meta_graph
.
collection_def
().
at
(
"train_op"
);
if
(
nodes
.
has_node_list
())
{
for
(
const
auto
&
node
:
nodes
.
node_list
().
value
())
{
const
string
name
=
NodeName
(
node
);
if
(
name
.
empty
())
{
LOG
(
ERROR
)
<<
"Invalid fetch node name "
<<
node
<<
", skipping this input"
;
return
nullptr
;
}
LOG
(
INFO
)
<<
"Will use fetch node "
<<
name
;
new_item
->
fetch
.
push_back
(
name
);
}
}
}
if
(
new_item
->
fetch
.
empty
())
{
LOG
(
ERROR
)
<<
"Failed to detect the fetch node(s), skipping this input"
;
return
nullptr
;
}
for
(
auto
&
node
:
*
new_item
->
graph
.
mutable_node
())
{
// Delete user specified placement if requested.
if
(
cfg
.
ignore_user_placement
)
{
node
.
clear_device
();
}
if
(
node
.
op
()
==
"Placeholder"
||
node
.
op
()
==
"PlaceholderV2"
)
{
if
(
node
.
attr
().
count
(
"dtype"
)
==
0
)
{
LOG
(
ERROR
)
<<
"Unknown type for placeholder "
<<
node
.
name
()
<<
", skipping this input"
;
return
nullptr
;
}
DataType
type
=
node
.
attr
().
at
(
"dtype"
).
type
();
if
(
node
.
attr
().
count
(
"shape"
)
==
0
)
{
LOG
(
INFO
)
<<
"Unknown shape for placeholder "
<<
node
.
name
()
<<
", skipping this input"
;
return
nullptr
;
}
TensorShape
shape
(
node
.
attr
().
at
(
"shape"
).
shape
());
// Some placeholder nodes have a mis-match between the node
// attribute "shape" and a different node attribute "_output_shapes".
// Specifically, a shape with shape.dims() == 0 could indicate either
// a scalar or an unknown shape. In those cases, we check _output_shapes
// for additional information.
// This case is observed in the bnmt graphs. Have not observed any
// cases where there was more than 1 _output_shapes, so limit it
// to cases where there is only 1 _output_shapes.
// We only do this if cfg.placeholder_unknown_output_shape_dim has
// been set to avoid crashing non-BNMT graphs.
if
((
cfg
.
placeholder_unknown_output_shape_dim
>=
0
)
&&
(
shape
.
dims
()
==
0
)
&&
(
node
.
attr
().
count
(
"_output_shapes"
)
==
1
)
&&
(
node
.
attr
().
at
(
"_output_shapes"
).
list
().
shape
(
0
).
dim_size
()
!=
0
))
{
shape
.
Clear
();
for
(
int
dim_i
=
0
;
dim_i
<
node
.
attr
().
at
(
"_output_shapes"
).
list
().
shape
(
0
).
dim_size
();
dim_i
++
)
{
const
::
tensorflow
::
TensorShapeProto_Dim
dim
=
node
.
attr
().
at
(
"_output_shapes"
).
list
().
shape
(
0
).
dim
(
dim_i
);
if
(
dim
.
size
()
==
-
1
)
{
shape
.
AddDim
(
cfg
.
placeholder_unknown_output_shape_dim
);
}
else
{
shape
.
AddDim
(
node
.
attr
()
.
at
(
"_output_shapes"
)
.
list
()
.
shape
(
0
)
.
dim
(
dim_i
)
.
size
());
}
}
}
Tensor
fake_input
(
type
,
shape
);
InitializeTensor
(
type
,
&
fake_input
);
new_item
->
feed
.
emplace_back
(
node
.
name
(),
fake_input
);
}
if
(
cfg
.
ignore_colocation
)
{
auto
attr
=
node
.
mutable_attr
();
auto
it
=
attr
->
find
(
"_class"
);
if
(
it
!=
attr
->
end
())
{
attr
->
erase
(
it
);
}
}
}
for
(
const
string
&
var_collection
:
{
"variables"
,
"local_variables"
,
"model_variables"
,
"trainable_variables"
})
{
if
(
meta_graph
.
collection_def
().
count
(
var_collection
)
==
0
)
{
continue
;
}
const
CollectionDef
&
vars
=
meta_graph
.
collection_def
().
at
(
var_collection
);
for
(
const
auto
&
raw_var
:
vars
.
bytes_list
().
value
())
{
VariableDef
var
;
var
.
ParseFromString
(
raw_var
);
if
(
!
var
.
initializer_name
().
empty
())
{
new_item
->
init_ops
.
push_back
(
var
.
initializer_name
());
}
}
}
if
(
meta_graph
.
collection_def
().
count
(
"table_initializer"
)
>
0
)
{
const
CollectionDef
&
inits
=
meta_graph
.
collection_def
().
at
(
"table_initializer"
);
if
(
inits
.
has_node_list
())
{
for
(
const
auto
&
node
:
inits
.
node_list
().
value
())
{
new_item
->
init_ops
.
push_back
(
node
);
}
}
}
if
(
meta_graph
.
collection_def
().
count
(
"queue_runners"
)
>
0
)
{
const
CollectionDef
&
vars
=
meta_graph
.
collection_def
().
at
(
"queue_runners"
);
for
(
const
auto
&
raw
:
vars
.
bytes_list
().
value
())
{
QueueRunnerDef
queue_runner
;
if
(
!
queue_runner
.
ParseFromString
(
raw
))
{
LOG
(
ERROR
)
<<
"Could parse queue_runners, skipping this input"
;
return
nullptr
;
}
if
(
queue_runner
.
cancel_op_name
().
empty
())
{
LOG
(
ERROR
)
<<
"Queue without a cancel op, skipping this input"
;
return
nullptr
;
}
new_item
->
queue_runners
.
push_back
(
queue_runner
);
}
}
// Make sure we still can access the input files (aka "asset_filepaths") since
// these might have been moved or deleted, the cns cell might have been shut
// down, or we might be running as a user who does not have access to the
// files.
if
(
meta_graph
.
collection_def
().
count
(
"asset_filepaths"
)
>
0
)
{
const
CollectionDef
&
file_paths
=
meta_graph
.
collection_def
().
at
(
"asset_filepaths"
);
std
::
vector
<
string
>
paths
;
for
(
const
auto
&
raw_path
:
file_paths
.
bytes_list
().
value
())
{
paths
.
push_back
(
raw_path
);
}
if
(
!
FilesExist
(
paths
,
nullptr
))
{
LOG
(
ERROR
)
<<
"Can't access one or more of the asset files, skipping this input"
;
return
nullptr
;
}
}
return
new_item
;
}
// Computes the transitive fanin of the graph with respect to the fetch
// nodes, i.e. the set of nodes the fetch nodes depend on (delegates to
// ComputeTransitiveFanin). Pointers refer into this item's graph.
std::vector<const NodeDef*> GrapplerItem::MainOpsFanin() const {
  return ComputeTransitiveFanin(graph, fetch);
}
...
...
tensorflow/core/grappler/grappler_item.h
浏览文件 @
809f9e7d
...
...
@@ -26,31 +26,13 @@ limitations under the License.
#include "tensorflow/core/protobuf/queue_runner.pb.h"
namespace
tensorflow
{
class
MetaGraphDef
;
namespace
grappler
{
// Tuning knobs that control how a MetaGraphDef is converted into a
// GrapplerItem (see GrapplerItem::FromMetaGraphDef).
struct ItemConfig {
  // If true, ignore all user specified node placement.
  bool ignore_user_placement = true;
  // If true, ignore all user specified colocation attributes.
  bool ignore_colocation = true;
  // Dimension to use if a placeholder node has an _output_shapes attribute with
  // a dimension of -1.
  int32 placeholder_unknown_output_shape_dim = -1;
};
// A TensorFlow model to optimize.
// Models are represented by the combination of a graph, one of more fetch
// nodes, and potentially a set of nodes to feed.
// TODO(volunteer_needed): turn this struct into a class.
struct
GrapplerItem
{
// Factory method for creating a GrapplerItem from a MetaGraphDef.
// Returns nullptr if the given meta_graph cannot be converted.
static
std
::
unique_ptr
<
GrapplerItem
>
FromMetaGraphDef
(
const
string
&
id
,
const
MetaGraphDef
&
meta_graph
,
const
ItemConfig
&
cfg
);
string
id
;
// A unique id for this item
// Inputs
...
...
tensorflow/core/grappler/grappler_item_builder.cc
0 → 100644
浏览文件 @
809f9e7d
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace
tensorflow
{
namespace
grappler
{
namespace
{
// Populates `tensor` with deterministic placeholder data so that a synthetic
// feed can stand in for a real input: floats repeat the sequence
// 0.0, 0.1, ..., 0.6; int64 values repeat 0..6; every other dtype is
// zero-filled byte-wise. Deterministic values keep repeated runs comparable.
void InitializeTensor(DataType type, Tensor* tensor) {
  const int kPeriod = 7;
  switch (type) {
    case DT_FLOAT: {
      auto values = tensor->flat<float>();
      // 0, 0.1, 0.2, ..., 0.5, 0.6, then wrap around.
      for (int idx = 0; idx < values.size(); ++idx) {
        values(idx) = static_cast<float>(idx % kPeriod) / 10.0f;
      }
      break;
    }
    case DT_INT64: {
      auto values = tensor->flat<int64>();
      // 0, 1, 2, ..., 5, 6, then wrap around.
      for (int idx = 0; idx < values.size(); ++idx) {
        values(idx) = idx % kPeriod;
      }
      break;
    }
    default:
      // Arbitrary dtypes: zero the raw buffer. tensor_data() is a const
      // view, hence the const_cast to reach the underlying storage.
      memset(const_cast<char*>(tensor->tensor_data().data()), 0,
             tensor->tensor_data().size());
      break;
  }
}
}
// namespace
// static
std
::
unique_ptr
<
GrapplerItem
>
GrapplerItemFromMetaGraphDef
(
const
string
&
id
,
const
MetaGraphDef
&
meta_graph
,
const
ItemConfig
&
cfg
)
{
if
(
id
.
empty
())
{
LOG
(
ERROR
)
<<
"id must be non-empty."
;
return
nullptr
;
}
std
::
unique_ptr
<
GrapplerItem
>
new_item
(
new
GrapplerItem
());
new_item
->
id
=
id
;
new_item
->
graph
=
meta_graph
.
graph_def
();
// Attempt to detect the fetch node(s).
if
(
meta_graph
.
collection_def
().
count
(
"train_op"
)
>
0
)
{
const
CollectionDef
&
nodes
=
meta_graph
.
collection_def
().
at
(
"train_op"
);
if
(
nodes
.
has_node_list
())
{
for
(
const
auto
&
node
:
nodes
.
node_list
().
value
())
{
const
string
name
=
NodeName
(
node
);
if
(
name
.
empty
())
{
LOG
(
ERROR
)
<<
"Invalid fetch node name "
<<
node
<<
", skipping this input"
;
return
nullptr
;
}
LOG
(
INFO
)
<<
"Will use fetch node "
<<
name
;
new_item
->
fetch
.
push_back
(
name
);
}
}
}
if
(
new_item
->
fetch
.
empty
())
{
LOG
(
ERROR
)
<<
"Failed to detect the fetch node(s), skipping this input"
;
return
nullptr
;
}
for
(
auto
&
node
:
*
new_item
->
graph
.
mutable_node
())
{
// Delete user specified placement if requested.
if
(
cfg
.
ignore_user_placement
)
{
node
.
clear_device
();
}
if
(
node
.
op
()
==
"Placeholder"
||
node
.
op
()
==
"PlaceholderV2"
)
{
if
(
node
.
attr
().
count
(
"dtype"
)
==
0
)
{
LOG
(
ERROR
)
<<
"Unknown type for placeholder "
<<
node
.
name
()
<<
", skipping this input"
;
return
nullptr
;
}
DataType
type
=
node
.
attr
().
at
(
"dtype"
).
type
();
if
(
node
.
attr
().
count
(
"shape"
)
==
0
)
{
LOG
(
INFO
)
<<
"Unknown shape for placeholder "
<<
node
.
name
()
<<
", skipping this input"
;
return
nullptr
;
}
TensorShape
shape
(
node
.
attr
().
at
(
"shape"
).
shape
());
// Some placeholder nodes have a mis-match between the node
// attribute "shape" and a different node attribute "_output_shapes".
// Specifically, a shape with shape.dims() == 0 could indicate either
// a scalar or an unknown shape. In those cases, we check _output_shapes
// for additional information.
// This case is observed in the bnmt graphs. Have not observed any
// cases where there was more than 1 _output_shapes, so limit it
// to cases where there is only 1 _output_shapes.
// We only do this if cfg.placeholder_unknown_output_shape_dim has
// been set to avoid crashing non-BNMT graphs.
if
((
cfg
.
placeholder_unknown_output_shape_dim
>=
0
)
&&
(
shape
.
dims
()
==
0
)
&&
(
node
.
attr
().
count
(
"_output_shapes"
)
==
1
)
&&
(
node
.
attr
().
at
(
"_output_shapes"
).
list
().
shape
(
0
).
dim_size
()
!=
0
))
{
shape
.
Clear
();
for
(
int
dim_i
=
0
;
dim_i
<
node
.
attr
().
at
(
"_output_shapes"
).
list
().
shape
(
0
).
dim_size
();
dim_i
++
)
{
const
::
tensorflow
::
TensorShapeProto_Dim
dim
=
node
.
attr
().
at
(
"_output_shapes"
).
list
().
shape
(
0
).
dim
(
dim_i
);
if
(
dim
.
size
()
==
-
1
)
{
shape
.
AddDim
(
cfg
.
placeholder_unknown_output_shape_dim
);
}
else
{
shape
.
AddDim
(
node
.
attr
()
.
at
(
"_output_shapes"
)
.
list
()
.
shape
(
0
)
.
dim
(
dim_i
)
.
size
());
}
}
}
Tensor
fake_input
(
type
,
shape
);
InitializeTensor
(
type
,
&
fake_input
);
new_item
->
feed
.
emplace_back
(
node
.
name
(),
fake_input
);
}
if
(
cfg
.
ignore_colocation
)
{
auto
attr
=
node
.
mutable_attr
();
auto
it
=
attr
->
find
(
"_class"
);
if
(
it
!=
attr
->
end
())
{
attr
->
erase
(
it
);
}
}
}
for
(
const
string
&
var_collection
:
{
"variables"
,
"local_variables"
,
"model_variables"
,
"trainable_variables"
})
{
if
(
meta_graph
.
collection_def
().
count
(
var_collection
)
==
0
)
{
continue
;
}
const
CollectionDef
&
vars
=
meta_graph
.
collection_def
().
at
(
var_collection
);
for
(
const
auto
&
raw_var
:
vars
.
bytes_list
().
value
())
{
VariableDef
var
;
var
.
ParseFromString
(
raw_var
);
if
(
!
var
.
initializer_name
().
empty
())
{
new_item
->
init_ops
.
push_back
(
var
.
initializer_name
());
}
}
}
if
(
meta_graph
.
collection_def
().
count
(
"table_initializer"
)
>
0
)
{
const
CollectionDef
&
inits
=
meta_graph
.
collection_def
().
at
(
"table_initializer"
);
if
(
inits
.
has_node_list
())
{
for
(
const
auto
&
node
:
inits
.
node_list
().
value
())
{
new_item
->
init_ops
.
push_back
(
node
);
}
}
}
if
(
meta_graph
.
collection_def
().
count
(
"queue_runners"
)
>
0
)
{
const
CollectionDef
&
vars
=
meta_graph
.
collection_def
().
at
(
"queue_runners"
);
for
(
const
auto
&
raw
:
vars
.
bytes_list
().
value
())
{
QueueRunnerDef
queue_runner
;
if
(
!
queue_runner
.
ParseFromString
(
raw
))
{
LOG
(
ERROR
)
<<
"Could parse queue_runners, skipping this input"
;
return
nullptr
;
}
if
(
queue_runner
.
cancel_op_name
().
empty
())
{
LOG
(
ERROR
)
<<
"Queue without a cancel op, skipping this input"
;
return
nullptr
;
}
new_item
->
queue_runners
.
push_back
(
queue_runner
);
}
}
// Make sure we still can access the input files (aka "asset_filepaths") since
// these might have been moved or deleted, the cns cell might have been shut
// down, or we might be running as a user who does not have access to the
// files.
if
(
meta_graph
.
collection_def
().
count
(
"asset_filepaths"
)
>
0
)
{
const
CollectionDef
&
file_paths
=
meta_graph
.
collection_def
().
at
(
"asset_filepaths"
);
std
::
vector
<
string
>
paths
;
for
(
const
auto
&
raw_path
:
file_paths
.
bytes_list
().
value
())
{
paths
.
push_back
(
raw_path
);
}
if
(
!
FilesExist
(
paths
,
nullptr
))
{
LOG
(
ERROR
)
<<
"Can't access one or more of the asset files, skipping this input"
;
return
nullptr
;
}
}
return
new_item
;
}
}
// end namespace grappler
}
// end namespace tensorflow
tensorflow/core/grappler/grappler_item_builder.h
0 → 100644
浏览文件 @
809f9e7d
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_GRAPPLER_GRAPPLER_ITEM_BUILDER_H_
#define TENSORFLOW_GRAPPLER_GRAPPLER_ITEM_BUILDER_H_

#include <memory>
#include <string>

#include "tensorflow/core/grappler/grappler_item.h"

namespace tensorflow {

class MetaGraphDef;

namespace grappler {

// Tuning knobs that control how a MetaGraphDef is converted into a
// GrapplerItem (see GrapplerItemFromMetaGraphDef below).
struct ItemConfig {
  // If true, ignore all user specified node placement.
  bool ignore_user_placement = true;
  // If true, ignore all user specified colocation attributes.
  bool ignore_colocation = true;
  // Dimension to use if a placeholder node has an _output_shapes attribute
  // with a dimension of -1. Declared as int32 (was plain int) to stay
  // consistent with the field's declaration in the previous location of
  // this struct.
  int32 placeholder_unknown_output_shape_dim = -1;
};

// Factory method for creating a GrapplerItem from a MetaGraphDef.
// Returns nullptr if the given meta_graph cannot be converted.
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDef(
    const string& id, const MetaGraphDef& meta_graph, const ItemConfig& cfg);

}  // end namespace grappler
}  // end namespace tensorflow

#endif  // TENSORFLOW_GRAPPLER_GRAPPLER_ITEM_BUILDER_H_
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录