Unverified commit 2ca3d3f7, authored by zhangbo9674, committed by GitHub

[new executor]Support CINN use InterpreterCore (#48911)

* cinn use interpretercore

* fix bug

* fix compile bug

* fix scope bug

* refine code

* refine code by comment

* refine code by comment
Parent 0839bba3
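In brief, this change lets the cinn_launch and cinn_instruction_run ops execute their CINN-compiled program through the new executor (InterpreterCore) instead of ParallelExecutor. A minimal sketch of the resulting launch path, built on the InitializeInterpreterCore API added below; the wrapper function name is illustrative only, and namespaces are abbreviated:

```cpp
#include "paddle/fluid/framework/new_executor/interpretercore.h"

// Hypothetical wrapper (not part of the patch) showing how a cinn_launch
// kernel drives the new executor path.
void LaunchWithInterpreterCore(CinnLaunchContext* launch_context,
                               const platform::Place& place,
                               framework::Scope* scope) {
  // Builds an InterpreterCore on first use, over the ProgramDesc derived
  // from the CINN runtime program, and rebinds it if the scope changed.
  framework::InterpreterCore* core =
      launch_context->InitializeInterpreterCore(place, scope);
  core->Run({});  // no feed variables; results land in `scope`
}
```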
@@ -622,7 +622,8 @@ void BuildOpFuncList(const platform::Place& place,
         // NOTE(Ruibiao): We do not encourage directly using scope in OP kernel.
         // But some OPs do have such behavior (e.g., cinn_launch OP). Here
         // special treatment for them.
-        if (op_with_kernel->Type() == "cinn_launch") {
+        if (op_with_kernel->Type() == "cinn_launch" ||
+            op_with_kernel->Type() == "cinn_instruction_run") {
           VLOG(6) << "OP(" << op_with_kernel->Type()
                   << ") use scope in kernel, "
                      "so pass a real scope to "
......
@@ -108,7 +108,8 @@ InterpreterCore::InterpreterCore(const platform::Place& place,
                                  const std::set<std::string>& skip_gc_vars,
                                  framework::Scope* scope,
                                  bool used_for_jit,
-                                 bool used_for_control_flow_op)
+                                 bool used_for_control_flow_op,
+                                 bool used_for_cinn)
     : place_(place),
       block_(block),
       execution_config_(place, block.OpSize()),
@@ -121,9 +122,9 @@ InterpreterCore::InterpreterCore(const platform::Place& place,
   execution_config_.used_for_jit = used_for_jit;
   execution_config_.used_for_control_flow_op = used_for_control_flow_op;
-  execution_config_.create_local_scope = !used_for_jit &&
-                                         FLAGS_new_executor_use_local_scope &&
-                                         !used_for_control_flow_op;
+  execution_config_.create_local_scope =
+      !used_for_jit && FLAGS_new_executor_use_local_scope &&
+      !used_for_control_flow_op && !used_for_cinn;
   execution_config_.skip_gc_vars = skip_gc_vars;
   execution_config_.Log(/*log_level=*/8);
@@ -425,8 +426,9 @@ void InterpreterCore::BuildAndCacheInstructionCtx(Instruction* instr_node) {
   }
   // set runtime_ctx and infershape_ctx_
-  if (instr_node->OpBase()->Type() == "cinn_launch") {  // OP use scope in
-                                                        // kernel
+  if (instr_node->OpBase()->Type() == "cinn_launch" ||
+      instr_node->OpBase()->Type() == "cinn_instruction_run") {  // OP use scope
+                                                                 // in kernel
     Scope* local_scope = HasLocalScope() ? var_scope_.GetMutableLocalScope()
                                          : var_scope_.GetMutableScope();
     instr_node->ResetContextWithScope(ins_map, outs_map, *local_scope);
......
@@ -46,7 +46,8 @@ class InterpreterCore {
                   const std::set<std::string>& skip_gc_vars,
                   Scope* scope,
                   bool used_for_jit = false,
-                  bool used_for_control_flow_op = false);
+                  bool used_for_control_flow_op = false,
+                  bool used_for_cinn = false);

   ~InterpreterCore();
......
@@ -15,6 +15,7 @@ cc_library(
     build_strategy
     device_context
     parallel_executor
+    standalone_executor
    transform_type
    cinn)
......
@@ -88,16 +88,15 @@ class TestCinnInstructionRunOp : public ::testing::Test {
     cinn_launch_op->Run(scope, place);
   }

-  void RunAndCheck(const platform::Place& place) {
+  void RunAndCheck(const platform::Place& place, framework::Scope* scope) {
     // Run ops and check the computation results
-    framework::Scope scope;
-    InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
-    scope.Var(test_op_out_name)->GetMutable<phi::DenseTensor>();
-    scope.Var(add_op_out_name)->GetMutable<phi::DenseTensor>();
-    elementwise_add_op->Run(scope, place);
-    cinn_launch_op->Run(scope, place);
-    CompareOpResult<float>(scope.GetVar(test_op_out_name),
-                           scope.GetVar(add_op_out_name));
+    InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, scope);
+    scope->Var(test_op_out_name)->GetMutable<phi::DenseTensor>();
+    scope->Var(add_op_out_name)->GetMutable<phi::DenseTensor>();
+    elementwise_add_op->Run(*scope, place);
+    cinn_launch_op->Run(*scope, place);
+    CompareOpResult<float>(scope->GetVar(test_op_out_name),
+                           scope->GetVar(add_op_out_name));
   }

   void TearDown() override { CinnCompiler::GetInstance()->Clear(); }
@@ -106,17 +105,21 @@ class TestCinnInstructionRunOp : public ::testing::Test {
 TEST_F(TestCinnInstructionRunOp, CPU) {
   platform::CPUPlace place;
   Compile(place);
-  RunAndCheck(place);
+  framework::Scope scope1;
+  RunAndCheck(place, &scope1);
   // the second run on the same place is to check the cache logic
-  RunAndCheck(place);
+  framework::Scope scope2;
+  RunAndCheck(place, &scope2);
 }

 #ifdef PADDLE_WITH_CUDA
 TEST_F(TestCinnInstructionRunOp, GPU) {
   platform::CUDAPlace place;
   Compile(place);
-  RunAndCheck(place);
-  RunAndCheck(place);
+  framework::Scope scope1;
+  RunAndCheck(place, &scope1);
+  framework::Scope scope2;
+  RunAndCheck(place, &scope2);
 }
 #endif
......
@@ -88,8 +88,9 @@ CinnLaunchContext::CinnLaunchContext(const framework::ir::Graph& graph,
   }

   // Convert the CINN runtime program to a Paddle graph
-  runtime_graph_ = std::make_unique<framework::ir::Graph>(
-      BuildCompiledProgram(graph, compiled_obj));
+  runtime_program_desc_ = BuildCompiledProgram(graph, compiled_obj);
+  runtime_graph_ =
+      std::make_unique<framework::ir::Graph>(*runtime_program_desc_.get());
   auto& outer_varinfo = graph.Get<Name2VarInfoMap>(kMemOptVarInfoFromMainGraph);
   runtime_graph_->SetNotOwned<Name2VarInfoMap>(kMemOptVarInfoFromMainGraph,
                                                &outer_varinfo);
@@ -100,6 +101,7 @@ CinnLaunchContext::CinnLaunchContext(const framework::ir::Graph& graph,
     // that means it can be erased after graph execution
     if (!outer_varinfo.count(var_name)) {
       skip_eager_vars_.emplace_back(var_name);
+      skip_gc_vars_.insert(var_name);
     }
   };
   std::for_each(
@@ -313,12 +315,14 @@ void CinnLaunchContext::AssignInternalVariable(const std::string& var_name) {
   });
 }

-framework::ProgramDesc CinnLaunchContext::BuildCompiledProgram(
+std::unique_ptr<framework::ProgramDesc> CinnLaunchContext::BuildCompiledProgram(
     const framework::ir::Graph& graph, const CinnCompiledObject& compiled_obj) {
   CinnRuntimeProgram* runtime_program = compiled_obj.runtime_program.get();
   // Step 0: Create an empty program_desc, there will be only one block
-  framework::ProgramDesc program_desc;
-  auto* block = program_desc.MutableBlock(0);
+  // framework::ProgramDesc program_desc;
+  std::unique_ptr<framework::ProgramDesc> program_desc(
+      new framework::ProgramDesc());
+  auto* block = program_desc->MutableBlock(0);
   const std::vector<std::unique_ptr<CinnInstruction>>& instructions =
       runtime_program->GetRunInstructions();
@@ -445,6 +449,46 @@ ParallelExecutor* CinnLaunchContext::InitializePE(const platform::Place& place,
   return parallel_executor_.get();
 }

+framework::InterpreterCore* CinnLaunchContext::InitializeInterpreterCore(
+    const platform::Place& place, framework::Scope* scope) {
+  if (!interpreter_core_ || scope != cached_scope_) {
+    VLOG(1) << "interpreter_core_ is null or scope != cached_scope_: "
+               "interpreter_core_: "
+            << interpreter_core_.get() << "; scope: " << scope
+            << "; cached_scope_: " << cached_scope_;
+    for (auto&& var_name : internal_var_names_) {
+      auto* var = scope->FindVar(var_name);
+      if (var != nullptr) {
+        continue;
+      }
+      framework::InitializeVariable(scope->Var(var_name),
+                                    framework::proto::VarType::LOD_TENSOR);
+    }
+    if (!interpreter_core_) {
+      interpreter_core_ = std::make_unique<framework::InterpreterCore>(
+          place,
+          runtime_program_desc_->Block(0),
+          skip_gc_vars_,
+          scope,
+          /*used_for_jit*/ false,
+          /*used_for_control_flow_op*/ false,
+          /*used_for_cinn*/ true);
+    } else {
+      interpreter_core_->reset_scope(scope);
+    }
+    UpdateCapturedEnv(*scope, place);
+  }
+  for (auto&& var_name : initialized_beforehand_vars_) {
+    auto* var = scope->GetVar(var_name);
+    auto* buffer = GetCinnBufferOfVar(var_name);
+    auto dim = framework::DDim(buffer->dims, buffer->dimensions);
+    var->GetMutable<phi::DenseTensor>()->Resize(dim);
+    var->GetMutable<phi::DenseTensor>()->mutable_data(
+        place, framework::paddle2cinn::TransToPaddleDataType(buffer->type));
+  }
+  return interpreter_core_.get();
+}
+
 cinn_buffer_t* CinnLaunchContext::GetCinnBufferOfVar(
     const std::string& var_name) {
   auto it = paddle2cinn_varmap_.find(var_name);
......
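InitializeInterpreterCore above is a memoizing initializer: the core is built on first use, and when a later call passes a different scope it is rebound via reset_scope rather than reconstructed, so the cached instruction contexts survive across runs. A stripped-down sketch of that create-or-rebind pattern, with placeholder types that stand in for Paddle's (not the patch's exact members):

```cpp
#include <memory>

// Placeholder stand-ins for framework::Scope / framework::InterpreterCore.
struct Scope {};
struct Core {
  explicit Core(Scope* s) : scope(s) {}
  void reset_scope(Scope* s) { scope = s; }
  Scope* scope;
};

class LaunchContext {
 public:
  Core* Initialize(Scope* scope) {
    if (!core_ || scope != cached_scope_) {
      if (!core_) {
        core_ = std::make_unique<Core>(scope);  // first call: build the core
      } else {
        core_->reset_scope(scope);  // scope changed: rebind, keep caches
      }
      cached_scope_ = scope;
    }
    return core_.get();
  }

 private:
  std::unique_ptr<Core> core_;
  Scope* cached_scope_ = nullptr;
};
```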
@@ -22,6 +22,7 @@
 #include <vector>

 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/new_executor/interpretercore.h"
 #include "paddle/fluid/framework/parallel_executor.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/core/ddim.h"
@@ -74,6 +75,9 @@ class CinnLaunchContext {
   framework::ParallelExecutor* InitializePE(const platform::Place& place,
                                             framework::Scope* scope);

+  framework::InterpreterCore* InitializeInterpreterCore(
+      const platform::Place& place, framework::Scope* scope);
+
   // explicitly update several environment variables captured
   // by callback of execution arguments
   void UpdateCapturedEnv(const framework::Scope& scope,
@@ -132,7 +136,7 @@ class CinnLaunchContext {
   // Construct a Paddle ProgramDesc with the CINN runtime
   // instructions included in the compiled CINN Program
-  framework::ProgramDesc BuildCompiledProgram(
+  std::unique_ptr<framework::ProgramDesc> BuildCompiledProgram(
       const framework::ir::Graph& graph,
       const CinnCompiledObject& compiled_obj);
@@ -155,6 +159,10 @@ class CinnLaunchContext {
   // the variable scope compiled from cinn
   const std::shared_ptr<CinnScope> cinn_scope_;

+  std::unique_ptr<framework::ProgramDesc> runtime_program_desc_;
+  std::unique_ptr<framework::InterpreterCore> interpreter_core_;
+  std::set<std::string> skip_gc_vars_;
+
   // the ir::Graph object converted from the program compiled by CINN
   std::unique_ptr<framework::ir::Graph> runtime_graph_;
   // a ParallelExecutor to execute the runtime graph
......
@@ -31,6 +31,7 @@
 #include "paddle/fluid/platform/profiler.h"

 DECLARE_bool(enable_pe_launch_cinn);
+DECLARE_bool(enable_interpretercore_launch_cinn);

 namespace paddle {
 namespace operators {
@@ -135,12 +136,21 @@ class CinnLaunchOpKernel : public framework::OpKernel<T> {
     // Step 4. Execute the compiled CINN instructions by a PE or
     // by the CINN compiled program in sequential order
     if (FLAGS_enable_pe_launch_cinn) {
-      platform::RecordEvent record_event_4(
-          "Step 4. Execute the runtime graph by PE.");
-      VLOG(4) << "Execute the runtime graph by PE";
-      framework::Scope& exec_scope = scope.NewScope();
-      auto* pe = launch_context->InitializePE(place, &exec_scope);
-      pe->RunWithoutFetch(launch_context->GetSkipEagerVars());
+      if (FLAGS_enable_interpretercore_launch_cinn) {
+        platform::RecordEvent record_event_4(
+            "Step 4. Execute the runtime program by InterpreterCore.");
+        VLOG(4) << "Execute the runtime program by InterpreterCore";
+        auto* interpreter_core = launch_context->InitializeInterpreterCore(
+            place, const_cast<framework::Scope*>(&scope));
+        interpreter_core->Run({});
+      } else {
+        platform::RecordEvent record_event_4(
+            "Step 4. Execute the runtime graph by PE.");
+        VLOG(4) << "Execute the runtime graph by PE";
+        framework::Scope& exec_scope = scope.NewScope();
+        auto* pe = launch_context->InitializePE(place, &exec_scope);
+        pe->RunWithoutFetch(launch_context->GetSkipEagerVars());
+      }
     } else {
       platform::RecordEvent record_event_4(
           "Step 4. Execute the compiled executable program.");
......
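Step 4 of the kernel now dispatches on two flags. A condensed illustration of that branch logic; ExecutorKind and ChooseExecutor are hypothetical names introduced only to spell it out:

```cpp
enum class ExecutorKind { kInterpreterCore, kParallelExecutor, kCinnSequential };

// Mirrors the Step 4 branch in CinnLaunchOpKernel above.
ExecutorKind ChooseExecutor(bool enable_pe_launch_cinn,
                            bool enable_interpretercore_launch_cinn) {
  if (enable_pe_launch_cinn) {
    // Both flags default to true, so InterpreterCore is the new default path.
    return enable_interpretercore_launch_cinn ? ExecutorKind::kInterpreterCore
                                              : ExecutorKind::kParallelExecutor;
  }
  // PE launch disabled: run the CINN-compiled program in sequential order.
  return ExecutorKind::kCinnSequential;
}
```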
@@ -36,6 +36,7 @@ USE_OP(cinn_instruction_run);
 USE_OP_ITSELF(elementwise_add);

 DECLARE_double(eager_delete_tensor_gb);
 DECLARE_bool(enable_pe_launch_cinn);
+DECLARE_bool(enable_interpretercore_launch_cinn);
 DECLARE_bool(enable_cinn_auto_tune);

 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
@@ -74,31 +75,34 @@ class TestCinnLaunchOp : public ::testing::Test {
                    {{}});
   }

-  void RunAndCheck(const platform::Place& place) {
+  void RunAndCheck(const platform::Place& place, framework::Scope* scope) {
     // Run ops and check the computation results
-    framework::Scope scope;
-    InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
-    scope.Var(test_op_out_name)->GetMutable<phi::DenseTensor>();
-    scope.Var(add_op_out_name)->GetMutable<phi::DenseTensor>();
-    elementwise_add_op->Run(scope, place);
-    cinn_launch_op->Run(scope, place);
-    CompareOpResult<float>(scope.GetVar(test_op_out_name),
-                           scope.GetVar(add_op_out_name));
+    InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, scope);
+    scope->Var(test_op_out_name)->GetMutable<phi::DenseTensor>();
+    scope->Var(add_op_out_name)->GetMutable<phi::DenseTensor>();
+    elementwise_add_op->Run(*scope, place);
+    cinn_launch_op->Run(*scope, place);
+    CompareOpResult<float>(scope->GetVar(test_op_out_name),
+                           scope->GetVar(add_op_out_name));
   }

   void TearDown() override { CinnCompiler::GetInstance()->Clear(); }
 };

 TEST_F(TestCinnLaunchOp, TestRunCPUInstructionByPE) {
-  RunAndCheck(platform::CPUPlace());
+  framework::Scope scope1;
+  RunAndCheck(platform::CPUPlace(), &scope1);
   // the second run on the same place is to check the cache logic
-  RunAndCheck(platform::CPUPlace());
+  framework::Scope scope2;
+  RunAndCheck(platform::CPUPlace(), &scope2);
 }

 #ifdef PADDLE_WITH_CUDA
 TEST_F(TestCinnLaunchOp, TestRunGPUInstructionByPE) {
-  RunAndCheck(platform::CUDAPlace());
-  RunAndCheck(platform::CUDAPlace());
+  framework::Scope scope1;
+  RunAndCheck(platform::CUDAPlace(), &scope1);
+  framework::Scope scope2;
+  RunAndCheck(platform::CUDAPlace(), &scope2);
 }
 #endif
@@ -106,9 +110,11 @@ TEST_F(TestCinnLaunchOp, TestRunCPUInstructionByCinnProgram) {
   // set FLAGS_enable_pe_launch_cinn=false to switch to use
   // default scheduler of CINN to execute the compiled program
   FLAGS_enable_pe_launch_cinn = false;
-  RunAndCheck(platform::CPUPlace());
-  RunAndCheck(platform::CPUPlace());
+  FLAGS_enable_interpretercore_launch_cinn = false;
+  framework::Scope scope1;
+  RunAndCheck(platform::CPUPlace(), &scope1);
+  framework::Scope scope2;
+  RunAndCheck(platform::CPUPlace(), &scope2);
 }
 #ifdef PADDLE_WITH_CUDA
@@ -116,8 +122,11 @@ TEST_F(TestCinnLaunchOp, TestRunGPUInstructionByCinnProgram) {
   // set FLAGS_enable_pe_launch_cinn=false to switch to use
   // default scheduler of CINN to execute the compiled program
   FLAGS_enable_pe_launch_cinn = false;
-  RunAndCheck(platform::CUDAPlace());
-  RunAndCheck(platform::CUDAPlace());
+  FLAGS_enable_interpretercore_launch_cinn = false;
+  framework::Scope scope1;
+  RunAndCheck(platform::CUDAPlace(), &scope1);
+  framework::Scope scope2;
+  RunAndCheck(platform::CUDAPlace(), &scope2);
 }
 #endif
@@ -125,8 +134,10 @@ TEST_F(TestCinnLaunchOp, TestRunWithAutoTuneEnabled) {
   FLAGS_enable_cinn_auto_tune = true;

   // currently only check on cpu, will add a test for gpu after CINN ready
-  RunAndCheck(platform::CPUPlace());
-  RunAndCheck(platform::CPUPlace());
+  framework::Scope scope1;
+  RunAndCheck(platform::CPUPlace(), &scope1);
+  framework::Scope scope2;
+  RunAndCheck(platform::CPUPlace(), &scope2);
 }

 namespace details {
......
@@ -978,6 +978,20 @@ PADDLE_DEFINE_EXPORTED_bool(enable_pe_launch_cinn,
                             "It controls whether to execute cinn compiled "
                             "program with ParallelExecutor");

+/*
+ * CINN related FLAG
+ * Name: FLAGS_enable_interpretercore_launch_cinn
+ * Since Version: 2.4
+ * Value Range: bool, default=true
+ * Example: FLAGS_enable_interpretercore_launch_cinn=true would execute the
+ * CINN compiled instructions of a paddle graph with InterpreterCore, otherwise
+ * with the CINN compiled runtime program in sequential order.
+ */
+PADDLE_DEFINE_EXPORTED_bool(enable_interpretercore_launch_cinn,
+                            true,
+                            "It controls whether to execute cinn compiled "
+                            "program with InterpreterCore");
+
 /*
  * CINN related FLAG
  * Name: FLAGS_enable_cinn_auto_tune
......
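Since the new flag defaults to true, existing users get the InterpreterCore path without any change. To opt out in C++ code, assign the flags directly, as the updated tests above do; a sketch, assuming the standard gflags DECLARE_bool macro and that the flags are linked into the binary:

```cpp
DECLARE_bool(enable_pe_launch_cinn);
DECLARE_bool(enable_interpretercore_launch_cinn);

// Fall back to executing the CINN-compiled runtime program sequentially.
void DisableNewExecutorLaunchPaths() {
  FLAGS_enable_pe_launch_cinn = false;
  FLAGS_enable_interpretercore_launch_cinn = false;
}
```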