diff --git a/paddle/fluid/lite/core/mir/passes.h b/paddle/fluid/lite/core/mir/passes.h
new file mode 100644
index 0000000000000000000000000000000000000000..237b2e889df13f483677082134b44ae9da8410b8
--- /dev/null
+++ b/paddle/fluid/lite/core/mir/passes.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "paddle/fluid/lite/core/mir/pass_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace mir {}  // namespace mir
+}  // namespace lite
+}  // namespace paddle
+
+USE_MIR_PASS(demo);
+USE_MIR_PASS(static_kernel_pick_pass);
+USE_MIR_PASS(io_complement_pass);
+USE_MIR_PASS(generate_program_pass);
diff --git a/paddle/fluid/lite/core/optimizer_test.cc b/paddle/fluid/lite/core/optimizer_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5d718754ddb4263b6f526b99b966ecb34b723923
--- /dev/null
+++ b/paddle/fluid/lite/core/optimizer_test.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/core/optimizer.h"
+#include <gtest/gtest.h>
+#include "paddle/fluid/lite/core/mir/pass_manager.h"
+#include "paddle/fluid/lite/core/mir/passes.h"
+#include "paddle/fluid/lite/core/mir/static_kernel_pick_pass.h"
+#include "paddle/fluid/lite/core/program_fake_utils.h"
+
+namespace paddle {
+namespace lite {
+
+TEST(Optimizer, test) {
+  Optimizer optimizer;
+  auto program = FakeProgram();
+  std::vector<Place> places({Place{TARGET(kHost), PRECISION(kFloat)}});
+
+  auto* pick_pass =
+      mir::PassManager::Global().LookUp<mir::StaticKernelPickPass>(
+          "static_kernel_pick_pass");
+  ASSERT_TRUE(pick_pass != nullptr);
+  pick_pass->mutable_kernel_pick_factors()
+      ->ConsiderTarget()
+      .ConsiderPrecision();
+
+  optimizer.Run(std::move(program), places);
+}
+
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_OP(fc);
+USE_LITE_KERNEL(fc, kHost, kFloat);
diff --git a/paddle/fluid/lite/core/program_fake_utils.cc b/paddle/fluid/lite/core/program_fake_utils.cc
new file mode 100644
index 0000000000000000000000000000000000000000..296f2d523bf31d83d50aa78db1e39c09c928db5c
--- /dev/null
+++ b/paddle/fluid/lite/core/program_fake_utils.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/core/program_fake_utils.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace mir {}  // namespace mir
+}  // namespace lite
+}  // namespace paddle
diff --git a/paddle/fluid/lite/core/program_fake_utils.h b/paddle/fluid/lite/core/program_fake_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..867cfc780237e737932ad39be29e4d88138fc341
--- /dev/null
+++ b/paddle/fluid/lite/core/program_fake_utils.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <string>
+#include "paddle/fluid/lite/core/mir/ssa_graph.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+
+mir::Program FakeProgram() {
+  mir::Program program;
+  program.scope = new lite::Scope;
+
+  auto add_fc = [&](int id, std::string x) {
+    // create variables
+    std::string w1 = "w" + std::to_string(id);
+    std::string b1 = "b" + std::to_string(id);
+    std::string out1 = "out" + std::to_string(id);
+    auto w1v = program.scope->Var(w1)->GetMutable<Tensor>();
+    auto b1v = program.scope->Var(b1)->GetMutable<Tensor>();
+    auto out1v = program.scope->Var(out1)->GetMutable<Tensor>();
+
+    framework::OpDesc desc;
+    desc.SetInput("Input", {x});
+    desc.SetInput("W", {w1});
+    desc.SetInput("Bias", {b1});
+    desc.SetOutput("Out", {out1});
+    desc.SetType("fc");
+    desc.SetAttr("in_num_col_dims", 1);
+    desc.Flush();
+
+    // add to input
+    program.tmp_vars.push_back(w1);
+    program.tmp_vars.push_back(b1);
+
+    auto fc_op = LiteOpRegistry::Global().Create("fc");
+    fc_op->PickKernel({Place{TARGET(kHost), PRECISION(kFloat)}});
+    fc_op->Attach(desc, program.scope);
+    program.ops.emplace_back(std::move(fc_op));
+
+    w1v->Resize({100, 100});
+    b1v->Resize({100, 1});
+    out1v->Resize({100, 100});
+
+    return out1;
+  };
+
+  // x1, w1, b1 -fc-> out1
+  // out1, w2, b2 -fc-> out2
+
+  std::string x = "x";
+  program.tmp_vars.push_back(x);
+  auto* xv = program.scope->Var(x)->GetMutable<Tensor>();
+  xv->Resize({100, 100});
+
+  for (int i = 0; i < 3; i++) {
+    x = add_fc(i, x);
+  }
+  return program;
+}
+
+}  // namespace lite
+}  // namespace paddle
diff --git a/paddle/fluid/lite/core/types.cc b/paddle/fluid/lite/core/types.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f616a7d7f497633d390198f4d4d7b044314c8327
--- /dev/null
+++ b/paddle/fluid/lite/core/types.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/core/types.h"
+
+namespace paddle {
+namespace lite {
+namespace core {
+
+KernelPickFactor& KernelPickFactor::ConsiderDataLayout() {
+  data_ |= static_cast<int>(Factor::DataLayoutFirst);
+  return *this;
+}
+KernelPickFactor& KernelPickFactor::ConsiderPrecision() {
+  data_ |= static_cast<int>(Factor::PrecisionFirst);
+  return *this;
+}
+KernelPickFactor& KernelPickFactor::ConsiderTarget() {
+  data_ |= static_cast<int>(Factor::TargetFirst);
+  return *this;
+}
+KernelPickFactor& KernelPickFactor::ConsiderDevice() {
+  data_ |= static_cast<int>(Factor::DeviceFirst);
+  return *this;
+}
+bool KernelPickFactor::IsPrecisionConsidered() const {
+  return data_ & static_cast<int>(Factor::PrecisionFirst);
+}
+bool KernelPickFactor::IsTargetConsidered() const {
+  return data_ & static_cast<int>(Factor::TargetFirst);
+}
+bool KernelPickFactor::IsDataLayoutConsidered() const {
+  return data_ & static_cast<int>(Factor::DataLayoutFirst);
+}
+
+}  // namespace core
+}  // namespace lite
+}  // namespace paddle
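
Usage sketch (illustrative only, not part of the diff above): the KernelPickFactor added in types.cc is a builder-style bit mask whose chained Consider*() setters each flip one bit and whose Is*Considered() predicates read those bits back; this is the same call pattern optimizer_test.cc uses when configuring static_kernel_pick_pass. The snippet below assumes only the declarations this patch gives to paddle/fluid/lite/core/types.h and that a default-constructed factor starts with no bits set.

// Illustrative sketch only. Assumes the KernelPickFactor declaration in
// paddle/fluid/lite/core/types.h matches the definitions in types.cc above
// and that a default-constructed factor has no bits set.
#include "paddle/fluid/lite/core/types.h"

int main() {
  paddle::lite::core::KernelPickFactor factors;

  // Each Consider*() call ORs one Factor bit into the internal mask and
  // returns *this, so the calls chain (as in optimizer_test.cc).
  factors.ConsiderTarget().ConsiderPrecision();

  // The Is*Considered() predicates test the corresponding bits.
  bool target_set = factors.IsTargetConsidered();        // true
  bool precision_set = factors.IsPrecisionConsidered();  // true
  bool layout_set = factors.IsDataLayoutConsidered();    // false

  return (target_set && precision_set && !layout_set) ? 0 : 1;
}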