Unverified · Commit 48600d7f authored by Leo Chen, committed by GitHub

Add op function generator for dygraph (#21569)

* add op function generator, test=develop

* add unittest, test=develop

* follow comments, test=develop

* fix windows compilation problem, test=develop
Parent 55f2b7de
@@ -99,6 +99,27 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
  }
}
void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                     const NameVarBaseMap& outs,
                     framework::AttributeMap attrs) {
  VLOG(1) << "Trace Op: " << type;
  size_t op_id = GenerateUniqueId();
  auto op =
      OpBase::Create(op_id, type, ins, outs, std::move(attrs), expected_place_);
  op->Run(ins, outs);

  if (enable_program_desc_tracing_) {
    VLOG(5) << "Trace op " << type << " into ProgramDesc";
    program_desc_tracer_->InsertOp(type, ins, outs, op->Attrs());
  }

  if (ComputeRequiredGrad(ins, outs, no_grad_)) {
    TraceBackward(op, ins, outs);
  } else {
    VLOG(3) << "No Grad to track for Op: " << type;
  }
}
bool Tracer::ComputeRequiredGrad(const NameVarBaseMap& ins,
                                 const NameVarBaseMap& outs,
                                 bool trace_backward) {
......
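For orientation, here is a minimal sketch of driving the new overload directly. The tracer lookup and VarBase construction are APIs that appear elsewhere in this diff; the input variable x and the choice of relu are illustrative assumptions. Note the contrast with the existing overload: the place and the trace-backward decision are no longer passed in, but come from the tracer's own state (expected_place_, no_grad_).

// A minimal sketch, assuming a current tracer and an existing input
// VarBase `x` (hypothetical); this mirrors how the generated pybind
// lambdas below invoke the new overload.
auto tracer = imperative::GetCurrentTracer();
imperative::NameVarBaseMap ins = {{"X", {x}}};
imperative::NameVarBaseMap outs = {
    {"Out",
     {std::make_shared<imperative::VarBase>(tracer->GenerateUniqueName())}}};
tracer->TraceOp("relu", ins, outs, framework::AttributeMap());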
@@ -58,6 +58,9 @@ class Tracer {
               const NameVarBaseMap& outs, framework::AttributeMap attrs,
               const platform::Place& place, bool trace_backward);
  void TraceOp(const std::string& type, const NameVarBaseMap& ins,
               const NameVarBaseMap& outs, framework::AttributeMap attrs);
  bool ComputeRequiredGrad(const NameVarBaseMap& ins,
                           const NameVarBaseMap& outs, bool trace_backward);
......
@@ -33,6 +33,39 @@ if (WITH_DISTRIBUTE)
  list(APPEND PYBIND_SRCS communicator_py.cc)
endif()
# Generate op pybind functions automatically for dygraph.
set(OP_FUNCTION_GENERATOR_DEPS pybind proto_desc executor layer tracer engine imperative_profiler imperative_flag)
list(APPEND OP_FUNCTION_GENERATOR_DEPS ${GLOB_OP_LIB})
list(APPEND OP_FUNCTION_GENERATOR_DEPS ${GLOB_OPERATOR_DEPS})

add_executable(op_function_generator op_function_generator.cc)
target_link_libraries(op_function_generator ${OP_FUNCTION_GENERATOR_DEPS})
get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
target_link_libraries(op_function_generator ${os_dependency_modules})

if(WIN32)
  add_custom_target(op_function_cmd
    COMMAND "${CMAKE_BINARY_DIR}/paddle/fluid/pybind/${CMAKE_BUILD_TYPE}/op_function_generator"
            "${CMAKE_SOURCE_DIR}/paddle/fluid/pybind/op_function_impl.h")
  add_dependencies(op_function_cmd op_function_generator)
  if(WITH_MKL)
    add_custom_target(copy_dll
      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_SHARED_LIB} ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}
      COMMAND ${CMAKE_COMMAND} -E copy ${MKLML_SHARED_LIB} ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}
      COMMAND ${CMAKE_COMMAND} -E copy ${MKLML_SHARED_LIB_DEPS} ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}
      COMMAND ${CMAKE_COMMAND} -E copy ${MKLML_SHARED_IOMP_LIB} ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE})
    add_dependencies(copy_dll op_function_generator)
    add_dependencies(op_function_cmd copy_dll)
  endif(WITH_MKL)
else(WIN32)
  add_custom_target(op_function_cmd
    COMMAND "${CMAKE_CURRENT_BINARY_DIR}/op_function_generator"
            "${CMAKE_SOURCE_DIR}/paddle/fluid/pybind/op_function_impl.h")
  add_dependencies(op_function_cmd op_function_generator)
endif(WIN32)
if(WITH_PYTHON)
  if(WITH_AMD_GPU)
    hip_library(paddle_pybind SHARED
@@ -51,5 +84,6 @@ if(WITH_PYTHON)
  get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
  target_link_libraries(paddle_pybind ${os_dependency_modules})
  add_dependencies(paddle_pybind op_function_cmd)
endif(WITH_PYTHON)
@@ -30,6 +30,7 @@ limitations under the License. */
#include "paddle/fluid/imperative/profiler.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/pybind/op_function.h"
#include "paddle/fluid/pybind/pybind_boost_headers.h" #include "paddle/fluid/pybind/pybind_boost_headers.h"
#include "paddle/fluid/pybind/tensor_py.h" #include "paddle/fluid/pybind/tensor_py.h"
...@@ -216,6 +217,8 @@ static imperative::NameVarBaseMap ConvertToNameVarBaseMap( ...@@ -216,6 +217,8 @@ static imperative::NameVarBaseMap ConvertToNameVarBaseMap(
void BindImperative(py::module *m_ptr) { void BindImperative(py::module *m_ptr) {
auto &m = *m_ptr; auto &m = *m_ptr;
BindOpFunctions(&m);
py::class_<imperative::detail::BackwardStrategy> backward_strategy( py::class_<imperative::detail::BackwardStrategy> backward_strategy(
m, "BackwardStrategy", R"DOC( m, "BackwardStrategy", R"DOC(
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <pybind11/chrono.h>
#include <pybind11/complex.h>
#include <pybind11/functional.h>
#include <pybind11/stl.h>
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/pybind/imperative.h"
// This include must be the last line: op_function_impl.h is generated at
// build time and relies on the declarations included above.
#include "paddle/fluid/pybind/op_function_impl.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <fstream>
#include <iostream>
#include <string>
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/pybind/pybind.h"
#include "paddle/fluid/string/string_helper.h"
const char* OUT_INITIALIZER_TEMPLATE =
R"({"%s", {std::shared_ptr<imperative::VarBase>(new imperative::VarBase(tracer->GenerateUniqueName()))}})";
const char* OP_FUNCTION_TEMPLATE =
    R"([](const imperative::NameVarBaseMap& ins, const framework::AttributeMap& attrs,
     imperative::NameVarBaseMap outs, const std::map<std::string, size_t>& out_nums)
  {
    auto tracer = imperative::GetCurrentTracer();
    if (outs.size() == 0) {
      if (out_nums.size() == 0) {
        imperative::NameVarBaseMap outs_ = %s;
        outs = std::move(outs_);
      } else {
        for (auto &pair : out_nums) {
          for (size_t i = 0; i < pair.second; i++) {
            auto var_base_name = tracer->GenerateUniqueName();
            auto out = new imperative::VarBase(var_base_name);
            outs[pair.first].emplace_back(std::shared_ptr<imperative::VarBase>(out));
          }
        }
      }
    }
    {
      py::gil_scoped_release release;
      tracer->TraceOp("%s", std::move(ins), std::move(outs), std::move(attrs));
      return outs;
    }
  }, py::arg("ins"), py::arg("attrs")=framework::AttributeMap(),
  py::arg("outs")=imperative::NameVarBaseMap(),
  py::arg("out_nums")=std::map<std::string, size_t>())";
const char* PYBIND_ITEM_TEMPLATE = R"( %s.def("%s", %s);)";
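Putting the three templates together: for a single-output op such as relu (assuming its proto declares one output named "Out"), the generated registration expands roughly to the following. This is an illustrative expansion for one op, not literal generator output:

m.def("relu", [](const imperative::NameVarBaseMap& ins,
                 const framework::AttributeMap& attrs,
                 imperative::NameVarBaseMap outs,
                 const std::map<std::string, size_t>& out_nums) {
    auto tracer = imperative::GetCurrentTracer();
    if (outs.size() == 0) {
      if (out_nums.size() == 0) {
        // OUT_INITIALIZER_TEMPLATE instantiated for the single output "Out".
        imperative::NameVarBaseMap outs_ = {
            {"Out", {std::shared_ptr<imperative::VarBase>(
                        new imperative::VarBase(tracer->GenerateUniqueName()))}}};
        outs = std::move(outs_);
      } else {
        // The caller requested an explicit number of outputs per slot.
        for (auto& pair : out_nums) {
          for (size_t i = 0; i < pair.second; i++) {
            auto out = new imperative::VarBase(tracer->GenerateUniqueName());
            outs[pair.first].emplace_back(std::shared_ptr<imperative::VarBase>(out));
          }
        }
      }
    }
    {
      // Release the GIL while the C++ tracer runs the op.
      py::gil_scoped_release release;
      tracer->TraceOp("relu", std::move(ins), std::move(outs), std::move(attrs));
      return outs;
    }
  },
  py::arg("ins"), py::arg("attrs") = framework::AttributeMap(),
  py::arg("outs") = imperative::NameVarBaseMap(),
  py::arg("out_nums") = std::map<std::string, size_t>());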
static std::vector<std::string> GenerateOpFunctions(
    const std::string& module_name) {
  auto& op_info_map = paddle::framework::OpInfoMap::Instance().map();

  std::vector<std::string> op_function_list;
  for (auto& pair : op_info_map) {
    auto& op_info = pair.second;
    auto op_proto = op_info.proto_;
    if (op_proto == nullptr) {
      continue;
    }
    auto& op_type = op_proto->type();

    // Generate the outs initializer.
    std::string outs_initializer = "{";
    for (auto& output : op_proto->outputs()) {
      auto& out_name = output.name();
      auto out_initializer_str =
          paddle::string::Sprintf(OUT_INITIALIZER_TEMPLATE, out_name);
      outs_initializer += out_initializer_str;
      outs_initializer += ",";
    }
    if (outs_initializer.back() == ',') {
      outs_initializer.pop_back();
    }
    outs_initializer += "}";

    // Generate the op function body.
    auto op_function_str = paddle::string::Sprintf(OP_FUNCTION_TEMPLATE,
                                                   outs_initializer, op_type);

    // Generate the pybind item.
    auto pybind_op_function = paddle::string::Sprintf(
        PYBIND_ITEM_TEMPLATE, module_name.c_str(), op_type, op_function_str);
    pybind_op_function += "\n";

    op_function_list.emplace_back(std::move(pybind_op_function));
  }

  return op_function_list;
}
int main(int argc, char* argv[]) {
  if (argc != 2) {
    std::cerr << "argc must be 2" << std::endl;
    return -1;
  }

  std::vector<std::string> headers{"\"paddle/fluid/imperative/tracer.h\""};

  std::ofstream out(argv[1], std::ios::out);

  out << "#pragma once\n\n";

  for (auto& header : headers) {
    out << "#include " + header + "\n";
  }
  out << "namespace py = pybind11;"
      << "\n";
  out << "namespace paddle {\n"
      << "namespace pybind {\n"
      << "\n"
      << "inline void BindOpFunctions(pybind11::module *module) {\n"
      << "  auto m = module->def_submodule(\"ops\");\n\n";

  // All op functions.
  auto op_funcs = GenerateOpFunctions("m");
  out << paddle::string::join_strings(op_funcs, '\n');
  out << "}\n\n"
      << "} // namespace pybind\n"
      << "} // namespace paddle\n";

  out.close();
  return 0;
}
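The generated op_function_impl.h therefore has roughly this shape (a sketch assembled from the writes in main() above; the example op names stand in for the full per-op list emitted by GenerateOpFunctions):

#pragma once

#include "paddle/fluid/imperative/tracer.h"
namespace py = pybind11;
namespace paddle {
namespace pybind {

inline void BindOpFunctions(pybind11::module *module) {
  auto m = module->def_submodule("ops");

  // One generated m.def(...) per registered operator that has a proto, e.g.:
  //   m.def("relu", <lambda from OP_FUNCTION_TEMPLATE>);
  //   m.def("elementwise_add", <lambda from OP_FUNCTION_TEMPLATE>);
}

} // namespace pybind
} // namespace paddle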
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.dygraph.jit import TracedLayer
import numpy as np
class TestTracedLayer(fluid.dygraph.Layer):
    def __init__(self, name_scope):
        super(TestTracedLayer, self).__init__(name_scope)

    def forward(self, input):
        inputs = {'X': [input] if isinstance(input, fluid.Variable) else input}
        return core.ops.relu(inputs)['Out'][0]


class TestVariable(unittest.TestCase):
    def setUp(self):
        self.shape = [512, 768]
        self.dtype = np.float32
        self.array = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)

    def test_elementwise_add(self):
        with fluid.dygraph.guard():
            a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            x = fluid.dygraph.to_variable(a)
            y = fluid.dygraph.to_variable(b)
            x.stop_gradient = False

            res1 = layers.elementwise_add(x, y)

            inputs = {'X': [x], 'Y': [y]}
            res2 = core.ops.elementwise_add(inputs)['Out'][0]

            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

    def test_elementwise_mul(self):
        with fluid.dygraph.guard():
            a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            x = fluid.dygraph.to_variable(a)
            y = fluid.dygraph.to_variable(b)

            res1 = layers.elementwise_mul(x, y)

            inputs = {'X': [x], 'Y': [y]}
            res2 = core.ops.elementwise_mul(inputs)['Out'][0]

            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

    def test_relu(self):
        with fluid.dygraph.guard():
            a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            x = fluid.dygraph.to_variable(a)

            res1 = layers.relu(x)

            inputs = {'X': [x]}
            res2 = core.ops.relu(inputs)['Out'][0]

            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

    def test_trace_backward(self):
        with fluid.dygraph.guard():
            a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            x = fluid.dygraph.to_variable(a)
            y = fluid.dygraph.to_variable(b)
            x.stop_gradient = False
            y.stop_gradient = False

            inputs = {'X': [x], 'Y': [y]}
            loss = core.ops.elementwise_mul(inputs)['Out'][0]

            loss.backward()
            x_grad = x.gradient()
            y_grad = y.gradient()

            self.assertTrue(np.array_equal(x_grad, loss.gradient() * b))
            self.assertTrue(np.array_equal(y_grad, loss.gradient() * a))

    def test_traced_layer(self):
        with fluid.dygraph.guard():
            layer = TestTracedLayer("test_traced_layer")
            a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            x = fluid.dygraph.to_variable(a)
            res_dygraph, static_layer = TracedLayer.trace(
                layer, inputs=[x])  # dygraph out
            res_static_graph = static_layer([x])[0]

            self.assertTrue(
                np.array_equal(res_dygraph.numpy(), res_static_graph))


if __name__ == '__main__':
    unittest.main()