未验证 提交 24a3cb52 编写于 作者: Z zhangbo9674 提交者: GitHub

[IR] Change IR from Static library to dynamic library (#54729)

* new_ir to shared

* refine code

* add ir lib path to env

* refine type

* refine code

* fix bug

* fix bug

* refine code

* refine code

* close win

* refine code

* refine code

* refine code

* add win share

* refine code

* refine code

* refine code

* refine code

* refine code

* fix bug

* fix bug

* fix bug

* solve conflict

* solve conflict

* fix bug

* refine code

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* refine code

* fix interpretercore program bug

* delete unuse code

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix cinn bug

* fix cinn bug

* debug

* fix cinn bug

* delete unused code

* fix cinn bug

* fix cinn bug

* fix bug

* test win openblas

* test win openblas

* fix win openblas bug

* polish code

* fix win open blas bug

* close win dll

* fix flag bug

* test for windows

* fix compile bug
上级 bce67644
......@@ -307,7 +307,7 @@ option(WITH_CUDNN_FRONTEND
"Compile with CUDNN Frontend API support (experimental)" OFF)
option(WITH_CUDNN_DSO "Compile PaddlePaddle with cuDNN dynamic-link libraries"
OFF)
option(WITH_IR "Compile PaddlePaddle with NEWIR" ON)
option(WITH_SHARED_IR "Compile PaddlePaddle with SHARED LIB of IR" ON)
if(WITH_RECORD_BUILDTIME)
set_property(
......
......@@ -8,9 +8,7 @@ add_subdirectory(pybind)
add_subdirectory(eager)
add_subdirectory(prim)
add_subdirectory(jit)
if(WITH_IR)
add_subdirectory(ir)
add_subdirectory(ir_adaptor)
endif()
add_subdirectory(ir)
add_subdirectory(ir_adaptor)
# NOTE: please add subdirectory inference at last.
add_subdirectory(inference)
......@@ -29,6 +29,8 @@
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
namespace paddle {
namespace framework {
......
......@@ -20,6 +20,7 @@
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/prim/api/all.h"
#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
#include "paddle/fluid/prim/utils/utils.h"
......
......@@ -61,6 +61,14 @@ if(WIN32)
list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/phi.dll)
endif()
if(WITH_SHARED_IR)
add_custom_command(
OUTPUT ${eager_generator_path}/ir.dll
COMMAND ${CMAKE_COMMAND} -E copy ${IR_LIB} ${eager_generator_path}
DEPENDS ir)
list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/ir.dll)
endif()
if(${CBLAS_PROVIDER} STREQUAL MKLML)
message("Copied libiomp5md.dll for Eager AutoCodeGen")
add_custom_command(
......
......@@ -409,7 +409,6 @@ FORWARD_H_FILE_TEMPLATE = """
#include "paddle/phi/api/all.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/eager/to_static/run_program_op_func.h"
#include "paddle/fluid/eager/api/manual/eager_manual/dygraph_forward_api.h"
using CPUPlace = phi::CPUPlace;
......
......@@ -17,10 +17,13 @@
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/tensor_wrapper.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/run_program_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
namespace details {
using Tensor = paddle::Tensor;
......
......@@ -13,8 +13,10 @@
// limitations under the License.
#include "paddle/fluid/framework/executor_cache.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
namespace paddle {
namespace framework {
......
......@@ -23,7 +23,6 @@
#include <utility>
#include <vector>
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/program_desc.h"
......@@ -36,6 +35,8 @@ namespace ir {
class Graph;
}
class InterpreterCore;
namespace details {
void AppendSkipDeletionVars(const std::vector<std::string>& append_vars,
std::vector<std::string>* all_vars);
......
......@@ -14,7 +14,7 @@ set(STANDALONE_EXECUTOR_DEPS
pd_op_to_kernel_pass
phi_kernel_adaptor
program_translator
new_ir)
ir)
cc_library(
standalone_executor
......
......@@ -38,7 +38,6 @@
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/ir/core/program.h"
using AtomicVectorSizeT = std::vector<std::atomic<size_t>>;
......
......@@ -16,6 +16,8 @@
#include "paddle/fluid/framework/new_executor/new_ir_interpreter.h"
#include "paddle/fluid/framework/new_executor/program_interpreter.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
PADDLE_DEFINE_EXPORTED_bool(
new_executor_serial_run,
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/new_executor/interpreter_base_impl.h"
DECLARE_bool(new_executor_use_local_scope);
......@@ -20,10 +21,6 @@ namespace ir {
class Program;
} // namespace ir
namespace ir {
class Program;
} // namespace ir
namespace paddle {
namespace framework {
......
......@@ -38,6 +38,7 @@
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/paddle2cinn/build_cinn_pass.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.h"
#include "paddle/fluid/framework/paddle2cinn/transform_desc.h"
......@@ -47,6 +48,8 @@
#include "paddle/fluid/operators/cinn/cinn_launch_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
#include "paddle/phi/core/flags.h"
PHI_DECLARE_bool(enable_pe_launch_cinn);
......
......@@ -33,6 +33,7 @@ endif()
# fluid_modules exclude API-interface of inference/api and inference/capi_exp
get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
get_property(phi_modules GLOBAL PROPERTY PHI_MODULES)
get_property(ir_targets GLOBAL PROPERTY IR_TARGETS)
set(utils_modules pretty_log string_helper benchmark utf8proc)
add_subdirectory(api)
......@@ -58,15 +59,20 @@ set(KERNEL_LIST
#windows GPU static library over the limit, so not create_static_lib, and cc_library is dummy
if(WIN32 AND WITH_GPU)
cc_library(paddle_inference DEPS ${fluid_modules} new_ir
cc_library(paddle_inference DEPS ${fluid_modules} ${ir_targets}
${STATIC_INFERENCE_API} ${utils_modules})
else()
# message("${fluid_modules}")
# message("${STATIC_INFERENCE_API}")
# message("${utils_modules}")
# message("${phi_modules}")
create_static_lib(paddle_inference ${phi_modules} ${fluid_modules} new_ir
${STATIC_INFERENCE_API} ${utils_modules})
if(WIN32)
create_static_lib(paddle_inference ${phi_modules} ${fluid_modules}
${STATIC_INFERENCE_API} ${utils_modules})
else()
create_static_lib(paddle_inference ${phi_modules} ${fluid_modules}
${ir_targets} ${STATIC_INFERENCE_API} ${utils_modules})
endif()
endif()
if(NOT APPLE)
......@@ -96,8 +102,13 @@ set(SHARED_INFERENCE_SRCS
# shared inference library deps
list(REMOVE_ITEM fluid_modules standalone_executor
interpretercore_garbage_collector)
set(SHARED_INFERENCE_DEPS phi new_ir ${fluid_modules} analysis_predictor
${utils_modules})
if(WIN32)
set(SHARED_INFERENCE_DEPS phi ${fluid_modules} analysis_predictor
${utils_modules})
else()
set(SHARED_INFERENCE_DEPS phi ${fluid_modules} ${ir_targets}
analysis_predictor ${utils_modules})
endif()
if(WITH_CRYPTO)
set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} paddle_crypto)
......
......@@ -50,5 +50,5 @@ file(GLOB PD_DIALECT_SRCS "*.cc")
cc_library(
pd_dialect
SRCS ${PD_DIALECT_SRCS} ${op_source_file}
DEPS new_ir framework_proto phi phi_utils)
DEPS framework_proto phi phi_utils pd_interface ir)
target_include_directories(pd_dialect PRIVATE ${PD_DIALECT_BINARY_DIR})
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/kernel_attribute.h"
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::KernelAttribute)
......@@ -36,3 +36,5 @@ class KernelAttribute : public ir::Attribute {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::KernelAttribute)
......@@ -69,3 +69,5 @@ void PaddleKernelDialect::PrintAttribute(ir::Attribute attr,
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleKernelDialect)
......@@ -37,3 +37,5 @@ class PaddleKernelDialect : public ir::Dialect {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleKernelDialect)
......@@ -34,3 +34,5 @@ void PhiKernelOp::Verify(const std::vector<ir::OpResult> &inputs,
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::PhiKernelOp)
......@@ -33,3 +33,5 @@ class PhiKernelOp : public ir::Op<PhiKernelOp> {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::PhiKernelOp)
......@@ -43,3 +43,5 @@ const size_t& AllocatedDenseTensorType::offset() const {
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AllocatedDenseTensorType)
......@@ -66,3 +66,5 @@ class AllocatedDenseTensorType : public ir::Type {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AllocatedDenseTensorType)
......@@ -42,12 +42,18 @@ H_FILE_TEMPLATE = """#ifdef GET_OP_LIST
#include "paddle/phi/core/infermeta_utils.h"
{input}
{declare_type_id}
#endif
"""
GET_OP_LIST_TEMPALTE = """{}
"""
DECLARE_OP_TYPE_ID = """
IR_DECLARE_EXPLICIT_TYPE_ID({op_name})
"""
OP_DECLARE_TEMPLATE = """
class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{
public:
......@@ -98,6 +104,8 @@ CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_g
#include "paddle/phi/api/lib/utils/allocator.h"
{input}
{define_type_id}
"""
OP_N_ATTRIBUTE_DEFINED_TEMPLATE = """
......@@ -238,6 +246,10 @@ void {op_name}::InferShape( phi::InferMetaContext *infer_meta ) {{
}}
"""
DEFINE_OP_TYPE_ID = """
IR_DEFINE_EXPLICIT_TYPE_ID({op_name})
"""
def to_phi_and_fluid_op_name(op_item):
# Template: - op : phi_name (fluid_name)
......@@ -1701,6 +1713,11 @@ def OpGenerator(
op_list_str = GET_OP_LIST_TEMPALTE.format(
", ".join(ops_name_with_namespace_list)
) # Add GET_OP_LIST
declare_type_id_str = ""
for op in ops_name_with_namespace_list:
declare_type_id_str += DECLARE_OP_TYPE_ID.format(op_name=op)
head_file_str = ""
head_file_str += "".join(ops_declare_list) # Add op class
for name in reversed(namespaces):
......@@ -1708,7 +1725,9 @@ def OpGenerator(
namespace=name, input=head_file_str
) # Add namespaces
head_file_str = H_FILE_TEMPLATE.format(
op_declare=op_list_str, input=head_file_str
op_declare=op_list_str,
input=head_file_str,
declare_type_id=declare_type_id_str,
) # Add head
# (5) Generate source file str
......@@ -1717,8 +1736,15 @@ def OpGenerator(
source_file_str = NAMESPACE_GARD_TEMPLATE.format(
namespace=name, input=source_file_str
) # Add namespaces
define_type_id_str = ""
for op in ops_name_with_namespace_list:
define_type_id_str += DEFINE_OP_TYPE_ID.format(op_name=op)
source_file_str = CC_FILE_TEMPLATE.format(
h_file=op_def_h_file[:-4], input=source_file_str
h_file=op_def_h_file[:-4],
input=source_file_str,
define_type_id=define_type_id_str,
) # Add head
# (5) Generate pd_op.h.tmp, pd_op.cc.tmp
......
......@@ -46,3 +46,9 @@ phi::Scalar ScalarAttribute::data() {
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::IntArrayAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ScalarAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::DataTypeAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::PlaceAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::DataLayoutAttribute)
......@@ -94,3 +94,9 @@ class DataLayoutAttribute : public ir::Attribute {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::IntArrayAttribute)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ScalarAttribute)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::DataTypeAttribute)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::PlaceAttribute)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::DataLayoutAttribute)
......@@ -13,11 +13,11 @@
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
// paddle/fluid/ir/dialect/CMakeLists.txt.
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/pd_type_storage.h"
......@@ -145,3 +145,6 @@ void PaddleDialect::PrintAttribute(ir::Attribute attr, std::ostream &os) const {
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleDialect)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ParameterConvertInterface)
......@@ -48,3 +48,6 @@ class PaddleDialect : public ir::Dialect {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleDialect)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ParameterConvertInterface)
......@@ -30,3 +30,5 @@ const size_t& DenseTensorType::offset() const { return storage()->offset_; }
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::DenseTensorType)
......@@ -41,3 +41,5 @@ class DenseTensorType : public ir::Type {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::DenseTensorType)
......@@ -4,4 +4,4 @@ file(GLOB PD_INTERFACE_SRCS "*.cc")
cc_library(
pd_interface
SRCS ${PD_INTERFACE_SRCS}
DEPS new_ir framework_proto dense_tensor phi_utils)
DEPS ir framework_proto phi_utils)
......@@ -48,3 +48,5 @@ class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::InferShapeInterface)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/interface/infershape.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InferShapeInterface)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::OpYamlInfoInterface)
......@@ -50,3 +50,5 @@ class OpYamlInfoInterface : public ir::OpInterfaceBase<OpYamlInfoInterface> {
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::OpYamlInfoInterface)
......@@ -4,4 +4,4 @@ file(GLOB PD_PASS_SRCS "*.cc")
cc_library(
pd_op_to_kernel_pass
SRCS ${PD_PASS_SRCS}
DEPS new_ir phi_utils)
DEPS ir phi_utils)
......@@ -4,4 +4,4 @@ file(GLOB PHI_KERNEL_ADAPTOR_SRCS "*.cc")
cc_library(
phi_kernel_adaptor
SRCS ${PHI_KERNEL_ADAPTOR_SRCS}
DEPS new_ir phi_utils)
DEPS ir phi_utils)
......@@ -20,4 +20,4 @@ file(GLOB PD_PROGRAM_TRANSLATOR_SRCS "*.cc")
cc_library(
program_translator
SRCS ${PD_PROGRAM_TRANSLATOR_SRCS} ${op_compat_source_file}
DEPS proto_desc pd_dialect new_ir framework_proto)
DEPS proto_desc pd_dialect ir framework_proto)
......@@ -20,6 +20,8 @@
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
......
......@@ -31,6 +31,7 @@
#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/paddle2cinn/build_cinn_pass.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiler.h"
#include "paddle/fluid/framework/paddle2cinn/transform_type.h"
......@@ -41,6 +42,8 @@
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/utils/string/string_helper.h"
......
......@@ -22,7 +22,6 @@
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/core/ddim.h"
......@@ -41,6 +40,7 @@ namespace framework {
class ProgramDesc;
class Scope;
class VarDesc;
class InterpreterCore;
namespace ir {
class Graph;
......
......@@ -22,12 +22,15 @@
#include "cinn/common/target.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiler.h"
#include "paddle/fluid/operators/cinn/cinn_launch_context.h"
#include "paddle/fluid/operators/cinn/cinn_op_helper.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
#include "paddle/phi/core/flags.h"
PHI_DECLARE_bool(enable_pe_launch_cinn);
......
......@@ -104,7 +104,7 @@ endif()
cc_library(
init
SRCS init.cc
DEPS device_context phi memcpy pd_dialect new_ir)
DEPS device_context phi memcpy pd_dialect ir)
# memcpy depends on device_context, here add deps individually for
# avoiding cycle dependencies
......
......@@ -40,8 +40,8 @@ set(PYBIND_DEPS
phi
phi_kernel_adaptor
pd_dialect
new_ir
program_translator
ir
new_profiler
jit_layer
jit_property
......@@ -339,7 +339,15 @@ if(WITH_PYTHON)
OUTPUT ${op_impl_path}/phi.dll
COMMAND ${CMAKE_COMMAND} -E copy ${PHI_LIB} ${op_impl_path}
DEPENDS phi)
list(APPEND EAGER_OP_IMPL_DEPS ${op_impl_path}/libiomp5md.dll)
list(APPEND EAGER_OP_IMPL_DEPS ${op_impl_path}/phi.dll)
endif()
if(WITH_SHARED_IR)
add_custom_command(
OUTPUT ${op_impl_path}/ir.dll
COMMAND ${CMAKE_COMMAND} -E copy ${IR_LIB} ${op_impl_path}
DEPENDS ir)
list(APPEND EAGER_OP_IMPL_DEPS ${op_impl_path}/ir.dll)
endif()
if(${CBLAS_PROVIDER} STREQUAL MKLML)
......
......@@ -46,6 +46,7 @@ limitations under the License. */
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/common/pstring.h"
#include "paddle/phi/core/string_tensor.h"
#include "paddle/phi/kernels/strings/unicode.h"
......
if(NOT WITH_IR)
return()
set_property(GLOBAL PROPERTY IR_TARGETS "")
set_property(GLOBAL PROPERTY IR_MODULES "")
# ir_library(<target> [STATIC|SHARED|INTERFACE] SRCS ... DEPS ...)
#
# Helper that builds an IR library target from an intermediate OBJECT
# library. For the default (static) case it also records the target and
# its object files in the global IR_TARGETS / IR_MODULES properties so
# that the top-level build can later link all IR objects into a single
# `ir` library (shared or static, depending on WITH_SHARED_IR).
function(ir_library TARGET_NAME)
# Accept both upper- and lower-case spellings of the library-kind flags.
set(options STATIC static SHARED shared INTERFACE interface)
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS)
cmake_parse_arguments(ir_library "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN})
# Compile sources once into an OBJECT library; the objects are reused
# both by the per-target cc_library below and by the aggregated `ir` lib.
set(OBJ_LIB ir_${TARGET_NAME})
add_library(${OBJ_LIB} OBJECT ${ir_library_SRCS})
if(ir_library_SHARED OR ir_library_shared) # build *.so
cc_library(
${TARGET_NAME} SHARED
SRCS $<TARGET_OBJECTS:${OBJ_LIB}>
DEPS ${ir_library_DEPS})
elseif(ir_library_INTERFACE OR ir_library_interface)
cc_library(
${TARGET_NAME} INTERFACE
SRCS $<TARGET_OBJECTS:${OBJ_LIB}>
DEPS ${ir_library_DEPS})
else()
# Default: static library. Only this branch contributes to the global
# IR_MODULES/IR_TARGETS lists consumed when assembling the `ir` library
# and the inference deps (SHARED/INTERFACE targets are not aggregated).
cc_library(
${TARGET_NAME}
SRCS $<TARGET_OBJECTS:${OBJ_LIB}>
DEPS ${ir_library_DEPS})
set_property(GLOBAL APPEND PROPERTY IR_MODULES $<TARGET_OBJECTS:${OBJ_LIB}>)
# Append TARGET_NAME to the global IR_TARGETS list (read-modify-write,
# since APPEND on a plain set_property would not de-reference the cache).
get_property(ir_targets GLOBAL PROPERTY IR_TARGETS)
set(ir_targets ${ir_targets} ${TARGET_NAME})
set_property(GLOBAL PROPERTY IR_TARGETS "${ir_targets}")
endif()
endfunction()
if(WITH_SHARED_IR)
add_definitions(-DIR_DLL_EXPORT)
endif()
add_subdirectory(core)
add_subdirectory(pass)
add_subdirectory(pattern_rewrite)
if(WIN32)
if(WITH_SHARED_IR)
set(IR_NAME
ir.dll
CACHE INTERNAL "" FORCE)
else()
set(IR_NAME
ir.lib
CACHE INTERNAL "" FORCE)
endif()
elseif(APPLE)
if(WITH_SHARED_IR)
set(IR_NAME
libir.dylib
CACHE INTERNAL "" FORCE)
else()
set(IR_NAME
libir.a
CACHE INTERNAL "" FORCE)
endif()
else()
if(WITH_SHARED_IR)
set(IR_NAME
libir.so
CACHE INTERNAL "" FORCE)
else()
set(IR_NAME
libir.a
CACHE INTERNAL "" FORCE)
endif()
endif()
set(IR_LIB
"${CMAKE_CURRENT_BINARY_DIR}/${IR_NAME}"
CACHE FILEPATH "IR Library" FORCE)
get_property(ir_modules GLOBAL PROPERTY IR_MODULES)
if(WITH_SHARED_IR)
add_library(ir SHARED ${ir_modules})
else()
add_library(ir STATIC ${ir_modules})
endif()
......@@ -3,4 +3,4 @@ set(NEWIR_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/ir")
file(GLOB IR_SRCS "*.cc")
cc_library(new_ir SRCS ${IR_SRCS})
ir_library(ir_core SRCS ${IR_SRCS})
......@@ -22,11 +22,11 @@ namespace ir {
/// \brief Unified interface of the Attribute class. Derivation of all Attribute
/// classes only derives interfaces, not members.
///
class Attribute {
class IR_API Attribute {
public:
using Storage = AttributeStorage;
constexpr Attribute() = default;
Attribute() = default;
Attribute(const Storage *storage) // NOLINT
: storage_(storage) {}
......@@ -85,7 +85,7 @@ class Attribute {
const Storage *storage_{nullptr};
};
std::ostream &operator<<(std::ostream &os, Attribute attr);
IR_API std::ostream &operator<<(std::ostream &os, Attribute attr);
} // namespace ir
namespace std {
......
......@@ -25,7 +25,7 @@ class Dialect;
/// \brief Abstract the properties and behaviors common to all Attribute classes
/// into an AbstractAttribute class.
///
class AbstractAttribute {
class IR_API AbstractAttribute {
public:
///
/// \brief Construct an AbstractAttribute by TypeId directly.
......@@ -98,7 +98,7 @@ struct AttributeManager;
/// directly but parametric attribute should be constructed by Derived
/// AttributeStorage.
///
class AttributeStorage : public StorageManager::StorageBase {
class IR_API AttributeStorage : public StorageManager::StorageBase {
friend StorageManager;
friend AttributeManager;
......@@ -141,7 +141,7 @@ class AttributeStorage : public StorageManager::StorageBase {
/// \brief AttributeManager is a utility class that provides interfaces for get
/// or unique Attribute instances in IrContext.
///
struct AttributeManager {
struct IR_API AttributeManager {
///
/// \brief Get a unique instance of Attribute T from IrContext. Note: For a
/// parametric attribute, if not found in IrContext, it will try to create a
......
......@@ -17,12 +17,13 @@
#include <cstddef>
#include <list>
#include "paddle/ir/core/dll_decl.h"
#include "paddle/ir/core/region.h"
namespace ir {
class Operation;
class Block {
class IR_API Block {
using OpListType = std::list<Operation *>;
public:
......
......@@ -73,13 +73,13 @@ class Builder {
Block *block() const { return block_; }
/// Creates an operation given the fields represented as an OperationState.
Operation *Build(OperationArgument &&argument);
IR_API Operation *Build(OperationArgument &&argument);
/// Creates an operation with the given fields.
Operation *Build(const std::vector<ir::OpResult> &inputs,
const AttributeMap &attribute,
const std::vector<ir::Type> &output_types,
ir::OpInfo op_info);
IR_API Operation *Build(const std::vector<ir::OpResult> &inputs,
const AttributeMap &attribute,
const std::vector<ir::Type> &output_types,
ir::OpInfo op_info);
/// Create an operation of specific op type at the current insertion point.
template <typename OpTy, typename... Args>
......
......@@ -36,3 +36,12 @@ std::vector<Attribute> ArrayAttribute::data() const {
void* PointerAttribute::data() const { return storage()->GetAsKey(); }
} // namespace ir
IR_DEFINE_EXPLICIT_TYPE_ID(ir::StrAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::BoolAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::FloatAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::DoubleAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int32Attribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int64Attribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::ArrayAttribute)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::PointerAttribute)
......@@ -19,7 +19,7 @@
#include "paddle/ir/core/utils.h"
namespace ir {
class StrAttribute : public Attribute {
class IR_API StrAttribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -34,7 +34,7 @@ class StrAttribute : public Attribute {
uint32_t size() const;
};
class BoolAttribute : public Attribute {
class IR_API BoolAttribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -43,7 +43,7 @@ class BoolAttribute : public Attribute {
bool data() const;
};
class FloatAttribute : public Attribute {
class IR_API FloatAttribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -52,7 +52,7 @@ class FloatAttribute : public Attribute {
float data() const;
};
class DoubleAttribute : public Attribute {
class IR_API DoubleAttribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -61,7 +61,7 @@ class DoubleAttribute : public Attribute {
double data() const;
};
class Int32Attribute : public Attribute {
class IR_API Int32Attribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -70,7 +70,7 @@ class Int32Attribute : public Attribute {
int32_t data() const;
};
class Int64Attribute : public Attribute {
class IR_API Int64Attribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -79,7 +79,7 @@ class Int64Attribute : public Attribute {
int64_t data() const;
};
class ArrayAttribute : public Attribute {
class IR_API ArrayAttribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -94,7 +94,7 @@ class ArrayAttribute : public Attribute {
Attribute operator[](size_t index) const { return data()[index]; }
};
class PointerAttribute : public Attribute {
class IR_API PointerAttribute : public Attribute {
public:
using Attribute::Attribute;
......@@ -104,3 +104,12 @@ class PointerAttribute : public Attribute {
};
} // namespace ir
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::StrAttribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BoolAttribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::FloatAttribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::DoubleAttribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int32Attribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int64Attribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ArrayAttribute)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::PointerAttribute)
......@@ -54,3 +54,5 @@ void BuiltinDialect::initialize() {
}
} // namespace ir
IR_DEFINE_EXPLICIT_TYPE_ID(ir::BuiltinDialect)
......@@ -22,7 +22,7 @@ namespace ir {
/// all built-in types defined in builtin_type.h will be registered in this
/// Dialect.
///
class BuiltinDialect : public ir::Dialect {
class IR_API BuiltinDialect : public ir::Dialect {
public:
explicit BuiltinDialect(ir::IrContext *context);
///
......@@ -38,3 +38,5 @@ class BuiltinDialect : public ir::Dialect {
};
} // namespace ir
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BuiltinDialect)
......@@ -204,3 +204,11 @@ void ConstantOp::Verify(const std::vector<ir::OpResult> &inputs,
Attribute ConstantOp::value() { return operation()->attributes().at("value"); }
} // namespace ir
IR_DEFINE_EXPLICIT_TYPE_ID(ir::ModuleOp)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::GetParameterOp)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::SetParameterOp)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::CombineOp)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::SliceOp)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::ConstantLikeTrait)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::ConstantOp)
......@@ -24,7 +24,7 @@ class Block;
///
/// \brief ModuleOp
///
class ModuleOp : public ir::Op<ModuleOp> {
class IR_API ModuleOp : public ir::Op<ModuleOp> {
public:
using Op::Op;
static const char *name() { return "builtin.module"; }
......@@ -48,7 +48,7 @@ class ModuleOp : public ir::Op<ModuleOp> {
/// \brief GetParameterOp: OpResult = GetParameterOp({StrAttribute,
/// StrAttribute})
///
class GetParameterOp : public ir::Op<GetParameterOp> {
class IR_API GetParameterOp : public ir::Op<GetParameterOp> {
public:
using Op::Op;
static const char *name() { return "builtin.get_parameter"; }
......@@ -63,7 +63,7 @@ class GetParameterOp : public ir::Op<GetParameterOp> {
/// \brief SetParameterOp: SetParameterOp(OpOperand, {StrAttribute,
/// StrAttribute})
///
class SetParameterOp : public ir::Op<SetParameterOp> {
class IR_API SetParameterOp : public ir::Op<SetParameterOp> {
public:
using Op::Op;
static const char *name() { return "builtin.set_parameter"; }
......@@ -77,7 +77,7 @@ class SetParameterOp : public ir::Op<SetParameterOp> {
///
/// \brief CombineOp: CombineOp(OpOperand)
///
class CombineOp : public ir::Op<CombineOp> {
class IR_API CombineOp : public ir::Op<CombineOp> {
public:
using Op::Op;
......@@ -95,7 +95,7 @@ class CombineOp : public ir::Op<CombineOp> {
///
/// \brief SliceOp: SliceOp(OpOperand)
///
class SliceOp : public ir::Op<SliceOp> {
class IR_API SliceOp : public ir::Op<SliceOp> {
public:
using Op::Op;
......@@ -109,7 +109,7 @@ class SliceOp : public ir::Op<SliceOp> {
const ir::AttributeMap &attributes);
};
class ConstantLikeTrait : public OpTraitBase<ConstantLikeTrait> {
class IR_API ConstantLikeTrait : public OpTraitBase<ConstantLikeTrait> {
public:
explicit ConstantLikeTrait(Operation *op)
: OpTraitBase<ConstantLikeTrait>(op) {}
......@@ -118,7 +118,7 @@ class ConstantLikeTrait : public OpTraitBase<ConstantLikeTrait> {
///
/// \brief ConstantOp
///
class ConstantOp : public Op<ConstantOp, ConstantLikeTrait> {
class IR_API ConstantOp : public Op<ConstantOp, ConstantLikeTrait> {
public:
using Op::Op;
static const char *name() { return "builtin.constant"; }
......@@ -139,3 +139,11 @@ class ConstantOp : public Op<ConstantOp, ConstantLikeTrait> {
};
} // namespace ir
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ModuleOp)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::GetParameterOp)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::SetParameterOp)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::CombineOp)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::SliceOp)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ConstantLikeTrait)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ConstantOp)
......@@ -18,3 +18,14 @@ namespace ir {
std::vector<Type> VectorType::data() const { return storage()->GetAsKey(); }
} // namespace ir
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int8Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::VectorType)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::BFloat16Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Float16Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Float32Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Float64Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int16Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int32Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int64Type)
IR_DEFINE_EXPLICIT_TYPE_ID(ir::BoolType)
......@@ -38,14 +38,14 @@ namespace ir {
// NOTE(dev): Currently Int8 is not considered as a cached member
// in IrContextImpl because it is not widely used.
class Int8Type : public Type {
class IR_API Int8Type : public Type {
public:
using Type::Type;
DECLARE_TYPE_UTILITY_FUNCTOR(Int8Type, TypeStorage);
};
class VectorType : public Type {
class IR_API VectorType : public Type {
public:
using Type::Type;
......@@ -60,25 +60,25 @@ class VectorType : public Type {
Type operator[](size_t index) const { return data()[index]; }
};
#define DECLARE_BUILTIN_TYPE(__name) \
class __name##Type : public Type { \
public: \
using Type::Type; \
\
DECLARE_TYPE_UTILITY_FUNCTOR(__name##Type, TypeStorage); \
\
static __name##Type get(IrContext *context); \
#define DECLARE_BUILTIN_TYPE(__name) \
class IR_API __name : public Type { \
public: \
using Type::Type; \
\
DECLARE_TYPE_UTILITY_FUNCTOR(__name, TypeStorage); \
\
static __name get(IrContext *context); \
};
#define FOREACH_BUILTIN_TYPE(__macro) \
__macro(BFloat16); \
__macro(Float16); \
__macro(Float32); \
__macro(Float64); \
__macro(Int16); \
__macro(Int32); \
__macro(Int64); \
__macro(Bool);
__macro(BFloat16Type); \
__macro(Float16Type); \
__macro(Float32Type); \
__macro(Float64Type); \
__macro(Int16Type); \
__macro(Int32Type); \
__macro(Int64Type); \
__macro(BoolType);
FOREACH_BUILTIN_TYPE(DECLARE_BUILTIN_TYPE)
......@@ -86,3 +86,14 @@ FOREACH_BUILTIN_TYPE(DECLARE_BUILTIN_TYPE)
#undef DECLARE_BUILTIN_TYPE
} // namespace ir
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int8Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::VectorType)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BFloat16Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Float16Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Float32Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Float64Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int16Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int32Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int64Type)
IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BoolType)
......@@ -38,7 +38,7 @@ class DialectInterface;
/// compilers only need to combine existing dialects and add their own
/// extensions or customizations.
///
class Dialect {
class IR_API Dialect {
public:
Dialect(std::string name, IrContext *context, TypeId id);
......
......@@ -34,7 +34,7 @@ class DialectInterfaceBase : public BaseT {
explicit DialectInterfaceBase(Dialect *dialect) : BaseT(dialect, id()) {}
};
class DialectInterface {
class IR_API DialectInterface {
public:
virtual ~DialectInterface();
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

// IR_API marks the public symbols of the IR library.
//
// On Windows it expands to __declspec(dllexport) when compiling the IR DLL
// itself (the build defines IR_DLL_EXPORT) and to __declspec(dllimport) for
// every consumer of the DLL. On all other platforms symbols are visible by
// default, so the macro expands to nothing.
#if defined(_WIN32)
#ifndef IR_API
#ifdef IR_DLL_EXPORT
#define IR_API __declspec(dllexport)
#else
#define IR_API __declspec(dllimport)
#endif  // IR_DLL_EXPORT
#endif  // IR_API
#else
// NOTE(review): unlike the _WIN32 branch, this defines IR_API without an
// #ifndef guard — confirm no build configuration pre-defines IR_API here.
#define IR_API
#endif  // _WIN32
......@@ -182,6 +182,8 @@ IrContext *IrContext::Instance() {
return &context;
}
IrContext::~IrContext() { delete impl_; }
IrContext::IrContext() : impl_(new IrContextImpl()) {
VLOG(4) << "BuiltinDialect registered into IrContext. ===>";
GetOrRegisterDialect<BuiltinDialect>();
......
......@@ -18,6 +18,8 @@
#include <unordered_map>
#include <vector>
#include "paddle/ir/core/dll_decl.h"
namespace ir {
class IrContextImpl;
class StorageManager;
......@@ -37,7 +39,7 @@ using OpInfoMap = std::unordered_map<std::string, OpInfo>;
/// \brief IrContext is a global parameterless class used to store and manage
/// Type, Attribute and other related data structures.
///
class IrContext {
class IR_API IrContext {
public:
///
/// \brief Initializes a new instance of IrContext.
......@@ -186,7 +188,8 @@ class IrContext {
private:
IrContext();
const std::unique_ptr<IrContextImpl> impl_;
~IrContext();
IrContextImpl *impl_;
};
} // namespace ir
......@@ -40,7 +40,7 @@ class BasicIrPrinter {
std::ostream& os;
};
class IrPrinter : public BasicIrPrinter {
class IR_API IrPrinter : public BasicIrPrinter {
public:
explicit IrPrinter(std::ostream& os) : BasicIrPrinter(os) {}
......
......@@ -20,7 +20,7 @@
namespace ir {
class InterfaceValue {
class IR_API InterfaceValue {
public:
template <typename ConcreteOp, typename T>
static InterfaceValue get() {
......@@ -64,7 +64,7 @@ class InterfaceValue {
void *model_{nullptr};
};
class OpBase {
class IR_API OpBase {
public:
explicit OpBase(Operation *operation = nullptr) : operation_(operation) {}
......
......@@ -26,9 +26,9 @@ class Type;
class Attribute;
class Dialect;
class OpInfo {
class IR_API OpInfo {
public:
constexpr OpInfo() = default;
OpInfo() = default;
OpInfo(const OpInfo &other) = default;
......
......@@ -26,7 +26,7 @@ class Program;
class OpOperand;
class OpResult;
class alignas(8) Operation final {
class IR_API alignas(8) Operation final {
public:
///
/// \brief Malloc memory and construct objects in the following order:
......
......@@ -20,7 +20,7 @@ namespace ir {
///
/// \brief Parameter represents the weight in the calculation graph.
///
class Parameter {
class IR_API Parameter {
public:
Parameter(void* data, size_t size, ir::Type type) {
data_ = malloc(size);
......
......@@ -36,7 +36,7 @@ class IrContext;
/// concepts such as basic blocks, closures, and functions will be introduced to
/// continuously improve Program's ability to represent computational graphs.
///
class Program {
class IR_API Program {
public:
using ParameterMap =
std::unordered_map<std::string, std::unique_ptr<Parameter>>;
......
......@@ -17,12 +17,14 @@
#include <cstddef>
#include <list>
#include "paddle/ir/core/dll_decl.h"
namespace ir {
class Block;
class Operation;
class Region {
class IR_API Region {
public:
using iterator = std::list<Block *>::iterator;
using reverse_iterator = std::list<Block *>::reverse_iterator;
......
......@@ -40,7 +40,7 @@ struct ParametricStorageManager;
/// provide method 'bool operator==(const ParamKey &) const', used to compare
/// Storage instance and ParamKey instance.
///
class StorageManager {
class IR_API StorageManager {
public:
///
/// \brief This class is the base class of all storage classes,
......
......@@ -26,11 +26,11 @@ namespace ir {
/// Float32Type, etc. are all derived classes of Type, but no new member
/// variables will be added.
///
class Type {
class IR_API Type {
public:
using Storage = TypeStorage;
constexpr Type() = default;
Type() = default;
Type(const Storage *storage) // NOLINT
: storage_(const_cast<Storage *>(storage)) {}
......@@ -89,7 +89,7 @@ class Type {
const Storage *storage_{nullptr};
};
std::ostream &operator<<(std::ostream &os, Type type);
IR_API std::ostream &operator<<(std::ostream &os, Type type);
} // namespace ir
......
......@@ -28,7 +28,7 @@ class Dialect;
/// of all types is TypeId (and possibly others). Therefore, construct a class
/// with TypeId as its member.
///
class AbstractType {
class IR_API AbstractType {
public:
///
/// \brief Construct an AbstractType by TypeId directly.
......@@ -100,7 +100,7 @@ struct TypeManager;
/// be included. So that, non-parametric type can be constructed by TypeStorage
/// directly but parametric type should be constructed by Derived TypeStorage.
///
class TypeStorage : public StorageManager::StorageBase {
class IR_API TypeStorage : public StorageManager::StorageBase {
friend StorageManager;
friend TypeManager;
......@@ -141,7 +141,7 @@ class TypeStorage : public StorageManager::StorageBase {
/// \brief TypeManager is a utility class that provides interfaces for get or
/// unique Type instances in IrContext.
///
struct TypeManager {
struct IR_API TypeManager {
///
/// \brief Get a unique instance of Type T from IrContext. Note: For a
/// parametric_type, if not found in IrContext, it will try to create a new
......
......@@ -17,6 +17,8 @@
#include <glog/logging.h>
#include <functional>
#include "paddle/ir/core/dll_decl.h"
namespace ir {
///
......@@ -40,10 +42,7 @@ class TypeId {
/// \return The unique TypeId of Type T.
///
template <typename T>
static TypeId get() {
static Storage instance;
return TypeId(&instance);
}
static TypeId get();
TypeId() = default;
......@@ -85,6 +84,60 @@ class TypeId {
Storage *storage_{nullptr};
};
namespace detail {
// Tag object whose *address* serves as a process-unique TypeId: id() wraps
// `this` via TypeId::RecoverFromOpaquePointer. The alignas(8) presumably
// keeps the address suitably aligned for TypeId's opaque-pointer storage —
// TODO(review): confirm against TypeId's representation.
class alignas(8) UniqueingId {
 public:
  UniqueingId() = default;
  // Non-copyable and non-movable: the identity is this object's address,
  // which must remain stable for the lifetime of the program.
  UniqueingId(const UniqueingId &) = delete;
  UniqueingId &operator=(const UniqueingId &) = delete;
  UniqueingId(UniqueingId &&) = delete;
  UniqueingId &operator=(UniqueingId &&) = delete;

  // Implicit conversion so a UniqueingId can be used wherever a TypeId is
  // expected.
  operator TypeId() { return id(); }
  // Builds the TypeId corresponding to this object's address.
  TypeId id() { return TypeId::RecoverFromOpaquePointer(this); }
};
template <typename T>
class TypeIdResolver;
} // namespace detail
// Resolves the unique TypeId of T through its explicitly specialized
// detail::TypeIdResolver<T>. The specialization is produced by the
// IR_DECLARE_EXPLICIT_TYPE_ID / IR_DEFINE_EXPLICIT_TYPE_ID macro pair, so
// every T used here must have been registered with those macros.
template <typename T>
TypeId TypeId::get() {
  return detail::TypeIdResolver<T>::Resolve();
}
// Declares an explicit TypeIdResolver specialization for TYPE_CLASS whose
// static UniqueingId member `id_` supplies the class's unique TypeId.
// Pair each declaration with exactly one IR_DEFINE_EXPLICIT_TYPE_ID in a
// single .cc file so `id_` has one definition program-wide.
#define IR_DECLARE_EXPLICIT_TYPE_ID(TYPE_CLASS) \
  namespace ir {                                \
  namespace detail {                            \
  template <>                                   \
  class TypeIdResolver<TYPE_CLASS> {            \
   public:                                      \
    static TypeId Resolve() { return id_; }     \
    static UniqueingId id_;                     \
  };                                            \
  }                                             \
  }  // namespace ir
// Same as IR_DECLARE_EXPLICIT_TYPE_ID, but the resolver specialization is
// annotated with IR_API so its `id_` symbol is exported from (or imported
// into) the IR shared library — required for Windows DLL builds where the
// TypeId must be unique across module boundaries.
#define IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(TYPE_CLASS) \
  namespace ir {                                       \
  namespace detail {                                   \
  template <>                                          \
  class IR_API TypeIdResolver<TYPE_CLASS> {            \
   public:                                             \
    static TypeId Resolve() { return id_; }            \
    static UniqueingId id_;                            \
  };                                                   \
  }                                                    \
  }  // namespace ir
// Defines the storage for TYPE_CLASS's unique id. Place this in exactly one
// .cc file for each IR_DECLARE_EXPLICIT_TYPE_ID /
// IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID declaration.
#define IR_DEFINE_EXPLICIT_TYPE_ID(TYPE_CLASS)   \
  namespace ir {                                 \
  namespace detail {                             \
  UniqueingId TypeIdResolver<TYPE_CLASS>::id_ = {}; \
  }                                              \
  }  // namespace ir
} // namespace ir
namespace std {
......
......@@ -20,11 +20,13 @@
#include <tuple>
#include <type_traits>
#include "paddle/ir/core/dll_decl.h"
namespace ir {
///
/// \brief Equivalent to boost::hash_combine.
///
std::size_t hash_combine(std::size_t lhs, std::size_t rhs);
IR_API std::size_t hash_combine(std::size_t lhs, std::size_t rhs);
///
/// \brief Aligned malloc and free functions.
......
......@@ -31,7 +31,7 @@ class OpResultImpl;
/// \brief OpOperand class represents the operand of operation. This class only
/// provides interfaces, for specific implementation, see Impl class.
///
class OpOperand {
class IR_API OpOperand {
public:
OpOperand() = default;
......@@ -103,7 +103,7 @@ class ValueUseIterator {
/// \brief Value class represents the SSA value in the IR system. This class
/// only provides interfaces, for specific implementation, see Impl class.
///
class Value {
class IR_API Value {
public:
Value() = default;
......@@ -161,7 +161,7 @@ class Value {
/// This class only provides interfaces, for specific implementation, see Impl
/// class.
///
class OpResult : public Value {
class IR_API OpResult : public Value {
public:
using Value::Value;
......
file(GLOB NEW_PASS_SRCS "*.cc")
cc_library(
new_pass
SRCS ${NEW_PASS_SRCS}
DEPS new_ir)
ir_library(ir_pass SRCS ${NEW_PASS_SRCS} DEPS ir_core)
......@@ -74,6 +74,8 @@ class PreservedAnalyses {
preserved_ids_.erase(TypeId::get<AnalysisT>());
}
friend ir::detail::TypeIdResolver<AllAnalysesType>;
private:
template <typename>
friend struct AnalysisModel;
......@@ -304,3 +306,5 @@ class AnalysisManagerHolder {
};
} // namespace ir
IR_DECLARE_EXPLICIT_TYPE_ID(ir::detail::PreservedAnalyses::AllAnalysesType)
......@@ -227,3 +227,5 @@ void PassInstrumentor::AddInstrumentation(
}
} // namespace ir
IR_DEFINE_EXPLICIT_TYPE_ID(ir::detail::PreservedAnalyses::AllAnalysesType)
......@@ -18,8 +18,8 @@
#include <string>
#include <vector>
#include "paddle/ir/core/enforce.h"
#include "paddle/ir/pass/analysis_manager.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/utils/optional.h"
namespace ir {
......@@ -68,7 +68,7 @@ struct PassInfo {
} // namespace detail
/// We can access pass only from PassManager.
class Pass {
class IR_API Pass {
public:
explicit Pass(const std::string& name,
uint8_t opt_level,
......@@ -91,9 +91,8 @@ class Pass {
AnalysisManager analysis_manager() { return pass_state().am; }
detail::PassExecutionState& pass_state() {
PADDLE_ENFORCE_EQ(pass_state_.is_initialized(),
true,
phi::errors::Fatal("pass state was never initialized"));
IR_ENFORCE(pass_state_.is_initialized() == true,
"pass state was never initialized");
return *pass_state_;
}
......
......@@ -57,7 +57,7 @@ class PassInstrumentation {
/// This class holds a collection of PassInstrumentation objects, and invokes
/// their respective callbacks.
class PassInstrumentor {
class IR_API PassInstrumentor {
public:
PassInstrumentor();
~PassInstrumentor();
......
......@@ -34,7 +34,7 @@ namespace detail {
class PassAdaptor;
}
class PassManager {
class IR_API PassManager {
public:
explicit PassManager(IrContext *context, uint8_t opt_level = 2);
......
file(GLOB PATTERN_SRCS "*.cc")
cc_library(
pattern_rewrite
SRCS ${PATTERN_SRCS}
DEPS new_ir)
ir_library(ir_pattern_rewrite SRCS ${PATTERN_SRCS} DEPS ir_core)
......@@ -57,7 +57,7 @@ class PatternBenefit {
// This class contains all of the data related to a Pattern, but not contains
// any methods for the matching. This class is used to interface with the
// metadata of a pattern, such as benefit or root operation.
class Pattern {
class IR_API Pattern {
enum class RootKind {
// The pattern root matches "any" operation.
Any,
......@@ -155,7 +155,7 @@ class Pattern {
class PatternRewriter;
class RewritePattern : public Pattern {
class IR_API RewritePattern : public Pattern {
public:
virtual ~RewritePattern();
......
......@@ -406,18 +406,6 @@
data_type : dtype
backend : place
- op : full_int_array
args : (IntArray value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output: Tensor(out)
infer_meta :
func : CreateIntArrayInferMeta
param : [value, dtype]
kernel :
func : full_int_array
param : [value, dtype]
data_type : dtype
backend : place
- op : full_like
args : (Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {})
output: Tensor(out)
......
......@@ -919,6 +919,18 @@
func : frame
backward : frame_grad
- op : full_int_array
args : (IntArray value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output: Tensor(out)
infer_meta :
func : CreateIntArrayInferMeta
param : [value, dtype]
kernel :
func : full_int_array
param : [value, dtype]
data_type : dtype
backend : place
- op : gather_nd
args : (Tensor x, Tensor index)
output : Tensor
......
......@@ -695,7 +695,7 @@ set PATH=%THIRD_PARTY_PATH:/=\%\install\openblas\lib;%THIRD_PARTY_PATH:/=\%\inst
%THIRD_PARTY_PATH:/=\%\install\zlib\bin;%THIRD_PARTY_PATH:/=\%\install\mklml\lib;^
%THIRD_PARTY_PATH:/=\%\install\mkldnn\bin;%THIRD_PARTY_PATH:/=\%\install\warpctc\bin;^
%THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib;%THIRD_PARTY_PATH:/=\%\install\paddle2onnx\lib;^
%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%work_dir%\%BUILD_DIR%\paddle\fluid\inference\capi_exp;^
%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%work_dir%\%BUILD_DIR%\paddle\fluid\inference\capi_exp;%work_dir%\%BUILD_DIR%\paddle\ir;^
%PATH%
REM TODO: make ut find .dll in install\onnxruntime\lib
......
......@@ -13,6 +13,9 @@ env_dict={
'PHI_LIB':'@PHI_LIB@',
'PHI_NAME':'@PHI_NAME@',
'WITH_SHARED_PHI':'@WITH_SHARED_PHI@',
'IR_LIB':'@IR_LIB@',
'IR_NAME':'@IR_NAME@',
'WITH_SHARED_IR':'@WITH_SHARED_IR@',
'WARPCTC_LIBRARIES':'@WARPCTC_LIBRARIES@',
'WARPRNNT_LIBRARIES':'@WARPRNNT_LIBRARIES@',
'FLASHATTN_LIBRARIES':'@FLASHATTN_LIBRARIES@',
......
......@@ -564,9 +564,13 @@ libs_path='${PADDLE_BINARY_DIR}/python/paddle/libs'
package_data['paddle.libs']= []
if('${WITH_SHARED_PHI}' == 'ON'):
package_data['paddle.libs'] = [('libphi' if os.name != 'nt' else 'phi') + ext_name]
package_data['paddle.libs'] += [('libphi' if os.name != 'nt' else 'phi') + ext_name]
shutil.copy('${PHI_LIB}', libs_path)
if('${WITH_SHARED_IR}' == 'ON'):
package_data['paddle.libs'] += [('libir' if os.name != 'nt' else 'ir') + ext_name]
shutil.copy('${IR_LIB}', libs_path)
package_data['paddle.libs']+=[
('libwarpctc' if os.name != 'nt' else 'warpctc') + ext_name,
('libwarprnnt' if os.name != 'nt' else 'warprnnt') + ext_name,
......@@ -731,11 +735,17 @@ if '${CMAKE_BUILD_TYPE}' == 'Release':
if('${WITH_SHARED_PHI}' == 'ON'):
    # change rpath of phi.ext for loading 3rd party lib
commands.append("install_name_tool -add_rpath '@loader_path' ${PADDLE_BINARY_DIR}/python/paddle/libs/${PHI_NAME}")
if('${WITH_SHARED_IR}' == 'ON'):
    # change rpath of ir.ext for loading 3rd party lib
commands.append("install_name_tool -add_rpath '@loader_path' ${PADDLE_BINARY_DIR}/python/paddle/libs/${IR_NAME}")
else:
commands = ["patchelf --set-rpath '$ORIGIN/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/${FLUID_CORE_NAME}" + '.so']
if('${WITH_SHARED_PHI}' == 'ON'):
# change rpath of phi.ext for loading 3rd party lib
commands.append("patchelf --set-rpath '$ORIGIN' ${PADDLE_BINARY_DIR}/python/paddle/libs/${PHI_NAME}")
if('${WITH_SHARED_IR}' == 'ON'):
# change rpath of ir.ext for loading 3rd party lib
commands.append("patchelf --set-rpath '$ORIGIN' ${PADDLE_BINARY_DIR}/python/paddle/libs/${IR_NAME}")
# The sw_64 does not support patchelf, so we just disable that.
if platform.machine() != 'sw_64' and platform.machine() != 'mips64':
for command in commands:
......
......@@ -966,11 +966,17 @@ def get_package_data_and_package_dir():
package_data['paddle.libs'] = []
if env_dict.get("WITH_SHARED_PHI") == "ON":
package_data['paddle.libs'] = [
package_data['paddle.libs'] += [
('libphi' if os.name != 'nt' else 'phi') + ext_suffix
]
shutil.copy(env_dict.get("PHI_LIB"), libs_path)
if env_dict.get("WITH_SHARED_IR") == "ON":
package_data['paddle.libs'] += [
('libir' if os.name != 'nt' else 'ir') + ext_suffix
]
shutil.copy(env_dict.get("IR_LIB"), libs_path)
package_data['paddle.libs'] += [
('libwarpctc' if os.name != 'nt' else 'warpctc') + ext_suffix,
('libwarprnnt' if os.name != 'nt' else 'warprnnt') + ext_suffix,
......@@ -1216,6 +1222,13 @@ def get_package_data_and_package_dir():
+ '/python/paddle/libs/'
+ env_dict.get("PHI_NAME")
)
if env_dict.get("WITH_SHARED_IR") == "ON":
commands.append(
"install_name_tool -add_rpath '@loader_path' "
+ env_dict.get("PADDLE_BINARY_DIR")
+ '/python/paddle/libs/'
+ env_dict.get("IR_NAME")
)
else:
commands = [
"patchelf --set-rpath '$ORIGIN/../libs/' "
......@@ -1231,6 +1244,13 @@ def get_package_data_and_package_dir():
+ '/python/paddle/libs/'
+ env_dict.get("PHI_NAME")
)
if env_dict.get("WITH_SHARED_IR") == "ON":
commands.append(
"patchelf --set-rpath '$ORIGIN' "
+ env_dict.get("PADDLE_BINARY_DIR")
+ '/python/paddle/libs/'
+ env_dict.get("IR_NAME")
)
# The sw_64 does not support patchelf, so we just disable that.
if platform.machine() != 'sw_64' and platform.machine() != 'mips64':
for command in commands:
......
......@@ -171,6 +171,9 @@ if(${len} GREATER_EQUAL 1)
if(WITH_SHARED_PHI)
target_link_libraries(${test_name} $<TARGET_LINKER_FILE:phi>)
endif()
if(WITH_SHARED_IR)
target_link_libraries(${test_name} $<TARGET_LINKER_FILE:ir>)
endif()
add_dependencies(${test_name} ${paddle_lib} paddle_gtest_main_new)
if(WITH_GPU)
target_link_libraries(${test_name} ${CUDA_CUDART_LIBRARY}
......@@ -182,7 +185,7 @@ if(${len} GREATER_EQUAL 1)
if(APPLE)
target_link_libraries(
${test_name}
"-Wl,-rpath,$<TARGET_FILE_DIR:${paddle_lib}> -Wl,-rpath,$<TARGET_FILE_DIR:phi>"
"-Wl,-rpath,$<TARGET_FILE_DIR:${paddle_lib}> -Wl,-rpath,$<TARGET_FILE_DIR:phi> -Wl,-rpath,$<TARGET_FILE_DIR:ir>"
)
endif()
if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
......
......@@ -27,6 +27,7 @@ limitations under the License. */
#include "cinn/hlir/framework/tensor.h"
#include "cinn/runtime/cinn_runtime.h"
#include "gtest/gtest.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/paddle2cinn/build_cinn_pass.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiler.h"
......@@ -34,6 +35,8 @@ limitations under the License. */
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/operators/cinn/cinn_op_helper.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/value.h"
#include "paddle/phi/core/ddim.h"
USE_OP_ITSELF(cinn_instruction_run);
......
if(NOT WITH_IR)
return()
endif()
add_subdirectory(core)
add_subdirectory(pass)
add_subdirectory(pattern_rewrite)
......
cc_test_old(type_test SRCS type_test.cc DEPS new_ir gtest)
cc_test_old(ir_attribute_test SRCS ir_attribute_test.cc DEPS new_ir gtest)
cc_test_old(ir_value_test SRCS ir_value_test.cc DEPS new_ir gtest)
cc_test_old(ir_op_test SRCS ir_op_test.cc DEPS new_ir gtest)
cc_test_old(type_test SRCS type_test.cc DEPS ir gtest)
cc_test_old(ir_attribute_test SRCS ir_attribute_test.cc DEPS ir gtest)
cc_test_old(ir_value_test SRCS ir_value_test.cc DEPS ir gtest)
cc_test_old(ir_op_test SRCS ir_op_test.cc DEPS ir gtest)
cc_test_old(
ir_program_test
SRCS
ir_program_test.cc
DEPS
new_ir
pd_dialect
ir
phi
gtest)
......@@ -17,8 +17,8 @@ cc_test_old(
SRCS
ir_phi_kernel_op_test.cc
DEPS
new_ir
pd_dialect
ir
phi
gtest)
......@@ -27,8 +27,8 @@ cc_test_old(
SRCS
ir_infershape_test.cc
DEPS
new_ir
pd_dialect
ir
phi
gtest)
......@@ -37,9 +37,9 @@ cc_test_old(
SRCS
ir_exe_test.cc
DEPS
new_ir
pd_dialect
phi_kernel_adaptor
ir
phi
gtest)
......@@ -49,7 +49,7 @@ cc_test_old(
scalar_attribute_test.cc
DEPS
pd_dialect
new_ir
ir
gtest)
file(
......@@ -70,7 +70,7 @@ cc_test_old(
DEPS
program_translator
gtest
new_ir
pd_dialect)
pd_dialect
ir)
cc_test_old(ir_op_info_test SRCS op_info_test.cc DEPS gtest new_ir)
cc_test_old(ir_op_info_test SRCS op_info_test.cc DEPS gtest ir)
......@@ -22,13 +22,19 @@
#include "paddle/ir/core/dialect.h"
#include "paddle/ir/core/ir_context.h"
class AttributeA {};
IR_DECLARE_EXPLICIT_TYPE_ID(AttributeA)
IR_DEFINE_EXPLICIT_TYPE_ID(AttributeA)
// Minimal test-only dialect used to exercise IrContext dialect registration;
// it registers no types, attributes, or operations.
struct FakeDialect : ir::Dialect {
  // Registers this dialect into `context` under its unique TypeId.
  explicit FakeDialect(ir::IrContext *context)
      : ir::Dialect(name(), context, ir::TypeId::get<FakeDialect>()) {}
  // Unique dialect name used as the registration key.
  static const char *name() { return "fake"; }
};
IR_DECLARE_EXPLICIT_TYPE_ID(FakeDialect)
IR_DEFINE_EXPLICIT_TYPE_ID(FakeDialect)
TEST(attribute_test, attribute_base) {
class AttributeA {};
struct FakeDialect : ir::Dialect {
explicit FakeDialect(ir::IrContext *context)
: ir::Dialect(name(), context, ir::TypeId::get<FakeDialect>()) {}
static const char *name() { return "fake"; }
};
// Test 1: Test the function of IrContext to register Dialect.
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Dialect *fake_dialect = ctx->GetOrRegisterDialect<FakeDialect>();
......
......@@ -53,6 +53,8 @@ class OperationTest
fn(infer_meta);
}
};
IR_DECLARE_EXPLICIT_TYPE_ID(OperationTest)
IR_DEFINE_EXPLICIT_TYPE_ID(OperationTest)
const char *OperationTest::attributes_name[attributes_num] = {"op2_attr1",
"op2_attr2"};
......@@ -69,6 +71,8 @@ class TestDialect : public ir::Dialect {
private:
void initialize() { RegisterOps<OperationTest>(); }
};
IR_DECLARE_EXPLICIT_TYPE_ID(TestDialect)
IR_DEFINE_EXPLICIT_TYPE_ID(TestDialect)
TEST(infershape_test, infershape_test) {
ir::IrContext *ctx = ir::IrContext::Instance();
......
......@@ -34,6 +34,8 @@ class ReadOnlyTrait : public ir::OpTraitBase<ReadOnlyTrait> {
explicit ReadOnlyTrait(ir::Operation *op)
: ir::OpTraitBase<ReadOnlyTrait>(op) {}
};
IR_DECLARE_EXPLICIT_TYPE_ID(ReadOnlyTrait)
IR_DEFINE_EXPLICIT_TYPE_ID(ReadOnlyTrait)
/// \brief Define built-in Interface, derived from OpInterfaceBase. Concepts and
/// Models need to be defined within the class. Concept defines abstract
......@@ -66,6 +68,8 @@ class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
private:
Concept *impl_;
};
IR_DECLARE_EXPLICIT_TYPE_ID(InferShapeInterface)
IR_DEFINE_EXPLICIT_TYPE_ID(InferShapeInterface)
ir::AttributeMap CreateAttributeMap(std::vector<std::string> attribute_names,
std::vector<std::string> attributes) {
......@@ -118,6 +122,9 @@ class Operation1 : public ir::Op<Operation1> {
const char *Operation1::attributes_name[attributes_num] = {"op1_attr1",
"op1_attr2"};
IR_DECLARE_EXPLICIT_TYPE_ID(Operation1)
IR_DEFINE_EXPLICIT_TYPE_ID(Operation1)
// Define op2.
class Operation2
: public ir::Op<Operation2, ReadOnlyTrait, InferShapeInterface> {
......@@ -142,6 +149,8 @@ class Operation2
};
const char *Operation2::attributes_name[attributes_num] = {"op2_attr1",
"op2_attr2"};
IR_DECLARE_EXPLICIT_TYPE_ID(Operation2)
IR_DEFINE_EXPLICIT_TYPE_ID(Operation2)
// Define a dialect, op1 and op2 will be registered by this dialect.
class TestDialect : public ir::Dialect {
......@@ -164,6 +173,8 @@ class TestDialect : public ir::Dialect {
private:
void initialize() { RegisterOps<Operation1, Operation2>(); }
};
IR_DECLARE_EXPLICIT_TYPE_ID(TestDialect)
IR_DEFINE_EXPLICIT_TYPE_ID(TestDialect)
TEST(op_test, op_test) {
// (1) Register Dialect, Operation1, Operation2 into IrContext.
......
......@@ -49,6 +49,8 @@ class AddOp : public ir::Op<AddOp> {
}
}
};
IR_DECLARE_EXPLICIT_TYPE_ID(AddOp)
IR_DEFINE_EXPLICIT_TYPE_ID(AddOp)
TEST(program_test, program) {
// (1) Init environment.
......
......@@ -92,7 +92,8 @@ TEST(value_test, value_test) {
EXPECT_EQ(op3_first_input.next_use(), nullptr);
// Test 3: Value iterator
ir::Value::use_iterator iter = op1->result(0).begin();
using my_iterator = ir::Value::use_iterator;
my_iterator iter = op1->result(0).begin();
EXPECT_EQ(iter.owner(), op4);
++iter;
EXPECT_EQ(iter.owner(), op3);
......
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册