Unverified commit 126940b3 authored by wanghuancoder, committed by GitHub

[Eager] Delete the final-state name prefix (#45306)

Parent commit: 2dca718a
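In short, the generated eager ("final state") APIs drop their final_state_ / eager_final_state_ prefixes, while the old fluid-generated paths are renamed with a legacy marker instead (legacy_eager_codegen, eager_legacy_op_function_generator, _legacy_C_ops, core.eager.ops.legacy). The following is a minimal Python sketch of how call sites read after this change; the specific ops and signatures are illustrative assumptions, not taken verbatim from this diff:

# Hedged sketch, assuming an eager-mode build that exposes both modules.
import paddle
from paddle import _C_ops, _legacy_C_ops  # _legacy_C_ops is the renamed home of the old fluid-generated ops

x = paddle.rand([4, 4])

# New eager kernels are now exposed without the final_state_ prefix,
# e.g. what used to be spelled _C_ops.final_state_scale(...) becomes:
y = _C_ops.scale(x, 1.0, 0.0, True)  # scale, bias, bias_after_scale

# The old fluid-generated kernels keep their attribute-pair calling
# convention but live under the legacy namespace (core.eager.ops.legacy):
z = _legacy_C_ops.scale(x, 'scale', 1.0, 'bias', 0.0, 'bias_after_scale', True)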
......@@ -66,14 +66,14 @@ paddle/infrt/dialect/pd/common/pd_ops_info.h
paddle/infrt/tests/dialect/Output
paddle/infrt/tests/lit.cfg.py
paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc
paddle/fluid/pybind/eager_final_state_op_function.cc
paddle/fluid/pybind/eager_op_function.cc
# these files (directories) are generated before build system generation
paddle/fluid/operators/generated_op.cc
paddle/phi/ops/compat/generated_sig.cc
paddle/phi/api/yaml/parsed_apis/
python/paddle/utils/code_gen/
paddle/fluid/pybind/tmp_eager_final_state_op_function_impl.h
paddle/fluid/pybind/eager_final_state_op_function_impl.h
paddle/fluid/pybind/tmp_eager_op_function_impl.h
paddle/fluid/pybind/eager_op_function_impl.h
paddle/fluid/pybind/eager_op_function_impl.h
paddle/fluid/pybind/op_function_impl.h
......@@ -41,7 +41,7 @@ if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
grad_tensor_holder
SRCS grad_tensor_holder.cc
DEPS grad_node_info gradient_accumulator)
add_dependencies(grad_tensor_holder eager_final_state_codegen)
add_dependencies(grad_tensor_holder eager_codegen)
cc_library(
backward
SRCS backward.cc
......
......@@ -8,5 +8,5 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER))
final_dygraph_node
SRCS nodes.cc ${eager_manual_nodes}
DEPS ${eager_deps})
add_dependencies(final_dygraph_node eager_final_state_codegen)
add_dependencies(final_dygraph_node eager_codegen)
endif()
......@@ -8,5 +8,5 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER))
final_dygraph_function
SRCS dygraph_functions.cc ${eager_manual_functions}
DEPS ${eager_deps})
add_dependencies(final_dygraph_function eager_final_state_codegen)
add_dependencies(final_dygraph_function eager_codegen)
endif()
......@@ -16,10 +16,10 @@
#include "paddle/phi/api/include/tensor.h"
paddle::experimental::Tensor add_n_final_state_dygraph_function(
paddle::experimental::Tensor add_n_dygraph_function(
const std::vector<paddle::experimental::Tensor>& x);
paddle::experimental::Tensor conv2d_final_state_dygraph_function(
paddle::experimental::Tensor conv2d_dygraph_function(
const paddle::experimental::Tensor& input,
const paddle::experimental::Tensor& filter,
std::vector<int> strides,
......
......@@ -23,7 +23,7 @@
#pragma GCC diagnostic ignored "-Wunused-variable"
DECLARE_bool(check_nan_inf);
paddle::experimental::Tensor add_n_final_state_dygraph_function(
paddle::experimental::Tensor add_n_dygraph_function(
const std::vector<paddle::experimental::Tensor>& x) {
// Dygraph Record Event
paddle::platform::RecordEvent dygraph_entrance_record_event(
......@@ -46,7 +46,7 @@ paddle::experimental::Tensor add_n_final_state_dygraph_function(
paddle::imperative::AutoCastGuard guard(
egr::Controller::Instance().GetCurrentTracer(),
paddle::imperative::AmpLevel::O0);
return add_n_final_state_dygraph_function(NEW_x);
return add_n_dygraph_function(NEW_x);
}
}
......@@ -56,7 +56,7 @@ paddle::experimental::Tensor add_n_final_state_dygraph_function(
std::vector<egr::AutogradMeta*>* x_autograd_meta = &x_autograd_meta_vec;
// Forward API Call
VLOG(3) << "Final State Running: "
<< "add_n_final_state_dygraph_function";
<< "add_n_dygraph_function";
auto api_result = paddle::experimental::add_n(x);
// Check NaN and Inf if needed
if (FLAGS_check_nan_inf) {
......
......@@ -23,7 +23,7 @@
#pragma GCC diagnostic ignored "-Wunused-variable"
DECLARE_bool(check_nan_inf);
paddle::experimental::Tensor conv2d_final_state_dygraph_function(
paddle::experimental::Tensor conv2d_dygraph_function(
const paddle::experimental::Tensor& input,
const paddle::experimental::Tensor& filter,
std::vector<int> strides,
......@@ -59,17 +59,17 @@ paddle::experimental::Tensor conv2d_final_state_dygraph_function(
paddle::imperative::AutoCastGuard guard(
egr::Controller::Instance().GetCurrentTracer(),
paddle::imperative::AmpLevel::O0);
return conv2d_final_state_dygraph_function(NEW_input,
NEW_filter,
strides,
paddings,
paddding_algorithm,
groups,
dilations,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search);
return conv2d_dygraph_function(NEW_input,
NEW_filter,
strides,
paddings,
paddding_algorithm,
groups,
dilations,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search);
}
}
......@@ -80,7 +80,7 @@ paddle::experimental::Tensor conv2d_final_state_dygraph_function(
egr::EagerUtils::nullable_autograd_meta(filter);
// Forward API Call
VLOG(3) << "Final State Running: "
<< "conv2d_final_state_dygraph_function";
<< "conv2d_dygraph_function";
auto api_result = paddle::experimental::conv2d(input,
filter,
strides,
......
......@@ -64,8 +64,8 @@ AddNGradNodeFinal::operator()(
// dygraph function
for (size_t i = 0; i < returns[0].size(); i++) {
returns[0][i] = ::scale_final_state_dygraph_function(
out_grad, phi::Scalar(1.0), 0.0, true);
returns[0][i] =
::scale_dygraph_function(out_grad, phi::Scalar(1.0), 0.0, true);
}
// Check NaN and Inf id needed
......
add_subdirectory(final_state_generator)
add_subdirectory(generator)
set(EAGER_GENERETOR_DEPS
${GLOB_OP_LIB}
......@@ -88,7 +88,7 @@ if(WIN32)
endif()
add_custom_target(
eager_codegen
legacy_eager_codegen
COMMAND
"${eager_generator_path}/eager_generator.exe"
"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated"
......@@ -97,7 +97,7 @@ if(WIN32)
VERBATIM)
else()
add_custom_target(
eager_codegen
legacy_eager_codegen
COMMAND
${CMAKE_COMMAND} -E env
"LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH}:${CMAKE_CURRENT_BINARY_DIR}/../../pybind"
......
......@@ -37,11 +37,11 @@ namespace framework {
// To handle append_op at python-level
std::unordered_map<std::string, std::vector<std::string>>
core_ops_returns_info = {};
std::unordered_map<std::string, std::vector<std::string>> core_ops_args_info =
{};
core_ops_legacy_returns_info = {};
std::unordered_map<std::string, std::vector<std::string>>
core_ops_args_type_info = {};
core_ops_legacy_args_info = {};
std::unordered_map<std::string, std::vector<std::string>>
core_ops_legacy_args_type_info = {};
/* --- Static maps to handle corner cases --- */
static std::unordered_map<std::string, paddle::framework::AttributeMap>
......@@ -1473,10 +1473,10 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
std::string dygraph_function_args_str = "";
std::string amp_function_call_args_str = "";
core_ops_args_info[op_type] = {};
core_ops_args_type_info[op_type] = {};
core_ops_args_info[op_type].resize(in_vars.size());
core_ops_args_type_info[op_type].resize(in_vars.size());
core_ops_legacy_args_info[op_type] = {};
core_ops_legacy_args_type_info[op_type] = {};
core_ops_legacy_args_info[op_type].resize(in_vars.size());
core_ops_legacy_args_type_info[op_type].resize(in_vars.size());
/* ------ Dygraph forward function generation ------ */
generated_function_body += " // Dygraph Forward Pass\n";
......@@ -1500,7 +1500,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
amp_function_call_args_str_list[input_position] =
" NEW_" + LegalizeVarName(input_name);
core_ops_args_type_info[op_type][input_position] = "list";
core_ops_legacy_args_type_info[op_type][input_position] = "list";
} else {
// inplace tensor can't be const
const char* FWD_INS_ARG_TEMPLATE;
......@@ -1522,9 +1522,9 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
amp_function_call_args_str_list[input_position] =
" NEW_" + LegalizeVarName(input_name);
core_ops_args_type_info[op_type][input_position] = "tensor";
core_ops_legacy_args_type_info[op_type][input_position] = "tensor";
}
core_ops_args_info[op_type][input_position] = input_name;
core_ops_legacy_args_info[op_type][input_position] = input_name;
if (input.dispensable()) continue;
......@@ -1666,7 +1666,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
dygraph_function_args_str += arg_str;
amp_function_call_args_str += (", " + LegalizeVarName(output_var_name));
core_ops_args_type_info[op_type].push_back("list");
core_ops_legacy_args_type_info[op_type].push_back("list");
} else {
const char* FWD_NUM_ARG_TEMPLATE = ", paddle::experimental::Tensor* %s";
std::string arg_str = paddle::string::Sprintf(
......@@ -1674,7 +1674,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
dygraph_function_args_str += arg_str;
amp_function_call_args_str += (", " + LegalizeVarName(output_var_name));
core_ops_args_type_info[op_type].push_back("tensor");
core_ops_legacy_args_type_info[op_type].push_back("tensor");
}
if (BeSameAsInput(output_name, input_names)) {
......@@ -1693,7 +1693,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
output_name,
LegalizeVarName(output_var_name));
}
core_ops_args_info[op_type].push_back(output_name);
core_ops_legacy_args_info[op_type].push_back(output_name);
} else if (!forward_inplace_map.empty() &&
forward_inplace_map.count(output_name)) {
......@@ -1727,8 +1727,8 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
"{ \"%s\", egr::EagerUtils::CreateVars(%s) },";
outs_contents_str += paddle::string::Sprintf(
FWD_OUTS_CONTENT_TEMPLATE, output_name, outnum);
core_ops_args_info[op_type].push_back(outnum);
core_ops_args_type_info[op_type].push_back("int");
core_ops_legacy_args_info[op_type].push_back(outnum);
core_ops_legacy_args_type_info[op_type].push_back("int");
} else {
const char* FWD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", "
......@@ -2003,10 +2003,11 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
VLOG(6) << "Converted Output VarBase to EagerVariable(s)";
/* ------ END Generate TraceOp ----- */
// [Generation] Handle core_ops_returns_info
// avoid inplace op changing core_ops_returns_info
if (core_ops_returns_info.empty() || !core_ops_returns_info.count(op_type)) {
core_ops_returns_info[op_type] = return_contents;
// [Generation] Handle core_ops_legacy_returns_info
// avoid inplace op changing core_ops_legacy_returns_info
if (core_ops_legacy_returns_info.empty() ||
!core_ops_legacy_returns_info.count(op_type)) {
core_ops_legacy_returns_info[op_type] = return_contents;
}
// [Generation] ComputeRequireGrad -> GradNodeCreation
......@@ -2983,13 +2984,13 @@ static std::string GenerateDygraphHFileIncludes() {
dygraph_forward_api_includes_str +=
"extern std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_info;\n";
"core_ops_legacy_args_info;\n";
dygraph_forward_api_includes_str +=
"extern std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_type_info;\n";
"core_ops_legacy_args_type_info;\n";
dygraph_forward_api_includes_str +=
"extern std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_returns_info;\n\n";
"core_ops_legacy_returns_info;\n\n";
return dygraph_forward_api_includes_str;
}
......@@ -3060,7 +3061,7 @@ static void GenerateNodeCCFile(const std::string& node_cc_path,
static std::string ConvertCoreOpsInfosToString(
const std::unordered_map<std::string, std::vector<std::string>>&
core_ops_info) {
std::string core_ops_returns_info_init_str = "";
std::string core_ops_legacy_returns_info_init_str = "";
for (const auto& iter : core_ops_info) {
const char* Core_Ops_Returns_TEMPLATE = "{ \"%s\", { %s } },\n";
const std::string& op_type = iter.first;
......@@ -3074,23 +3075,23 @@ static std::string ConvertCoreOpsInfosToString(
if (returns_str.size() > 0) returns_str.pop_back();
std::string op_type_init_str = paddle::string::Sprintf(
Core_Ops_Returns_TEMPLATE, op_type, returns_str);
core_ops_returns_info_init_str += op_type_init_str;
core_ops_legacy_returns_info_init_str += op_type_init_str;
}
// Remove trailing ','
if (core_ops_returns_info_init_str.size() > 0)
core_ops_returns_info_init_str.pop_back();
if (core_ops_legacy_returns_info_init_str.size() > 0)
core_ops_legacy_returns_info_init_str.pop_back();
return core_ops_returns_info_init_str;
return core_ops_legacy_returns_info_init_str;
}
static std::string GenerateCoreOpsArgsInfo() {
const char* Core_Ops_Returns_MAP_TEMPLATE =
"std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_info = { %s };\n";
"core_ops_legacy_args_info = { %s };\n";
std::string core_ops_args_info_init_str =
ConvertCoreOpsInfosToString(core_ops_args_info);
ConvertCoreOpsInfosToString(core_ops_legacy_args_info);
std::string core_ops_info_str = paddle::string::Sprintf(
Core_Ops_Returns_MAP_TEMPLATE, core_ops_args_info_init_str);
......@@ -3101,10 +3102,10 @@ static std::string GenerateCoreOpsArgsInfo() {
static std::string GenerateCoreOpsArgsTypeInfo() {
const char* Core_Ops_Returns_MAP_TEMPLATE =
"std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_type_info = { %s };\n";
"core_ops_legacy_args_type_info = { %s };\n";
std::string core_ops_args_type_info_init_str =
ConvertCoreOpsInfosToString(core_ops_args_type_info);
ConvertCoreOpsInfosToString(core_ops_legacy_args_type_info);
std::string core_ops_info_str = paddle::string::Sprintf(
Core_Ops_Returns_MAP_TEMPLATE, core_ops_args_type_info_init_str);
......@@ -3115,13 +3116,13 @@ static std::string GenerateCoreOpsArgsTypeInfo() {
static std::string GenerateCoreOpsReturnsInfo() {
const char* Core_Ops_Returns_MAP_TEMPLATE =
"std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_returns_info = { %s };\n";
"core_ops_legacy_returns_info = { %s };\n";
std::string core_ops_returns_info_init_str =
ConvertCoreOpsInfosToString(core_ops_returns_info);
std::string core_ops_legacy_returns_info_init_str =
ConvertCoreOpsInfosToString(core_ops_legacy_returns_info);
std::string core_ops_info_str = paddle::string::Sprintf(
Core_Ops_Returns_MAP_TEMPLATE, core_ops_returns_info_init_str);
Core_Ops_Returns_MAP_TEMPLATE, core_ops_legacy_returns_info_init_str);
return core_ops_info_str;
}
......
......@@ -124,7 +124,7 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
".tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/nodes/nodes"
+ str(i + 1) + ".cc\"\n")
f.write(" DEPENDS eager_codegen\n")
f.write(" DEPENDS legacy_eager_codegen\n")
f.write(" VERBATIM)\n")
f.write("cc_library(dygraph_node SRCS ")
......@@ -154,7 +154,7 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
f.write(
" COMMAND ${CMAKE_COMMAND} -E copy_if_different \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions_returns_info.tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions_returns_info.cc\"\n"
)
f.write(" DEPENDS eager_codegen\n")
f.write(" DEPENDS legacy_eager_codegen\n")
f.write(" VERBATIM)\n")
f.write("cc_library(dygraph_function SRCS ")
......
......@@ -34,10 +34,10 @@ set(fwd_api_yaml_path
message("Final State Eager CodeGen")
add_custom_target(
eager_final_state_codegen
eager_codegen
COMMAND
"${PYTHON_EXECUTABLE}"
"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py"
"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py"
"--api_yaml_path=${api_yaml_path},${fwd_api_yaml_path}"
"--backward_yaml_path=${backward_yaml_path}"
"--forwards_cc_path=${tmp_forwards_cc_path}"
......@@ -54,16 +54,15 @@ add_custom_target(
VERBATIM)
set(tmp_python_c_output_path
"${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/eager_final_state_op_function.cc.tmp"
)
"${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/eager_op_function.cc.tmp")
set(python_c_output_path
"${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/eager_final_state_op_function.cc")
"${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/eager_op_function.cc")
add_custom_target(
eager_final_state_python_c_codegen
eager_python_c_codegen
COMMAND
"${PYTHON_EXECUTABLE}"
"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py"
"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py"
"--api_yaml_path=${api_yaml_path},${fwd_api_yaml_path}"
"--output_path=${tmp_python_c_output_path}"
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_python_c_output_path}
......
......@@ -165,7 +165,7 @@ def GetGradNodeName(string):
def GetDygraphForwardFunctionName(string):
return f"{string}_final_state_dygraph_function"
return f"{string}_dygraph_function"
def GetIntermediateAPIFunctionName(string):
......@@ -198,7 +198,7 @@ def GetInplacedFunctionName(function_name):
def GetForwardFunctionName(string):
return f"{string}_final_state_dygraph_function"
return f"{string}_dygraph_function"
def GetIndent(num):
......
......@@ -349,13 +349,13 @@ using CPUPlace = phi::CPUPlace;
CORE_OPS_INFO_TEMPLATE = \
"""
std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_info = {{
std::unordered_map<std::string, std::vector<std::string>> core_ops_args_info = {{
{}
}};
std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_type_info = {{
std::unordered_map<std::string, std::vector<std::string>> core_ops_args_type_info = {{
{}
}};
std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_returns_info = {{
std::unordered_map<std::string, std::vector<std::string>> core_ops_returns_info = {{
{}
}};
......@@ -363,9 +363,9 @@ std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_r
CORE_OPS_DECLARATION_TEMPLATE = \
"""
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_type_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_returns_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_args_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_args_type_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_returns_info;
"""
......@@ -1260,31 +1260,24 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
forward_inputs_position_map.keys()) + len(forward_attrs_list)
num_returns = len(forward_outputs_position_map.keys())
final_state_fwd_api_name = "final_state_" + forward_api_name
core_ops_returns_info[final_state_fwd_api_name] = [
"" for i in range(num_returns)
]
core_ops_args_info[final_state_fwd_api_name] = [
"" for i in range(num_args)
]
core_ops_args_type_info[final_state_fwd_api_name] = [
"" for i in range(num_args)
]
fwd_api_name = "" + forward_api_name
core_ops_returns_info[fwd_api_name] = ["" for i in range(num_returns)]
core_ops_args_info[fwd_api_name] = ["" for i in range(num_args)]
core_ops_args_type_info[fwd_api_name] = ["" for i in range(num_args)]
for name, (ttype, pos) in forward_inputs_position_map.items():
core_ops_args_info[final_state_fwd_api_name][pos] = name
core_ops_args_info[fwd_api_name][pos] = name
if IsPlainTensorType(ttype):
core_ops_args_type_info[final_state_fwd_api_name][
pos] = "tensor"
core_ops_args_type_info[fwd_api_name][pos] = "tensor"
else:
assert IsVectorTensorType(ttype)
core_ops_args_type_info[final_state_fwd_api_name][pos] = "list"
core_ops_args_type_info[fwd_api_name][pos] = "list"
for name, _, _, pos in forward_attrs_list:
core_ops_args_info[final_state_fwd_api_name][pos] = name
core_ops_args_info[fwd_api_name][pos] = name
for name, (ttype, pos) in forward_outputs_position_map.items():
core_ops_returns_info[final_state_fwd_api_name][pos] = name
core_ops_returns_info[fwd_api_name][pos] = name
def run(self):
super().run()
......
......@@ -85,7 +85,7 @@ RETURN_INPLACE_PYOBJECT_TEMPLATE = \
PYTHON_C_FUNCTION_TEMPLATE = \
"""
static PyObject * eager_final_state_api_{}(PyObject *self, PyObject *args, PyObject *kwargs) {{
static PyObject * eager_api_{}(PyObject *self, PyObject *args, PyObject *kwargs) {{
{}
PyThreadState *tstate = nullptr;
......@@ -146,7 +146,7 @@ FUNCTION_NAME_TEMPLATE = \
PYTHON_C_FUNCTION_REG_TEMPLATE = \
"""
{{\"final_state_{}{}\", (PyCFunction)(void(*)(void)) {}eager_final_state_api_{}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {} in dygraph.\"}}
{{\"{}{}\", (PyCFunction)(void(*)(void)) {}eager_api_{}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {} in dygraph.\"}}
"""
......@@ -162,7 +162,7 @@ PYTHON_C_WRAPPER_TEMPLATE = \
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/pybind/op_function_common.h"
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/pybind/eager_final_state_custom_python_api.h"
#include "paddle/fluid/pybind/eager_custom_python_api.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/eager/amp_utils.h"
#include "paddle/fluid/eager/eager_amp_auto_cast.h"
......@@ -193,11 +193,11 @@ void BindFinalStateEagerOpFunctions(pybind11::module *module) {{
CORE_OPS_INFO = \
"""
static PyObject * eager_get_final_state_core_ops_args_info(PyObject *self) {
static PyObject * eager_get_core_ops_args_info(PyObject *self) {
PyThreadState *tstate = nullptr;
try
{
return ToPyObject(core_ops_final_state_args_info);
return ToPyObject(core_ops_args_info);
}
catch(...) {
if (tstate) {
......@@ -208,11 +208,11 @@ static PyObject * eager_get_final_state_core_ops_args_info(PyObject *self) {
}
}
static PyObject * eager_get_final_state_core_ops_args_type_info(PyObject *self) {
static PyObject * eager_get_core_ops_args_type_info(PyObject *self) {
PyThreadState *tstate = nullptr;
try
{
return ToPyObject(core_ops_final_state_args_type_info);
return ToPyObject(core_ops_args_type_info);
}
catch(...) {
if (tstate) {
......@@ -223,11 +223,11 @@ static PyObject * eager_get_final_state_core_ops_args_type_info(PyObject *self)
}
}
static PyObject * eager_get_final_state_core_ops_returns_info(PyObject *self) {
static PyObject * eager_get_core_ops_returns_info(PyObject *self) {
PyThreadState *tstate = nullptr;
try
{
return ToPyObject(core_ops_final_state_returns_info);
return ToPyObject(core_ops_returns_info);
}
catch(...) {
if (tstate) {
......@@ -242,16 +242,16 @@ static PyObject * eager_get_final_state_core_ops_returns_info(PyObject *self) {
CORE_OPS_INFO_REGISTRY = \
"""
{\"get_final_state_core_ops_args_info\",
(PyCFunction)(void(*)(void))eager_get_final_state_core_ops_args_info, METH_NOARGS,
\"C++ interface function for eager_get_final_state_core_ops_args_info.\"},
{\"get_final_state_core_ops_args_type_info\",
(PyCFunction)(void(*)(void))eager_get_final_state_core_ops_args_type_info,
{\"get_core_ops_args_info\",
(PyCFunction)(void(*)(void))eager_get_core_ops_args_info, METH_NOARGS,
\"C++ interface function for eager_get_core_ops_args_info.\"},
{\"get_core_ops_args_type_info\",
(PyCFunction)(void(*)(void))eager_get_core_ops_args_type_info,
METH_NOARGS,
\"C++ interface function for eager_get_final_state_core_ops_args_type_info.\"},
{\"get_final_state_core_ops_returns_info\",
(PyCFunction)(void(*)(void))eager_get_final_state_core_ops_returns_info,
METH_NOARGS, \"C++ interface function for eager_get_final_state_core_ops_returns_info.\"},
\"C++ interface function for eager_get_core_ops_args_type_info.\"},
{\"get_core_ops_returns_info\",
(PyCFunction)(void(*)(void))eager_get_core_ops_returns_info,
METH_NOARGS, \"C++ interface function for eager_get_core_ops_returns_info.\"},
"""
NAMESPACE_WRAPPER_TEMPLATE = \
......
......@@ -45,7 +45,7 @@ inline paddle::experimental::Tensor Cast(
const bool trace_backward = true) {
if (input.is_sparse_coo_tensor() || input.is_sparse_csr_tensor()) {
if (trace_backward) {
return sparse::cast_final_state_dygraph_function(
return sparse::cast_dygraph_function(
input, paddle::experimental::DataType::UNDEFINED, dst_dtype);
} else {
return paddle::experimental::sparse::cast(
......@@ -53,7 +53,7 @@ inline paddle::experimental::Tensor Cast(
}
} else {
if (trace_backward) {
return cast_final_state_dygraph_function(input, dst_dtype);
return cast_dygraph_function(input, dst_dtype);
} else {
return paddle::experimental::cast(input, dst_dtype);
}
......
......@@ -143,7 +143,7 @@ void GradTensorHolder::add(size_t slot_id,
if (t.is_dense_tensor()) {
if (buffer_tensor.is_dense_tensor()) {
if (create_graph || t.is_custom_device()) {
buffer_tensor = add_final_state_dygraph_function(t, buffer_tensor);
buffer_tensor = add_dygraph_function(t, buffer_tensor);
} else {
paddle::imperative::TensorAdd<paddle::experimental::Tensor>(
t, &buffer_tensor);
......@@ -170,8 +170,7 @@ void GradTensorHolder::add(size_t slot_id,
std::make_shared<phi::DenseTensor>(
buffer_sparse->non_zero_elements()));
if (create_graph || t.is_custom_device()) {
buffer_values =
add_final_state_dygraph_function(t_values, buffer_values);
buffer_values = add_dygraph_function(t_values, buffer_values);
} else {
paddle::imperative::TensorAdd<paddle::experimental::Tensor>(
t_values, &buffer_values);
......
......@@ -77,8 +77,7 @@ void benchmark_eager_matmul(const paddle::experimental::Tensor& X,
size_t max_num_runs = accuracy_check ? 2 : max_num_benchmark_runs;
for (size_t i = 0; i < max_num_runs; i++) {
input_tensor0 =
matmul_final_state_dygraph_function(input_tensor0, Y, false, false);
input_tensor0 = matmul_dygraph_function(input_tensor0, Y, false, false);
}
std::vector<paddle::experimental::Tensor> target_tensors = {input_tensor0};
......
......@@ -8,4 +8,4 @@ op_function6.cc
op_function7.cc
op_function8.cc
eager_op_function.cc
eager_final_state_op_function.cc
eager_legacy_op_function.cc
......@@ -263,8 +263,9 @@ if(WITH_PYTHON)
add_executable(op_function_generator op_function_generator.cc)
target_link_libraries(op_function_generator ${OP_FUNCTION_GENERETOR_DEPS})
add_executable(eager_op_function_generator eager_op_function_generator.cc)
target_link_libraries(eager_op_function_generator
add_executable(eager_legacy_op_function_generator
eager_legacy_op_function_generator.cc)
target_link_libraries(eager_legacy_op_function_generator
${OP_FUNCTION_GENERETOR_DEPS})
if(NOT WIN32)
add_executable(kernel_signature_generator kernel_signature_generator.cc)
......@@ -274,10 +275,11 @@ if(WITH_PYTHON)
get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
target_link_libraries(op_function_generator ${os_dependency_modules})
target_link_libraries(eager_op_function_generator ${os_dependency_modules})
target_link_libraries(eager_legacy_op_function_generator
${os_dependency_modules})
if(WITH_ROCM)
target_link_libraries(op_function_generator ${ROCM_HIPRTC_LIB})
target_link_libraries(eager_op_function_generator ${ROCM_HIPRTC_LIB})
target_link_libraries(eager_legacy_op_function_generator ${ROCM_HIPRTC_LIB})
target_link_libraries(kernel_signature_generator ${ROCM_HIPRTC_LIB})
endif()
......@@ -300,7 +302,7 @@ if(WITH_PYTHON)
set(tmp_impl_file8 ${impl_file8}.tmp)
set(CODE_GEN_SPLIT_FILE_COUNT "8")
set(eager_impl_file
${CMAKE_SOURCE_DIR}/paddle/fluid/pybind/eager_op_function.cc)
${CMAKE_SOURCE_DIR}/paddle/fluid/pybind/eager_legacy_op_function.cc)
set(tmp_eager_impl_file ${eager_impl_file}.tmp)
execute_process(
......@@ -311,8 +313,8 @@ if(WITH_PYTHON)
"${CODE_GEN_SPLIT_FILE_COUNT}")
set(OP_IMPL_DEPS op_function_generator)
set(EAGER_OP_IMPL_DEPS eager_op_function_generator
eager_final_state_python_c_codegen)
set(EAGER_OP_IMPL_DEPS eager_legacy_op_function_generator
eager_python_c_codegen)
if(WIN32)
if("${CMAKE_GENERATOR}" STREQUAL "Ninja")
......@@ -342,13 +344,13 @@ if(WITH_PYTHON)
file(
WRITE
${CMAKE_BINARY_DIR}/paddle/fluid/pybind/eager_op_function_generator_retry.bat
${CMAKE_BINARY_DIR}/paddle/fluid/pybind/eager_legacy_op_function_generator_retry.bat
""
"set build_times=1\n"
":retry\n"
"ECHO eager_op_function_generator run %build_times% time\n"
"taskkill /f /im eager_op_function_generator.exe 2>NUL\n"
"${op_impl_path}/eager_op_function_generator.exe ${tmp_eager_impl_file}\n"
"ECHO eager_legacy_op_function_generator run %build_times% time\n"
"taskkill /f /im eager_legacy_op_function_generator.exe 2>NUL\n"
"${op_impl_path}/eager_legacy_op_function_generator.exe ${tmp_eager_impl_file}\n"
"if %ERRORLEVEL% NEQ 0 (\n"
" set /a build_times=%build_times%+1\n"
" if %build_times% GEQ 10 (\n"
......@@ -436,7 +438,7 @@ if(WITH_PYTHON)
add_custom_command(
OUTPUT ${eager_impl_file}
COMMAND
${CMAKE_BINARY_DIR}/paddle/fluid/pybind/eager_op_function_generator_retry.bat
${CMAKE_BINARY_DIR}/paddle/fluid/pybind/eager_legacy_op_function_generator_retry.bat
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_eager_impl_file}
${eager_impl_file}
COMMENT "copy_if_different ${tmp_eager_impl_file} to ${eager_impl_file}"
......@@ -525,7 +527,7 @@ if(WITH_PYTHON)
OUTPUT ${eager_impl_file}
COMMAND
${CMAKE_COMMAND} -E env "LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH}:."
"${CMAKE_CURRENT_BINARY_DIR}/eager_op_function_generator"
"${CMAKE_CURRENT_BINARY_DIR}/eager_legacy_op_function_generator"
"${tmp_eager_impl_file}"
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_eager_impl_file}
${eager_impl_file}
......@@ -536,7 +538,7 @@ if(WITH_PYTHON)
endif()
add_custom_target(op_function_generator_cmd ALL DEPENDS op_function)
if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
add_custom_target(eager_op_function_generator_cmd ALL
add_custom_target(eager_legacy_op_function_generator_cmd ALL
DEPENDS ${eager_impl_file})
endif()
......@@ -549,8 +551,8 @@ if(WITH_PYTHON)
set(PYBIND_SRCS eager_properties.cc ${PYBIND_SRCS})
set(PYBIND_SRCS eager_utils.cc ${PYBIND_SRCS})
set(PYBIND_SRCS eager_py_layer.cc ${PYBIND_SRCS})
set(PYBIND_SRCS eager_legacy_op_function.cc ${PYBIND_SRCS})
set(PYBIND_SRCS eager_op_function.cc ${PYBIND_SRCS})
set(PYBIND_SRCS eager_final_state_op_function.cc ${PYBIND_SRCS})
list(APPEND PYBIND_DEPS eager_api)
list(APPEND PYBIND_DEPS autograd_meta)
list(APPEND PYBIND_DEPS backward)
......@@ -575,8 +577,8 @@ if(WITH_PYTHON)
DEPS ${PYBIND_DEPS} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})
if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
add_dependencies(paddle_pybind eager_codegen)
add_dependencies(paddle_pybind eager_op_function_generator_cmd)
add_dependencies(paddle_pybind legacy_eager_codegen)
add_dependencies(paddle_pybind eager_legacy_op_function_generator_cmd)
endif()
if(NOT APPLE AND NOT WIN32)
......
......@@ -15,40 +15,39 @@
#include <iostream>
#include "paddle/fluid/eager/to_static/run_program_op_func.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace pybind {
static PyObject *eager_api_run_program(PyObject *self,
PyObject *args,
PyObject *kwargs) {
static PyObject *eager_api_linear(PyObject *self,
PyObject *args,
PyObject *kwargs) {
PyThreadState *tstate = nullptr;
try {
auto X = GetTensorListFromArgs("run_program", "X", args, 0, false);
auto Params = GetTensorListFromArgs("run_program", "Params", args, 1, true);
auto Out = GetTensorPtrListFromArgs("run_program", "Out", args, 2, false);
auto OutScope =
GetScopePtrListFromArgs("run_program", "OutScope", args, 3, false);
auto DOut = GetTensorPtrListFromArgs("run_program", "DOut", args, 4, true);
framework::AttributeMap attrs;
// TODO(zengjinle): support CUDA Graph on eager mode
ConstructAttrMapFromPyArgs(
"run_program", args, 6, PyTuple_GET_SIZE(args), attrs);
auto x = GetTensorFromArgs("linear", "X", args, 0, false);
auto weight = GetTensorFromArgs("linear", "weight", args, 1, false);
auto bias = GetTensorFromArgs("linear", "Bias", args, 2, true);
tstate = PyEval_SaveThread();
run_program_dygraph_function(X, Params, Out, OutScope, DOut, attrs);
PyEval_RestoreThread(tstate);
tstate = nullptr;
Py_RETURN_NONE;
if (bias.initialized()) {
auto mm_out = matmul_dygraph_function(x, weight, false, false);
auto out = add_dygraph_function(mm_out, bias);
PyEval_RestoreThread(tstate);
tstate = nullptr;
return ToPyObject(out);
} else {
auto mm_out = matmul_dygraph_function(x, weight, false, false);
PyEval_RestoreThread(tstate);
tstate = nullptr;
return ToPyObject(mm_out);
}
} catch (paddle::platform::EnforceNotMet &exception) {
if (tstate) {
PyEval_RestoreThread(tstate);
}
std::ostringstream sout;
sout << exception.what();
sout << " [operator < run_program > error]";
sout << " [operator < linear > error]";
exception.set_error_str(sout.str());
ThrowExceptionToPython(std::current_exception());
return nullptr;
......@@ -61,9 +60,9 @@ static PyObject *eager_api_run_program(PyObject *self,
}
}
static PyMethodDef CustomEagerMethods[] = {
{"run_program",
(PyCFunction)(void (*)(void))eager_api_run_program,
static PyMethodDef CustomEagerFinalStateMethods[] = {
{"linear",
(PyCFunction)(void (*)(void))eager_api_linear,
METH_VARARGS | METH_KEYWORDS,
"C++ interface function for run_program in dygraph."},
{nullptr, nullptr, 0, nullptr}};
......
......@@ -15,41 +15,40 @@
#include <iostream>
#include "paddle/fluid/eager/to_static/run_program_op_func.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace pybind {
static PyObject *eager_api_final_state_linear(PyObject *self,
PyObject *args,
PyObject *kwargs) {
static PyObject *eager_api_run_program(PyObject *self,
PyObject *args,
PyObject *kwargs) {
PyThreadState *tstate = nullptr;
try {
auto x = GetTensorFromArgs("linear", "X", args, 0, false);
auto weight = GetTensorFromArgs("linear", "weight", args, 1, false);
auto bias = GetTensorFromArgs("linear", "Bias", args, 2, true);
auto X = GetTensorListFromArgs("run_program", "X", args, 0, false);
auto Params = GetTensorListFromArgs("run_program", "Params", args, 1, true);
auto Out = GetTensorPtrListFromArgs("run_program", "Out", args, 2, false);
auto OutScope =
GetScopePtrListFromArgs("run_program", "OutScope", args, 3, false);
auto DOut = GetTensorPtrListFromArgs("run_program", "DOut", args, 4, true);
framework::AttributeMap attrs;
// TODO(zengjinle): support CUDA Graph on eager mode
ConstructAttrMapFromPyArgs(
"run_program", args, 6, PyTuple_GET_SIZE(args), attrs);
tstate = PyEval_SaveThread();
if (bias.initialized()) {
auto mm_out =
matmul_final_state_dygraph_function(x, weight, false, false);
auto out = add_final_state_dygraph_function(mm_out, bias);
PyEval_RestoreThread(tstate);
tstate = nullptr;
return ToPyObject(out);
} else {
auto mm_out =
matmul_final_state_dygraph_function(x, weight, false, false);
PyEval_RestoreThread(tstate);
tstate = nullptr;
return ToPyObject(mm_out);
}
run_program_dygraph_function(X, Params, Out, OutScope, DOut, attrs);
PyEval_RestoreThread(tstate);
tstate = nullptr;
Py_RETURN_NONE;
} catch (paddle::platform::EnforceNotMet &exception) {
if (tstate) {
PyEval_RestoreThread(tstate);
}
std::ostringstream sout;
sout << exception.what();
sout << " [operator < linear > error]";
sout << " [operator < run_program > error]";
exception.set_error_str(sout.str());
ThrowExceptionToPython(std::current_exception());
return nullptr;
......@@ -62,9 +61,9 @@ static PyObject *eager_api_final_state_linear(PyObject *self,
}
}
static PyMethodDef CustomEagerFinalStateMethods[] = {
{"final_state_linear",
(PyCFunction)(void (*)(void))eager_api_final_state_linear,
static PyMethodDef CustomEagerMethods[] = {
{"run_program",
(PyCFunction)(void (*)(void))eager_api_run_program,
METH_VARARGS | METH_KEYWORDS,
"C++ interface function for run_program in dygraph."},
{nullptr, nullptr, 0, nullptr}};
......
......@@ -320,10 +320,12 @@ std::string GenerateOpFunctionsBody(
std::string inplace_arg_name = inplace_pair.second;
std::string inplace_return_name = inplace_pair.first;
const char* RETURN_INPLACE_TENSOR_TEMPLATE =
" ssize_t arg_id = GetIdxFromCoreOpsInfoMap(core_ops_args_info, "
" ssize_t arg_id = "
"GetIdxFromCoreOpsInfoMap(core_ops_legacy_args_info, "
"\"%s\", \"%s\");\n"
" ssize_t return_id = "
"GetIdxFromCoreOpsInfoMap(core_ops_returns_info, \"%s\", \"%s\");\n"
"GetIdxFromCoreOpsInfoMap(core_ops_legacy_returns_info, \"%s\", "
"\"%s\");\n"
" inplace_var_idx_map[return_id] = arg_id;";
return_str += paddle::string::Sprintf(RETURN_INPLACE_TENSOR_TEMPLATE,
op_type,
......@@ -361,7 +363,7 @@ static std::string GenerateCoreOpsInfoMap() {
" PyThreadState *tstate = nullptr;\n"
" try\n"
" {\n"
" return ToPyObject(core_ops_args_info);\n"
" return ToPyObject(core_ops_legacy_args_info);\n"
" }\n"
" catch(...) {\n"
" if (tstate) {\n"
......@@ -376,7 +378,7 @@ static std::string GenerateCoreOpsInfoMap() {
" PyThreadState *tstate = nullptr;\n"
" try\n"
" {\n"
" return ToPyObject(core_ops_args_type_info);\n"
" return ToPyObject(core_ops_legacy_args_type_info);\n"
" }\n"
" catch(...) {\n"
" if (tstate) {\n"
......@@ -391,7 +393,7 @@ static std::string GenerateCoreOpsInfoMap() {
" PyThreadState *tstate = nullptr;\n"
" try\n"
" {\n"
" return ToPyObject(core_ops_returns_info);\n"
" return ToPyObject(core_ops_legacy_returns_info);\n"
" }\n"
" catch(...) {\n"
" if (tstate) {\n"
......@@ -429,7 +431,7 @@ GenerateOpFunctions() {
!phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type)) {
continue;
}
std::string func_name = "eager_api_" + op_type;
std::string func_name = "eager_legacy_api_" + op_type;
std::string op_function_str =
GenerateOpFunctionsBody(op_proto, func_name, {});
......@@ -461,7 +463,7 @@ GenerateOpFunctions() {
}
std::string inplace_op_type = op_type + "_";
std::string inplace_func_name = "eager_api_" + inplace_op_type;
std::string inplace_func_name = "eager_legacy_api_" + inplace_op_type;
std::string inplace_op_function_str =
GenerateOpFunctionsBody(op_proto, inplace_func_name, inplace_map);
......@@ -500,7 +502,7 @@ int main(int argc, char* argv[]) {
"\"paddle/fluid/platform/profiler/event_tracing.h\"",
"\"paddle/fluid/pybind/exception.h\"",
"\"paddle/fluid/pybind/op_function_common.h\"",
"\"paddle/fluid/pybind/eager_custom_python_api.h\"",
"\"paddle/fluid/pybind/eager_legacy_custom_python_api.h\"",
"\"paddle/fluid/pybind/eager.h\""};
std::ofstream out(argv[1], std::ios::out);
......@@ -540,11 +542,12 @@ int main(int argc, char* argv[]) {
out << "void BindEagerOpFunctions(pybind11::module *module) {\n"
<< " InitOpsAttrTypeMap();\n"
<< " auto m = module->def_submodule(\"ops\");\n"
<< " if (PyModule_AddFunctions(m.ptr(), ExtestMethods) < 0) {\n"
<< " auto legacy = m.def_submodule(\"legacy\");\n"
<< " if (PyModule_AddFunctions(legacy.ptr(), ExtestMethods) < 0) {\n"
<< " PADDLE_THROW(platform::errors::Fatal (\"Add functions to "
"core.eager.ops failed!\"));\n"
<< " }\n\n"
<< " if (PyModule_AddFunctions(m.ptr(), CustomEagerMethods) < "
<< " if (PyModule_AddFunctions(legacy.ptr(), CustomEagerMethods) < "
"0) {\n"
<< " PADDLE_THROW(platform::errors::Fatal (\"Add functions to "
"core.eager.ops failed!\"));\n"
......
......@@ -806,14 +806,14 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
decrease_axis.end());
if (op_type == "slice") {
out = slice_final_state_dygraph_function(self->tensor,
slice_axes_tmp,
slice_starts,
slice_ends,
infer_flags_tmp,
decrease_axis_tmp);
out = slice_dygraph_function(self->tensor,
slice_axes_tmp,
slice_starts,
slice_ends,
infer_flags_tmp,
decrease_axis_tmp);
} else if (op_type == "strided_slice") {
out = strided_slice_final_state_dygraph_function(
out = strided_slice_dygraph_function(
self->tensor, slice_axes, slice_starts, slice_ends, slice_strides);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
......@@ -852,7 +852,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
}
paddle::experimental::Tensor new_out;
new_out = unsqueeze_final_state_dygraph_function(out, none_axes);
new_out = unsqueeze_dygraph_function(out, none_axes);
return ToPyObject(new_out);
}
}
......@@ -868,8 +868,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
paddle::framework::TensorFromVector(
list_select_idxs, *dev_ctx, idx_tensor.get());
framework::AttributeMap attrs = {{"dim", 0}};
out = index_select_final_state_dygraph_function(
self->tensor, select_index, 0);
out = index_select_dygraph_function(self->tensor, select_index, 0);
}
return ToPyObject(out);
......
......@@ -20,7 +20,7 @@ if __name__ == "__main__":
pybind_dir = sys.argv[1]
split_count = int(sys.argv[2])
empty_files = [os.path.join(pybind_dir, "eager_final_state_op_function.cc")]
empty_files = [os.path.join(pybind_dir, "eager_legacy_op_function.cc")]
empty_files.append(os.path.join(pybind_dir, "eager_op_function.cc"))
for i in range(split_count):
......
......@@ -43,10 +43,10 @@ taskkill /f /im cicc.exe /t 2>NUL
taskkill /f /im ptxas.exe /t 2>NUL
taskkill /f /im op_function_generator.exe /t 2>NUL
taskkill /f /im eager_generator.exe /t 2>NUL
taskkill /f /im eager_op_function_generator.exe /t 2>NUL
taskkill /f /im eager_legacy_op_function_generator.exe /t 2>NUL
wmic process where name="op_function_generator.exe" call terminate 2>NUL
wmic process where name="eager_generator.exe" call terminate 2>NUL
wmic process where name="eager_op_function_generator.exe" call terminate 2>NUL
wmic process where name="eager_legacy_op_function_generator.exe" call terminate 2>NUL
wmic process where name="cvtres.exe" call terminate 2>NUL
wmic process where name="rc.exe" call terminate 2>NUL
wmic process where name="cl.exe" call terminate 2>NUL
......@@ -531,10 +531,10 @@ taskkill /f /im cicc.exe /t 2>NUL
taskkill /f /im ptxas.exe /t 2>NUL
taskkill /f /im op_function_generator.exe /t 2>NUL
taskkill /f /im eager_generator.exe /t 2>NUL
taskkill /f /im eager_op_function_generator.exe /t 2>NUL
taskkill /f /im eager_legacy_op_function_generator.exe /t 2>NUL
wmic process where name="op_function_generator.exe" call terminate 2>NUL
wmic process where name="eager_generator.exe" call terminate 2>NUL
wmic process where name="eager_op_function_generator.exe" call terminate 2>NUL
wmic process where name="eager_legacy_op_function_generator.exe" call terminate 2>NUL
wmic process where name="cmake.exe" call terminate 2>NUL
wmic process where name="cvtres.exe" call terminate 2>NUL
wmic process where name="rc.exe" call terminate 2>NUL
......@@ -933,10 +933,10 @@ taskkill /f /im cicc.exe /t 2>NUL
taskkill /f /im ptxas.exe /t 2>NUL
taskkill /f /im op_function_generator.exe /t 2>NUL
taskkill /f /im eager_generator.exe /t 2>NUL
taskkill /f /im eager_op_function_generator.exe /t 2>NUL
taskkill /f /im eager_legacy_op_function_generator.exe /t 2>NUL
wmic process where name="op_function_generator.exe" call terminate 2>NUL
wmic process where name="eager_generator.exe" call terminate 2>NUL
wmic process where name="eager_op_function_generator.exe" call terminate 2>NUL
wmic process where name="eager_legacy_op_function_generator.exe" call terminate 2>NUL
wmic process where name="cvtres.exe" call terminate 2>NUL
wmic process where name="rc.exe" call terminate 2>NUL
wmic process where name="cl.exe" call terminate 2>NUL
......
......@@ -17,39 +17,6 @@ from .fluid import framework
__all__ = []
_already_switch_to_eager_ = False
if not framework._in_eager_mode_:
for name in dir(core.ops):
globals()[name] = getattr(core.ops, name)
__all__.append(name)
_already_switch_to_eager_ = False
else:
for name in dir(core.eager.ops):
globals()[name] = getattr(core.eager.ops, name)
__all__.append(name)
_already_switch_to_eager_ = True
def switch_to_core_ops():
global _already_switch_to_eager_
if _already_switch_to_eager_:
for name in dir(core.eager.ops):
del globals()[name]
__all__.remove(name)
for name in dir(core.ops):
globals()[name] = getattr(core.ops, name)
__all__.append(name)
_already_switch_to_eager_ = False
def switch_to_eager_ops():
global _already_switch_to_eager_
if not _already_switch_to_eager_:
for name in dir(core.ops):
del globals()[name]
__all__.remove(name)
for name in dir(core.eager.ops):
globals()[name] = getattr(core.eager.ops, name)
__all__.append(name)
_already_switch_to_eager_ = True
for name in dir(core.eager.ops):
globals()[name] = getattr(core.eager.ops, name)
__all__.append(name)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core
from .fluid import framework
__all__ = []
_already_switch_to_eager_ = False
if not framework._in_eager_mode_:
for name in dir(core.ops):
globals()[name] = getattr(core.ops, name)
__all__.append(name)
_already_switch_to_eager_ = False
else:
for name in dir(core.eager.ops.legacy):
globals()[name] = getattr(core.eager.ops.legacy, name)
__all__.append(name)
_already_switch_to_eager_ = True
def switch_to_core_ops():
global _already_switch_to_eager_
if _already_switch_to_eager_:
for name in dir(core.eager.ops.legacy):
del globals()[name]
__all__.remove(name)
for name in dir(core.ops):
globals()[name] = getattr(core.ops, name)
__all__.append(name)
_already_switch_to_eager_ = False
def switch_to_eager_ops():
global _already_switch_to_eager_
if not _already_switch_to_eager_:
for name in dir(core.ops):
del globals()[name]
__all__.remove(name)
for name in dir(core.eager.ops.legacy):
globals()[name] = getattr(core.eager.ops.legacy, name)
__all__.append(name)
_already_switch_to_eager_ = True
......@@ -37,7 +37,7 @@ from ..fluid.dygraph.parallel import prepare_context
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import paddle.fluid.dygraph_utils as dygraph_utils
import contextlib
......@@ -355,7 +355,7 @@ def barrier(group=None):
temp = fill_constant([1], dtype="int32", value="1")
if _non_static_mode():
return _C_ops.barrier(temp, temp, 'ring_id', ring_id)
return _legacy_C_ops.barrier(temp, temp, 'ring_id', ring_id)
op_type = 'barrier'
......@@ -657,7 +657,7 @@ def wait(tensor, group=None, use_calc_stream=True):
def _sync_calc_stream(tensor):
if _non_static_mode():
return _C_ops.c_sync_calc_stream(tensor, tensor)
return _legacy_C_ops.c_sync_calc_stream(tensor, tensor)
op_type = 'c_sync_calc_stream'
......@@ -672,7 +672,8 @@ def _sync_calc_stream(tensor):
def _sync_comm_stream(tensor, ring_id=0):
if _non_static_mode():
return _C_ops.c_sync_comm_stream([tensor], [tensor], 'ring_id', ring_id)
return _legacy_C_ops.c_sync_comm_stream([tensor], [tensor], 'ring_id',
ring_id)
op_type = 'c_sync_comm_stream'
......@@ -750,9 +751,9 @@ def broadcast(tensor, src, group=None, use_calc_stream=True):
assert gsrc >= 0, ("src rank out of group, need global rank")
if _non_static_mode():
return _C_ops.c_broadcast(tensor, tensor, 'root', gsrc,
'use_calc_stream', use_calc_stream, 'ring_id',
ring_id)
return _legacy_C_ops.c_broadcast(tensor, tensor, 'root', gsrc,
'use_calc_stream', use_calc_stream,
'ring_id', ring_id)
op_type = 'c_broadcast'
check_variable_and_dtype(
......@@ -830,17 +831,21 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, use_calc_stream=True):
ring_id = 0 if group is None else group.id
if _non_static_mode():
if op == ReduceOp.SUM:
return _C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id)
return _legacy_C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id)
elif op == ReduceOp.MAX:
return _C_ops.c_allreduce_max_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id)
return _legacy_C_ops.c_allreduce_max_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id)
elif op == ReduceOp.MIN:
return _C_ops.c_allreduce_min_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id)
return _legacy_C_ops.c_allreduce_min_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id)
elif op == ReduceOp.PROD:
return _C_ops.c_allreduce_prod_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id)
return _legacy_C_ops.c_allreduce_prod_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id)
else:
raise ValueError("Unknown parameter: {}.".format(op))
......@@ -931,21 +936,22 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, use_calc_stream=True):
if _non_static_mode():
if op == ReduceOp.SUM:
return _C_ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
return _legacy_C_ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, 'root_id', gdst)
elif op == ReduceOp.MAX:
return _C_ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
return _legacy_C_ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, 'root_id', gdst)
elif op == ReduceOp.MIN:
return _C_ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
return _legacy_C_ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, 'root_id', gdst)
elif op == ReduceOp.PROD:
return _C_ops.c_reduce_prod(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
return _legacy_C_ops.c_reduce_prod(tensor, tensor,
'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, 'root_id', gdst)
else:
raise ValueError("Unknown parameter: {}.".format(op))
......@@ -1052,8 +1058,9 @@ def all_gather(tensor_list, tensor, group=None, use_calc_stream=True):
nranks = _get_global_group().nranks if group is None else group.nranks
if _non_static_mode():
out = _C_ops.c_allgather(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'nranks', nranks)
out = _legacy_C_ops.c_allgather(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'nranks', nranks)
else:
op_type = 'c_allgather'
helper = LayerHelper(op_type, **locals())
......@@ -1237,9 +1244,9 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
return task
if _non_static_mode():
return _C_ops.c_scatter(temp, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id, 'nranks',
nranks, 'root', gsrc)
return _legacy_C_ops.c_scatter(temp, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'nranks', nranks, 'root', gsrc)
op_type = 'c_scatter'
check_variable_and_dtype(
tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
......@@ -1273,8 +1280,9 @@ def _c_identity(tensor, group=None):
ring_id = 0 if group is None else group.id
if _non_static_mode():
return _C_ops.c_identity(tensor, 'use_calc_stream', True, 'ring_id',
ring_id, 'use_model_parallel', True)
return _legacy_C_ops.c_identity(tensor, 'use_calc_stream', True,
'ring_id', ring_id,
'use_model_parallel', True)
op_type = 'c_identity'
helper = LayerHelper(op_type, **locals())
out = helper.create_variable_for_type_inference(dtype=tensor.dtype)
......@@ -1316,9 +1324,10 @@ def _c_concat(tensor, group=None):
nranks = group.nranks
if _non_static_mode():
return _C_ops.c_concat(tensor, 'ring_id', ring_id, 'use_calc_stream',
True, 'rank', rank, 'nranks', nranks,
'use_model_parallel', True)
return _legacy_C_ops.c_concat(tensor, 'ring_id', ring_id,
'use_calc_stream', True, 'rank', rank,
'nranks', nranks, 'use_model_parallel',
True)
op_type = 'c_concat'
helper = LayerHelper(op_type, **locals())
......@@ -1363,9 +1372,9 @@ def _c_split(tensor, group=None):
nranks = _get_global_env().world_size if group is None else group.nranks
if _non_static_mode():
return _C_ops.c_split(tensor, 'use_calc_stream', True, 'ring_id',
ring_id, 'rank', rank, 'nranks', nranks,
'use_model_parallel', True)
return _legacy_C_ops.c_split(tensor, 'use_calc_stream', True, 'ring_id',
ring_id, 'rank', rank, 'nranks', nranks,
'use_model_parallel', True)
op_type = 'c_split'
helper = LayerHelper(op_type, **locals())
......@@ -1410,26 +1419,27 @@ def _mp_allreduce(tensor,
def forward(ctx, tensor, use_calc_stream, ring_id,
use_model_parallel):
ctx.ring_id = ring_id
return _C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, "use_model_parallel",
use_model_parallel)
return _legacy_C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
use_calc_stream,
'ring_id', ring_id,
"use_model_parallel",
use_model_parallel)
@staticmethod
def backward(ctx, dy):
return _C_ops.c_identity(dy, 'use_calc_stream', True, 'ring_id',
ctx.ring_id, 'use_model_parallel',
True)
return _legacy_C_ops.c_identity(dy, 'use_calc_stream', True,
'ring_id', ctx.ring_id,
'use_model_parallel', True)
return mp_allreduce_eager.apply(tensor, use_calc_stream, ring_id,
use_model_parallel)
elif _in_legacy_dygraph():
if op == ReduceOp.SUM:
return _C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
"use_model_parallel",
use_model_parallel)
return _legacy_C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, "use_model_parallel",
use_model_parallel)
else:
raise ValueError("Unknown parameter: {}.".format(op))
......@@ -1467,7 +1477,8 @@ def _c_lookup_table(table, index, start_index=0, name=None):
Tensor.
"""
if _non_static_mode():
return _C_ops.c_embedding(table, index, "start_index", start_index)
return _legacy_C_ops.c_embedding(table, index, "start_index",
start_index)
op_type = 'c_embedding'
helper = LayerHelper(op_type, **locals())
......@@ -1543,7 +1554,7 @@ def _c_softmax_with_cross_entropy(logits,
label = paddle.unsqueeze(label, axis=-1)
if _non_static_mode():
softmax, loss = _C_ops.c_softmax_with_cross_entropy(
softmax, loss = _legacy_C_ops.c_softmax_with_cross_entropy(
logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks)
if not return_softmax:
return loss
......@@ -1581,8 +1592,8 @@ def _linear(x, weight, bias=None, name=None):
"""
if _non_static_mode():
pre_bias = _varbase_creator(dtype=x.dtype)
_C_ops.matmul(x, weight, pre_bias, 'transpose_X', False, 'transpose_Y',
False, "alpha", 1)
_legacy_C_ops.matmul(x, weight, pre_bias, 'transpose_X', False,
'transpose_Y', False, "alpha", 1)
return dygraph_utils._append_bias_in_dygraph(pre_bias,
bias,
axis=len(x.shape) - 1)
......@@ -2056,8 +2067,8 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
return
if _non_static_mode():
out = _C_ops.alltoall(temp, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id)
out = _legacy_C_ops.alltoall(temp, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id)
else:
op_type = 'alltoall'
helper = LayerHelper(op_type, **locals())
......@@ -2225,8 +2236,8 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
ring_id = 0 if group is None else group.id
if _non_static_mode():
return _C_ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'peer', dst)
return _legacy_C_ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'peer', dst)
op_type = 'send_v2'
check_variable_and_dtype(
tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
......@@ -2288,9 +2299,9 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
ring_id = 0 if group is None else group.id
if _non_static_mode():
return _C_ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'peer', src, 'dtype',
tensor.dtype, 'out_shape', tensor.shape)
return _legacy_C_ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'peer', src, 'dtype',
tensor.dtype, 'out_shape', tensor.shape)
op_type = 'recv_v2'
check_variable_and_dtype(
tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
......
......@@ -30,7 +30,7 @@ from paddle.fluid.dygraph import parallel_helper
from paddle.fluid.ir import apply_build_strategy
from .base import topology as tp
from .meta_parallel import model_parallel_random_seed
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import core
__all__ = []
......
......@@ -22,7 +22,7 @@ from paddle.fluid.framework import Variable
import types
from paddle.fluid import core
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = []
......@@ -66,8 +66,8 @@ class HybridParallelGradScaler:
param._grad_ivar() for param in optimizer._parameter_list
if param._grad_ivar() is not None
]
_C_ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
self._found_inf)
_legacy_C_ops.check_finite_and_unscale(param_grads, self._scale,
param_grads, self._found_inf)
# allreduce_max found_inf in check_group
if not self._use_dp_mode:
self._found_inf = paddle.cast(self._found_inf, dtype="int32")
......
......@@ -15,7 +15,7 @@
import paddle
import contextlib
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import core
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _non_static_mode, default_main_program, Variable
......@@ -205,9 +205,10 @@ def dropout(x,
# dygraph using tracker, doesn't need determinate seed
if _non_static_mode():
out, mask = _C_ops.dropout(x, 'dropout_prob', p, 'is_test',
not training, 'fix_seed', False, 'seed', 0,
'dropout_implementation', mode)
out, mask = _legacy_C_ops.dropout(x, 'dropout_prob', p, 'is_test',
not training, 'fix_seed', False,
'seed', 0, 'dropout_implementation',
mode)
return out
seed = determinate_seed(rng_name)
......
......@@ -16,7 +16,7 @@ import paddle
from .utils import paddle_2_number, number_2_dtype
from ...utils.log_util import logger
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import paddle.fluid.core as core
from paddle.fluid.framework import _in_legacy_dygraph, _non_static_mode, in_dygraph_mode
......@@ -166,9 +166,10 @@ def _is_valid_send_recv_partial(tensor, mp_degree):
def _partial_send_op(tensor, group, use_calc_stream, ring_id, dst, nranks,
rank_id):
if _in_legacy_dygraph():
return _C_ops.partial_send(tensor.detach(), 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id, 'peer',
dst, 'num', nranks, 'id', rank_id)
return _legacy_C_ops.partial_send(tensor.detach(), 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'peer', dst, 'num', nranks, 'id',
rank_id)
elif in_dygraph_mode():
group = paddle.distributed.collective._get_default_group(
) if group is None else group
......@@ -204,10 +205,11 @@ def send_partial(tensor,
def _partial_recv_op(tensor, group, use_calc_stream, ring_id, src, nranks,
rank_id):
if _in_legacy_dygraph():
return _C_ops.partial_recv(tensor.detach(), 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id, 'peer',
src, 'num', nranks, 'id', rank_id, 'dtype',
tensor.dtype, 'out_shape', tensor.shape)
return _legacy_C_ops.partial_recv(tensor.detach(), 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'peer', src, 'num', nranks, 'id',
rank_id, 'dtype', tensor.dtype,
'out_shape', tensor.shape)
elif in_dygraph_mode():
group = paddle.distributed.collective._get_default_group(
) if group is None else group
......@@ -243,9 +245,11 @@ def recv_partial(tensor,
def _partial_allgather_op(tensor, group, use_calc_stream, ring_id, nranks,
rank_id):
if _in_legacy_dygraph():
return _C_ops.partial_allgather_(tensor.detach(), 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'nranks', nranks, 'rank', rank_id)
return _legacy_C_ops.partial_allgather_(tensor.detach(),
'use_calc_stream',
use_calc_stream, 'ring_id',
ring_id, 'nranks', nranks,
'rank', rank_id)
elif in_dygraph_mode():
group = paddle.distributed.collective._get_default_group(
) if group is None else group
......
......@@ -16,7 +16,7 @@ import contextlib
import paddle
from paddle.fluid import core
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.autograd import PyLayer
from paddle.fluid import framework
from ...utils.recompute import check_recompute_necessary, detach_variable, swith_rng_state_tracker
......@@ -115,8 +115,8 @@ def _all_gather(tensor, group=None, use_calc_stream=True):
ring_id = 0 if group is None else group.id
nranks = paddle.distributed.collective._get_global_group(
).nranks if group is None else group.nranks
return _C_ops.c_allgather(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'nranks', nranks)
return _legacy_C_ops.c_allgather(tensor, 'use_calc_stream', use_calc_stream,
'ring_id', ring_id, 'nranks', nranks)
def _split_activation(tensor):
......
......@@ -19,7 +19,7 @@ import numpy as np
from types import MethodType
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import core
from paddle.fluid import layers
from paddle.fluid.dygraph import to_variable
......@@ -209,13 +209,15 @@ def GroupShardedScaler(scaler):
with device_guard(dev_id, device):
if len(param_grads_fp16):
_C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
param_grads_fp16,
temp_found_inf_fp16)
_legacy_C_ops.check_finite_and_unscale(param_grads_fp16,
self._scale,
param_grads_fp16,
temp_found_inf_fp16)
if len(param_grads_fp32):
_C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,
param_grads_fp32,
temp_found_inf_fp32)
_legacy_C_ops.check_finite_and_unscale(param_grads_fp32,
self._scale,
param_grads_fp32,
temp_found_inf_fp32)
self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0
is_found_inf = paddle.to_tensor([self._found_inf], dtype="int32")
......
......@@ -21,7 +21,7 @@ import numpy as np
from types import MethodType
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import core
from paddle.fluid import layers
from paddle.fluid.dygraph import to_variable
......@@ -210,13 +210,15 @@ def ShardingScaler(scaler):
with device_guard(dev_id, device):
if len(param_grads_fp16):
_C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
param_grads_fp16,
temp_found_inf_fp16)
_legacy_C_ops.check_finite_and_unscale(param_grads_fp16,
self._scale,
param_grads_fp16,
temp_found_inf_fp16)
if len(param_grads_fp32):
_C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,
param_grads_fp32,
temp_found_inf_fp32)
_legacy_C_ops.check_finite_and_unscale(param_grads_fp32,
self._scale,
param_grads_fp32,
temp_found_inf_fp32)
self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0
is_found_inf = paddle.to_tensor([self._found_inf], dtype="int32")
......
......@@ -20,7 +20,7 @@ from types import MethodType
from paddle.fluid import core
from paddle.fluid.dygraph import to_variable
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
def distributed_scaler(scaler):
......@@ -60,13 +60,15 @@ def distributed_scaler(scaler):
temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
if len(param_grads_fp16):
_C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
param_grads_fp16,
temp_found_inf_fp16)
_legacy_C_ops.check_finite_and_unscale(param_grads_fp16,
self._scale,
param_grads_fp16,
temp_found_inf_fp16)
if len(param_grads_fp32):
_C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,
param_grads_fp32,
temp_found_inf_fp32)
_legacy_C_ops.check_finite_and_unscale(param_grads_fp32,
self._scale,
param_grads_fp32,
temp_found_inf_fp32)
self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0
is_found_inf = paddle.to_tensor([self._found_inf], dtype="int32")
......
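
The three gradient scalers patched above share one unscale step. Below is a minimal standalone sketch of that call, not taken from the patch; the helper name unscale_grads is illustrative. check_finite_and_unscale runs in place, so the gradient list appears as both input and output and the result lands in the bool tensor.

import numpy as np
from paddle import _legacy_C_ops
from paddle.fluid.dygraph import to_variable


def unscale_grads(param_grads, scale):
    # bool tensor the op writes the found-inf flag into
    found_inf = to_variable(np.array([0]).astype(np.bool_))
    if len(param_grads):
        # in-place: param_grads is both the input and the output list
        _legacy_C_ops.check_finite_and_unscale(param_grads, scale,
                                               param_grads, found_inf)
    return found_inf
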
......@@ -16,7 +16,7 @@ from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode, _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
def _number_count(numbers, upper_range):
......@@ -42,7 +42,7 @@ def _number_count(numbers, upper_range):
print(number_count) # the result: [2, 0, 2, 0, 0, 0]
"""
if in_dygraph_mode():
return _C_ops.number_count(numbers, 'upper_range', upper_range)
return _legacy_C_ops.number_count(numbers, 'upper_range', upper_range)
elif _in_legacy_dygraph():
return core.ops.number_count(numbers, 'upper_range', upper_range)
else:
......@@ -89,7 +89,7 @@ def _assign_pos(x, cum_count):
print(pos) # the result: (2, 0, 3, 1)
"""
if in_dygraph_mode():
return _C_ops.assign_pos(x, cum_count, cum_count[-1])
return _legacy_C_ops.assign_pos(x, cum_count, cum_count[-1])
elif _in_legacy_dygraph():
return core.ops.assign_pos(x, cum_count, cum_count[-1])
else:
......@@ -124,7 +124,7 @@ def _random_routing(topk_idx, topk_value, prob, topk=2):
"""
if topk == 2:
if in_dygraph_mode():
return _C_ops.random_routing(prob, topk_value, topk_idx)
return _legacy_C_ops.random_routing(prob, topk_value, topk_idx)
elif _in_legacy_dygraph():
return core.ops.random_routing(prob, topk_value, topk_idx)
else:
......@@ -155,8 +155,8 @@ def _limit_by_capacity(expert_count, capacity, n_worker):
print(out) # the result: [1, 2, 2, 4, 3, 3]
"""
if in_dygraph_mode():
return _C_ops.limit_by_capacity(expert_count, capacity, 'n_worker',
n_worker)
return _legacy_C_ops.limit_by_capacity(expert_count, capacity,
'n_worker', n_worker)
elif _in_legacy_dygraph():
return core.ops.limit_by_capacity(expert_count, capacity, 'n_worker',
n_worker)
......@@ -202,8 +202,9 @@ def _prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker):
[1, 3, 3, 3, -1, 2, 1, 1])
"""
if in_dygraph_mode():
return _C_ops.prune_gate_by_capacity(gate_idx, expert_count, "n_expert",
n_expert, "n_worker", n_worker)
return _legacy_C_ops.prune_gate_by_capacity(gate_idx, expert_count,
"n_expert", n_expert,
"n_worker", n_worker)
elif _in_legacy_dygraph():
return core.ops.prune_gate_by_capacity(gate_idx, expert_count,
"n_expert", n_expert, "n_worker",
......
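
The expert-parallel helpers above show a second pattern: these call sites have no renamed positional kernel in the hunks, so even the in_dygraph_mode() branch now goes through _legacy_C_ops, while legacy dygraph keeps the core.ops spelling. A minimal sketch using number_count, not copied from the patch, with the static-graph branch omitted:

from paddle import _legacy_C_ops
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def number_count_dispatch(numbers, upper_range):
    if in_dygraph_mode():
        # eager mode still uses the attribute-style op, now via _legacy_C_ops
        return _legacy_C_ops.number_count(numbers, 'upper_range', upper_range)
    elif _in_legacy_dygraph():
        # old dygraph keeps the core.ops spelling
        return core.ops.number_count(numbers, 'upper_range', upper_range)
    raise NotImplementedError("static-graph branch omitted in this sketch")
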
......@@ -31,7 +31,7 @@ from distutils.util import strtobool
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [ #noqa
'get_host_name_ip',
......@@ -146,7 +146,7 @@ def global_scatter(x,
ring_id = 0 if group is None else group.id
if _non_static_mode():
return _C_ops.global_scatter(x, local_count, \
return _legacy_C_ops.global_scatter(x, local_count, \
global_count, \
'use_calc_stream', use_calc_stream, \
'ring_id', ring_id)
......@@ -259,7 +259,7 @@ def global_gather(x,
ring_id = 0 if group is None else group.id
if _non_static_mode():
return _C_ops.global_gather(x, local_count, \
return _legacy_C_ops.global_gather(x, local_count, \
global_count, \
'use_calc_stream', use_calc_stream, \
'ring_id', ring_id)
......
......@@ -17,7 +17,7 @@ import warnings
import numpy as np
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.distribution import distribution
from paddle.fluid import core
from paddle.fluid.data_feeder import (check_dtype, check_type,
......
......@@ -158,9 +158,9 @@ def _dirichlet(concentration, name=None):
['float32', 'float64'], op_type)
if in_dygraph_mode():
return paddle._C_ops.final_state_dirichlet(concentration)
elif _in_legacy_dygraph():
return paddle._C_ops.dirichlet(concentration)
elif _in_legacy_dygraph():
return paddle._legacy_C_ops.dirichlet(concentration)
else:
helper = LayerHelper(op_type, **locals())
out = helper.create_variable_for_type_inference(
......
......@@ -26,7 +26,7 @@ import warnings
import numpy as np
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import core
from paddle.fluid.data_feeder import (check_dtype, check_type,
check_variable_and_dtype, convert_dtype)
......@@ -221,8 +221,8 @@ class Distribution(object):
warnings.warn(
"dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted."
)
return _C_ops.cast(value, 'in_dtype', value.dtype, 'out_dtype',
param.dtype)
return _legacy_C_ops.cast(value, 'in_dtype', value.dtype,
'out_dtype', param.dtype)
return value
check_variable_and_dtype(value, 'value', ['float32', 'float64'],
......
......@@ -16,7 +16,7 @@ import math
import warnings
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.distribution import distribution
from paddle.fluid import core
from paddle.fluid.data_feeder import (check_dtype, check_type,
......
......@@ -16,7 +16,7 @@ import math
import warnings
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.distribution import distribution
from paddle.fluid import core
from paddle.fluid.data_feeder import (check_dtype, check_type,
......@@ -191,10 +191,10 @@ class Uniform(distribution.Distribution):
lb_bool = self.low < value
ub_bool = value < self.high
lb = _C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
value.dtype)
ub = _C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
value.dtype)
lb = _legacy_C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype,
'out_dtype', value.dtype)
ub = _legacy_C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype,
'out_dtype', value.dtype)
return nn.log(lb * ub) - nn.log(self.high - self.low)
name = self.name + '_log_prob'
......@@ -221,10 +221,10 @@ class Uniform(distribution.Distribution):
lb_bool = self.low < value
ub_bool = value < self.high
lb = _C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
value.dtype)
ub = _C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
value.dtype)
lb = _legacy_C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype,
'out_dtype', value.dtype)
ub = _legacy_C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype,
'out_dtype', value.dtype)
return (lb * ub) / (self.high - self.low)
name = self.name + '_probs'
......
......@@ -18,7 +18,7 @@ import paddle
from .tensor.attribute import is_complex, is_floating_point, is_integer
from .tensor.creation import _real_to_complex_dtype, _complex_to_real_dtype
from .fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from . import _C_ops
from . import _C_ops, _legacy_C_ops
from .fluid.data_feeder import check_variable_and_dtype
from .fluid.layer_helper import LayerHelper
......@@ -1404,10 +1404,10 @@ def fft_c2c(x, n, axis, norm, forward, name):
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
if in_dygraph_mode():
out = _C_ops.final_state_fft_c2c(x, axes, norm, forward)
out = _C_ops.fft_c2c(x, axes, norm, forward)
elif _in_legacy_dygraph():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_C_ops, op_type)(x, *attrs)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1440,11 +1440,11 @@ def fft_r2c(x, n, axis, norm, forward, onesided, name):
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], op_type)
if in_dygraph_mode():
out = _C_ops.final_state_fft_r2c(x, axes, norm, forward, onesided)
out = _C_ops.fft_r2c(x, axes, norm, forward, onesided)
elif _in_legacy_dygraph():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward,
'onesided', onesided)
out = getattr(_C_ops, op_type)(x, *attrs)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1486,16 +1486,16 @@ def fft_c2r(x, n, axis, norm, forward, name):
if in_dygraph_mode():
if n is not None:
out = _C_ops.final_state_fft_c2r(x, axes, norm, forward, n)
out = _C_ops.fft_c2r(x, axes, norm, forward, n)
else:
out = _C_ops.final_state_fft_c2r(x, axes, norm, forward, 0)
out = _C_ops.fft_c2r(x, axes, norm, forward, 0)
elif _in_legacy_dygraph():
if n is not None:
attrs = ('axes', axes, 'normalization', norm, 'forward', forward,
'last_dim_size', n)
else:
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_C_ops, op_type)(x, *attrs)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1549,10 +1549,10 @@ def fftn_c2c(x, s, axes, norm, forward, name):
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
if in_dygraph_mode():
out = _C_ops.final_state_fft_c2c(x, axes, norm, forward)
out = _C_ops.fft_c2c(x, axes, norm, forward)
elif _in_legacy_dygraph():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_C_ops, op_type)(x, *attrs)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1602,11 +1602,11 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name):
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], op_type)
if in_dygraph_mode():
out = _C_ops.final_state_fft_r2c(x, axes, norm, forward, onesided)
out = _C_ops.fft_r2c(x, axes, norm, forward, onesided)
elif _in_legacy_dygraph():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward,
'onesided', onesided)
out = getattr(_C_ops, op_type)(x, *attrs)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1668,16 +1668,16 @@ def fftn_c2r(x, s, axes, norm, forward, name):
if in_dygraph_mode():
if s is not None:
out = _C_ops.final_state_fft_c2r(x, axes, norm, forward, s[-1])
out = _C_ops.fft_c2r(x, axes, norm, forward, s[-1])
else:
out = _C_ops.final_state_fft_c2r(x, axes, norm, forward, 0)
out = _C_ops.fft_c2r(x, axes, norm, forward, 0)
elif _in_legacy_dygraph():
if s:
attrs = ('axes', axes, 'normalization', norm, 'forward', forward,
'last_dim_size', s[-1])
else:
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_C_ops, op_type)(x, *attrs)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......
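
The fft wrappers above all dispatch the same way; here is a minimal sketch of one of them, not copied from the patch: the eager branch calls the kernel by its new plain name with positional arguments, while the legacy branch still resolves the attribute-style op by name, now on _legacy_C_ops.

from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def fft_c2c_dispatch(x, axes, norm, forward):
    op_type = 'fft_c2c'
    if in_dygraph_mode():
        # renamed final-state kernel: positional arguments only
        return _C_ops.fft_c2c(x, axes, norm, forward)
    elif _in_legacy_dygraph():
        # attribute-style kernel looked up by name on _legacy_C_ops
        attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
        return getattr(_legacy_C_ops, op_type)(x, *attrs)
    raise NotImplementedError("static-graph branch omitted in this sketch")
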
......@@ -29,7 +29,7 @@ from .data_feeder import check_variable_and_dtype
from .framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
from .layer_helper import LayerHelper
from .framework import default_main_program
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'set_gradient_clip', 'ErrorClipByValue', 'ClipGradByValue',
......@@ -71,9 +71,9 @@ def _squared_l2_norm(x):
return sum_square
if in_dygraph_mode():
return _C_ops.final_state_squared_l2_norm(x)
elif _in_legacy_dygraph():
return _C_ops.squared_l2_norm(x)
elif _in_legacy_dygraph():
return _legacy_C_ops.squared_l2_norm(x)
op_type = 'squared_l2_norm'
check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
......
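
For a call site that keeps all three execution paths, the _squared_l2_norm hunk above reduces to the shape below. This is a sketch rather than the patched file; the static branch is reconstructed from the standard LayerHelper pattern that appears elsewhere in this diff, and the _dispatch suffix marks the function name as illustrative.

from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.data_feeder import check_variable_and_dtype


def squared_l2_norm_dispatch(x):
    if in_dygraph_mode():
        # eager: renamed final-state kernel
        return _C_ops.squared_l2_norm(x)
    elif _in_legacy_dygraph():
        # old dygraph: same op, exposed from _legacy_C_ops
        return _legacy_C_ops.squared_l2_norm(x)
    # static graph: unchanged helper-based path
    op_type = 'squared_l2_norm'
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
    helper = LayerHelper(op_type, **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": out})
    return out
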
......@@ -36,7 +36,7 @@ from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_
from paddle.fluid.layers import slice, reshape
import warnings
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'fused_elemwise_activation', 'sequence_topk_avg_pooling', 'var_conv_2d',
......@@ -1783,7 +1783,7 @@ def bilateral_slice(x, guide, grid, has_offset, name=None):
"""
if paddle.fluid._non_static_mode():
attrs = ('has_offset', has_offset)
return getattr(_C_ops, "bilateral_slice")(x, grid, guide, *attrs)
return getattr(_legacy_C_ops, "bilateral_slice")(x, grid, guide, *attrs)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'bilateral_slice')
check_variable_and_dtype(guide, 'guide', ['float32', 'float64'],
......@@ -1858,7 +1858,7 @@ def correlation(x,
attrs = ("pad_size", pad_size, "kernel_size", kernel_size,
"max_displacement", max_displacement, "stride1", stride1,
"stride2", stride2, "corr_type_multiply", corr_type_multiply)
output = getattr(_C_ops, "correlation")(x, y, *attrs)
output = getattr(_legacy_C_ops, "correlation")(x, y, *attrs)
else:
helper = LayerHelper("correlation", **locals())
output = helper.create_variable_for_type_inference(dtype=x.dtype)
......
......@@ -21,7 +21,7 @@ from paddle.fluid import unique_name
from paddle.fluid import layers
from paddle.fluid.layer_helper import LayerHelper
import warnings
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = ['Momentum']
......@@ -207,7 +207,7 @@ class Momentum(Optimizer):
if find_master else None)
if framework._non_static_mode():
_, _, _ = _C_ops.momentum(
_, _, _ = _legacy_C_ops.momentum(
param_and_grad[0], param_and_grad[1], velocity_acc, lr,
master_weight, param_and_grad[0], velocity_acc, master_weight,
'mu', self._momentum, 'use_nesterov', self._use_nesterov,
......
......@@ -20,7 +20,7 @@ from paddle.fluid.data_feeder import check_type
from ...wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import warnings
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from collections import defaultdict
from enum import Enum
......@@ -286,26 +286,26 @@ class AmpScaler(object):
if param.dtype == core.VarDesc.VarType.FP32
]
if core.is_compiled_with_npu():
float_status = _C_ops.alloc_float_status()
_C_ops.clear_float_status(float_status, float_status)
float_status = _legacy_C_ops.alloc_float_status()
_legacy_C_ops.clear_float_status(float_status, float_status)
if len(param_grads_fp16):
_C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
float_status, param_grads_fp16,
self._temp_found_inf_fp16)
_legacy_C_ops.check_finite_and_unscale(
param_grads_fp16, self._scale, float_status,
param_grads_fp16, self._temp_found_inf_fp16)
if len(param_grads_fp32):
_C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,
float_status, param_grads_fp32,
self._temp_found_inf_fp32)
_legacy_C_ops.check_finite_and_unscale(
param_grads_fp32, self._scale, float_status,
param_grads_fp32, self._temp_found_inf_fp32)
else:
if len(param_grads_fp16):
_C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
param_grads_fp16,
self._temp_found_inf_fp16)
_legacy_C_ops.check_finite_and_unscale(
param_grads_fp16, self._scale, param_grads_fp16,
self._temp_found_inf_fp16)
if len(param_grads_fp32):
_C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,
param_grads_fp32,
self._temp_found_inf_fp32)
_legacy_C_ops.check_finite_and_unscale(
param_grads_fp32, self._scale, param_grads_fp32,
self._temp_found_inf_fp32)
self._found_inf = self._temp_found_inf_fp16 or self._temp_found_inf_fp32
......
......@@ -30,7 +30,7 @@ from paddle.fluid.contrib.mixed_precision.decorator import AutoMixedPrecisionLis
from paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program, cast_model_to_fp16
from paddle.fluid.dygraph.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
import paddle.compat as cpt
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
class NestSequence(object):
......@@ -442,10 +442,11 @@ class PartialProgramLayer:
self._cast_fp16_if_pure_fp16(in_vars)
_C_ops.run_program(self._valid_vars(in_vars),
self._valid_vars(self._params),
self._valid_vars(out_vars), self._create_scope_vec(),
self._double_grads, self._cuda_graph_vec, *attrs)
_legacy_C_ops.run_program(self._valid_vars(in_vars),
self._valid_vars(self._params),
self._valid_vars(out_vars),
self._create_scope_vec(), self._double_grads,
self._cuda_graph_vec, *attrs)
restored_nest_out = self._restore_out(out_vars)
return self._remove_no_value(restored_nest_out)
......
......@@ -16,7 +16,7 @@ from ..wrapped_decorator import wrap_decorator
from ..framework import _non_static_mode
import warnings
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
# NOTE(pangyoki): The Inplace APIs with underline(`_`) is only valid for the method of calling `_C_ops`
......
......@@ -30,7 +30,7 @@ from paddle.fluid.layers import nn
from paddle.fluid.layers.utils import _hash_with_id
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.framework import _non_static_mode
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = ['TranslatedLayer']
......@@ -865,9 +865,10 @@ def _run_dygraph(instance, input, program_holder):
attrs = ('global_block', trace_program.block(0), 'start_op_index', 0,
'end_op_index', end_op_index, 'is_test', instance._is_test,
'program_id', _hash_with_id(trace_program, instance))
_C_ops.run_program(_valid_vars(input_vars), _valid_vars(persistable_vars),
_valid_vars(output_vars), tmp_scope_vec,
_valid_vars(double_grad_vars), None, *attrs)
_legacy_C_ops.run_program(_valid_vars(input_vars),
_valid_vars(persistable_vars),
_valid_vars(output_vars), tmp_scope_vec,
_valid_vars(double_grad_vars), None, *attrs)
# NOTE: [ why need set param's gradient type here ]
# if user set sparse gradient mode, the param's gradient
# will be SelectedRows, not LoDTensor. But tracer will just
......
......@@ -22,7 +22,7 @@ from .. import framework
import numpy as np
import warnings
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
_supported_int_dtype_ = [
core.VarDesc.VarType.UINT8,
......@@ -72,12 +72,13 @@ def monkey_patch_math_varbase():
@no_grad
def create_tensor(value, dtype, shape):
if framework._in_eager_mode_:
out = _C_ops.final_state_full(shape, value, dtype,
framework._current_expected_place())
out = _C_ops.full(shape, value, dtype,
framework._current_expected_place())
else:
out = _varbase_creator(dtype=dtype)
out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
'value', value, 'force_cpu', False)
out = _legacy_C_ops.fill_constant(out, 'dtype', dtype, 'shape',
shape, 'value', value,
'force_cpu', False)
out.stop_gradient = True
return out
......@@ -111,13 +112,14 @@ def monkey_patch_math_varbase():
dtype = convert_np_dtype_to_dtype_(dtype)
if _in_legacy_dygraph():
return _C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)
return _C_ops.final_state_cast(self, dtype)
return _legacy_C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype',
dtype)
return _C_ops.cast(self, dtype)
def _scalar_elementwise_op_(var, scale, bias):
if framework.in_dygraph_mode():
return _C_ops.final_state_scale(var, float(scale), bias, True)
return _C_ops.scale(var, 'scale', scale, 'bias', bias)
return _C_ops.scale(var, float(scale), bias, True)
return _legacy_C_ops.scale(var, 'scale', scale, 'bias', bias)
def _neg_(var):
return _scalar_elementwise_op_(var, -1.0, 0.0)
......@@ -174,9 +176,9 @@ def monkey_patch_math_varbase():
for i in range(len(var.shape)):
perm.insert(0, i)
if _in_legacy_dygraph():
out, _ = _C_ops.transpose2(var, 'axis', perm)
out, _ = _legacy_C_ops.transpose2(var, 'axis', perm)
else:
out = _C_ops.final_state_transpose(var, perm)
out = _C_ops.transpose(var, perm)
return out
def _scalar_add_(var, value):
......@@ -223,8 +225,7 @@ def monkey_patch_math_varbase():
# so the calculation result here and the calculation result of numpy are
# different after 6 decimal point. If necessary, we can also use float64 here.
# torch's behavior here is consistent with ours
if (op_type == "final_state_divide"
or op_type == "elementwise_div"
if (op_type == "divide" or op_type == "elementwise_div"
) and self.dtype in _supported_int_dtype_:
self = astype(self, 'float32')
# here use `scale` replace `elementwise` to get better performance
......@@ -281,17 +282,25 @@ def monkey_patch_math_varbase():
self = other_var
other_var = tmp
if (op_type == "final_state_divide" or op_type == "elementwise_div"
if (op_type == "divide" or op_type == "elementwise_div"
) and self.dtype in _supported_int_dtype_:
self = astype(self, 'float32')
other_var = astype(other_var, 'float32')
# 4. calculation
axis = -1
math_op = getattr(_C_ops, op_type)
if in_dygraph_mode():
math_op = getattr(_C_ops, op_type)
else:
math_op = getattr(_legacy_C_ops, op_type)
if call_final_api:
if op_type == "final_state_matmul":
if op_type == "matmul":
return math_op(self, other_var, False, False)
if op_type == "pow":
if isinstance(other_var, core.eager.Tensor):
return _C_ops.elementwise_pow(self, other_var)
else:
return _C_ops.elementwise_pow(self, other_var)
return math_op(self, other_var, -1)
return math_op(self, other_var, 'axis', axis)
......@@ -324,104 +333,95 @@ def monkey_patch_math_varbase():
('ndim', _ndim_),
('size', _size_),
('T', _T_),
('__add__',
_binary_creator_('__add__', 'final_state_add', False, _scalar_add_,
True)) if framework._in_eager_mode_ else
('__add__', _binary_creator_('__add__', 'add', False, _scalar_add_,
True)) if framework._in_eager_mode_ else
('__add__',
_binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
## a+b == b+a. Do not need to reverse explicitly
('__radd__',
_binary_creator_('__radd__', 'final_state_add', False, _scalar_add_,
True)) if framework._in_eager_mode_ else
_binary_creator_('__radd__', 'add', False, _scalar_add_, True))
if framework._in_eager_mode_ else
('__radd__',
_binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)),
('__sub__',
_binary_creator_('__sub__', 'final_state_subtract', False,
_scalar_sub_, True)) if framework._in_eager_mode_ else
_binary_creator_('__sub__', 'subtract', False, _scalar_sub_, True))
if framework._in_eager_mode_ else
('__sub__',
_binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_)),
('__rsub__',
_binary_creator_('__rsub__', 'final_state_subtract', True,
_scalar_rsub_, True))
_binary_creator_('__rsub__', 'subtract', True, _scalar_rsub_, True))
if framework._in_eager_mode_ else
('__rsub__',
_binary_creator_('__rsub__', 'elementwise_sub', True, _scalar_rsub_)),
('__mul__',
_binary_creator_('__mul__', 'final_state_multiply', False,
_scalar_mul_, True)) if framework._in_eager_mode_ else
_binary_creator_('__mul__', 'multiply', False, _scalar_mul_, True))
if framework._in_eager_mode_ else
('__mul__',
_binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_)),
## a*b == b*a. Do not need to reverse explicitly
('__rmul__',
_binary_creator_('__rmul__', 'final_state_multiply', False,
_scalar_mul_, True)) if framework._in_eager_mode_ else
_binary_creator_('__rmul__', 'multiply', False, _scalar_mul_, True))
if framework._in_eager_mode_ else
('__rmul__',
_binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
('__div__',
_binary_creator_('__div__', 'final_state_divide', False, _scalar_div_,
True)) if framework._in_eager_mode_ else
_binary_creator_('__div__', 'divide', False, _scalar_div_, True))
if framework._in_eager_mode_ else
('__div__',
_binary_creator_('__div__', 'elementwise_div', False, _scalar_div_)),
('__truediv__',
_binary_creator_('__truediv__', 'final_state_divide', False,
_scalar_div_, True)) if framework._in_eager_mode_ else
_binary_creator_('__truediv__', 'divide', False, _scalar_div_, True))
if framework._in_eager_mode_ else
('__truediv__',
_binary_creator_('__truediv__', 'elementwise_div', False,
_scalar_div_)),
('__rdiv__',
_binary_creator_('__rdiv__', 'final_state_divide', True, None, True))
('__rdiv__', _binary_creator_('__rdiv__', 'divide', True, None, True))
if framework._in_eager_mode_ else
('__rdiv__',
_binary_creator_('__rdiv__', 'elementwise_div', True, None)),
('__rtruediv__',
_binary_creator_('rtruediv__', 'final_state_divide', True, None, True))
_binary_creator_('rtruediv__', 'divide', True, None, True))
if framework._in_eager_mode_ else
('__rtruediv__',
_binary_creator_('rtruediv__', 'elementwise_div', True, None)),
('__pow__',
_binary_creator_('__pow__', 'final_state_elementwise_pow', False,
_C_ops.final_state_pow, True))
('__pow__', _binary_creator_('__pow__', 'pow', False, _C_ops.pow, True))
if framework._in_eager_mode_ else
('__pow__',
_binary_creator_('__pow__', 'elementwise_pow', False, None)),
('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
None)),
('__floordiv__',
_binary_creator_('__floordiv__', 'floor_divide', False, None, True))
if framework._in_eager_mode_ else
('__floordiv__',
_binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)),
('__mod__',
_binary_creator_('__mod__', 'final_state_modulo', False, None, True))
('__mod__', _binary_creator_('__mod__', 'modulo', False, None, True))
if framework._in_eager_mode_ else
('__mod__',
_binary_creator_('__mod__', 'elementwise_mod', False, None)),
('__matmul__',
_binary_creator_('__matmul__', "final_state_matmul", False, None,
True)) if framework._in_eager_mode_ else
_binary_creator_('__matmul__', "matmul", False, None, True))
if framework._in_eager_mode_ else
('__matmul__',
_binary_creator_('__matmul__', "matmul_v2", False, None)),
## for logical compare
('__eq__',
_binary_creator_('__eq__', 'final_state_equal', False, None, True))
('__eq__', _binary_creator_('__eq__', 'equal', False, None, True))
if framework._in_eager_mode_ else
('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
('__ne__',
_binary_creator_('__ne__', 'final_state_not_equal', False, None, True))
('__ne__', _binary_creator_('__ne__', 'not_equal', False, None, True))
if framework._in_eager_mode_ else
('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
('__lt__',
_binary_creator_('__lt__', 'final_state_less_than', False, None, True))
('__lt__', _binary_creator_('__lt__', 'less_than', False, None, True))
if framework._in_eager_mode_ else
('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
('__le__',
_binary_creator_('__le__', 'final_state_less_equal', False, None,
True)) if framework._in_eager_mode_ else
('__le__', _binary_creator_('__le__', 'less_equal', False, None, True))
if framework._in_eager_mode_ else
('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
('__gt__',
_binary_creator_('__gt__', 'final_state_greater_than', False, None,
True)) if framework._in_eager_mode_ else
('__gt__', _binary_creator_('__gt__', 'greater_than', False, None,
True)) if framework._in_eager_mode_ else
('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
('__ge__',
_binary_creator_('__ge__', 'final_state_greater_equal', False, None,
True)) if framework._in_eager_mode_ else
('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None,
True)) if framework._in_eager_mode_ else
('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
('__array_ufunc__', None)
]
......
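
Two calling-convention differences that the math_op_patch hunks above switch between, shown as a standalone sketch rather than the patched module; both helper names are illustrative, and both functions assume a dygraph tensor, as in the original monkey-patched methods.

from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def transpose_all_axes(var):
    perm = list(reversed(range(len(var.shape))))
    if _in_legacy_dygraph():
        # legacy transpose2 takes 'axis' as an attribute pair and also
        # returns an XShape tensor, discarded here
        out, _ = _legacy_C_ops.transpose2(var, 'axis', perm)
    else:
        # renamed final-state op: positional perm, single output
        out = _C_ops.transpose(var, perm)
    return out


def scale_by(var, scale, bias=0.0):
    if in_dygraph_mode():
        # final-state signature: scale(x, scale, bias, bias_after_scale)
        return _C_ops.scale(var, float(scale), bias, True)
    return _legacy_C_ops.scale(var, 'scale', scale, 'bias', bias)
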
......@@ -33,7 +33,7 @@ import numbers
import logging
import os
import paddle.utils.deprecated as deprecated
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
......@@ -240,10 +240,10 @@ class Conv2D(layers.Layer):
def forward(self, input):
if in_dygraph_mode() and self._l_type == "conv2d":
pre_bias = _C_ops.final_state_conv2d(
input, self.weight, self._stride, self._padding, "EXPLICIT",
self._groups if self._groups else 1, self._dilation, "NCHW",
False, -1, False)
pre_bias = _C_ops.conv2d(input, self.weight, self._stride,
self._padding, "EXPLICIT",
self._groups if self._groups else 1,
self._dilation, "NCHW", False, -1, False)
if self.bias is not None:
pre_act = F.elementwise_add(pre_bias, self.bias, axis=1)
else:
......@@ -257,7 +257,7 @@ class Conv2D(layers.Layer):
'dilations', self._dilation, 'groups',
self._groups if self._groups else 1, 'use_cudnn',
self._use_cudnn, 'use_mkldnn', self._use_mkldnn)
out = _C_ops.conv2d(input, self.weight, *attrs)
out = _legacy_C_ops.conv2d(input, self.weight, *attrs)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(
......@@ -892,7 +892,7 @@ class Pool2D(layers.Layer):
'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
'use_mkldnn', self._use_mkldnn, 'exclusive',
self._exclusive, 'data_format', self._data_format)
return _C_ops.pool2d(input, *attrs)
return _legacy_C_ops.pool2d(input, *attrs)
check_variable_and_dtype(
input, 'input', ['int8', 'uint8', 'float16', 'float32', 'float64'],
......@@ -997,9 +997,9 @@ class Linear(layers.Layer):
def forward(self, input):
if _non_static_mode():
pre_bias = _varbase_creator(dtype=input.dtype)
_C_ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
'transpose_Y', False, "alpha", 1, "use_mkldnn",
self._use_mkldnn)
_legacy_C_ops.matmul(input, self.weight, pre_bias, 'transpose_X',
False, 'transpose_Y', False, "alpha", 1,
"use_mkldnn", self._use_mkldnn)
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias,
self.bias,
......@@ -1144,12 +1144,13 @@ class InstanceNorm(layers.Layer):
def forward(self, input):
if in_dygraph_mode():
out = _C_ops.final_state_instance_norm(input, self.scale, self.bias,
self._epsilon)
out = _C_ops.instance_norm(input, self.scale, self.bias,
self._epsilon)
return out
if _in_legacy_dygraph():
out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
'epsilon', self._epsilon)
out, _, _ = _legacy_C_ops.instance_norm(input, self.scale,
self.bias, 'epsilon',
self._epsilon)
return out
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
......@@ -1360,7 +1361,7 @@ class BatchNorm(layers.Layer):
if _non_static_mode():
if in_dygraph_mode():
batch_norm_out, t1, t2, t3, t4, _ = _C_ops.final_state_batch_norm(
batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
input, self.weight, self.bias, self._mean, self._variance,
self._momentum, self._epsilon, self._data_layout,
not self.training, self._use_global_stats,
......@@ -1375,7 +1376,7 @@ class BatchNorm(layers.Layer):
"fuse_with_relu", self._fuse_with_relu,
"use_global_stats", self._use_global_stats,
'trainable_statistics', self._trainable_statistics)
batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
input, self.weight, self.bias, self._mean, self._variance,
None, mean_out, variance_out, *attrs)
......@@ -1529,7 +1530,7 @@ class Dropout(layers.Layer):
if _non_static_mode():
attrs = sum(attrs.items(), ())
out, mask = _C_ops.dropout(input, *attrs)
out, mask = _legacy_C_ops.dropout(input, *attrs)
return out
out = self._helper.create_variable_for_type_inference(dtype=input.dtype)
......@@ -1681,12 +1682,10 @@ class Embedding(layers.Layer):
def forward(self, input):
if _non_static_mode():
return _C_ops.lookup_table_v2(self.weight, input, 'is_sparse',
self._is_sparse, 'is_distributed',
self._is_distributed,
'remote_prefetch',
self._remote_prefetch, 'padding_idx',
self._padding_idx)
return _legacy_C_ops.lookup_table_v2(
self.weight, input, 'is_sparse', self._is_sparse,
'is_distributed', self._is_distributed, 'remote_prefetch',
self._remote_prefetch, 'padding_idx', self._padding_idx)
check_variable_and_dtype(input, 'input',
['uint8', 'int8', 'int16', 'int32', 'int64'],
......@@ -1841,16 +1840,15 @@ class LayerNorm(layers.Layer):
if _non_static_mode():
if in_dygraph_mode():
pre_act, _, _, = _C_ops.final_state_layer_norm(
input, self.weight, self.bias, self._epsilon,
self._begin_norm_axis, False)
pre_act, _, _, = _C_ops.layer_norm(input, self.weight,
self.bias, self._epsilon,
self._begin_norm_axis, False)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
else:
pre_act, _, _ = _C_ops.layer_norm(input, self.weight, self.bias,
'epsilon', self._epsilon,
'begin_norm_axis',
self._begin_norm_axis)
pre_act, _, _ = _legacy_C_ops.layer_norm(
input, self.weight, self.bias, 'epsilon', self._epsilon,
'begin_norm_axis', self._begin_norm_axis)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
......@@ -2036,7 +2034,7 @@ class GRUUnit(layers.Layer):
def forward(self, input, hidden):
if _non_static_mode():
gate, reset_hidden_pre, updated_hidden = _C_ops.gru_unit(
gate, reset_hidden_pre, updated_hidden = _legacy_C_ops.gru_unit(
input, hidden, self.weight, self.bias, 'activation',
self.activation, 'gate_activation', self.gate_activation)
return updated_hidden, reset_hidden_pre, gate
......@@ -2286,12 +2284,12 @@ class NCE(layers.Layer):
self._attrs['seed'], 'sampler', self._attrs['sampler'],
'is_sparse', self._attrs['is_sparse'], 'remote_prefetch',
self._attrs['remote_prefetch'])
cost, _, _ = _C_ops.nce(input, label, self.weight, self.bias,
self._inputs['SampleWeight'],
self._inputs['CustomDistProbs'],
self._inputs['CustomDistAlias'],
self._inputs['CustomDistAliasProbs'],
*attrs)
cost, _, _ = _legacy_C_ops.nce(input, label, self.weight, self.bias,
self._inputs['SampleWeight'],
self._inputs['CustomDistProbs'],
self._inputs['CustomDistAlias'],
self._inputs['CustomDistAliasProbs'],
*attrs)
return cost / (self._num_neg_samples + 1)
check_variable_and_dtype(input, "input", ['float32', 'float64'], "NCE")
......@@ -2731,7 +2729,7 @@ class Conv2DTranspose(layers.Layer):
def forward(self, input):
if _non_static_mode():
op = getattr(_C_ops, self._op_type)
op = getattr(_legacy_C_ops, self._op_type)
out = op(input, self.weight, 'output_size', self._output_size,
'strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups,
......@@ -3032,16 +3030,15 @@ class GroupNorm(layers.Layer):
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
if in_dygraph_mode():
out = _C_ops.final_state_group_norm(input, self.weight, self.bias,
self._epsilon, self._groups,
"NCHW")
out = _C_ops.group_norm(input, self.weight, self.bias,
self._epsilon, self._groups, "NCHW")
return dygraph_utils._append_activation_in_dygraph(out, self._act)
elif _in_legacy_dygraph():
attrs = ('epsilon', self._epsilon, 'groups', self._groups)
out, _, _ = _C_ops.group_norm(input, self.weight, self.bias,
mean_out, variance_out, *attrs)
out, _, _ = _legacy_C_ops.group_norm(input, self.weight, self.bias,
mean_out, variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(out, self._act)
else:
......
......@@ -22,7 +22,7 @@ import warnings
from contextlib import contextmanager
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph import layers
......@@ -346,7 +346,7 @@ def _split_tensors(coalesced_grads_and_grad_vars):
attrs = ()
attrs += ('sections', grad_var_len)
attrs += ('axis', 0)
_C_ops.split(coalesced_grad, origin_grad_vars, *attrs)
_legacy_C_ops.split(coalesced_grad, origin_grad_vars, *attrs)
for g_var, g_shape in zip(origin_grad_vars, grad_shapes):
g_var.reshape_(shape=g_shape)
assert g_var.shape == g_shape
......
......@@ -19,11 +19,11 @@ import six
from collections import defaultdict
from paddle.fluid import core
from paddle.fluid import framework
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
final_state_name_mapping = {
name_mapping = {
"graph_send_recv": {
"final_op_name": "final_state_graph_send_recv",
"final_op_name": "graph_send_recv",
"x": "X",
"src_index": "Src_index",
"dst_index": "Dst_index",
......@@ -31,7 +31,7 @@ final_state_name_mapping = {
"dst_count": "Dst_count"
},
"matmul_v2": {
"final_op_name": "final_state_matmul",
"final_op_name": "matmul",
"transpose_x": "trans_x",
"transpose_y": "trans_y",
"x": "X",
......@@ -39,33 +39,33 @@ final_state_name_mapping = {
"out": "Out",
},
# "elementwise_add": {
# "final_op_name": "final_state_add",
# "final_op_name": "add",
# "x": "X",
# "y": "Y",
# },
"trunc": {
"final_op_name": "final_state_trunc",
"final_op_name": "trunc",
"x": "X",
"out": "Out",
},
# "pool2d": {
# "final_op_name": "final_state_pool2d",
# "final_op_name": "pool2d",
# "x": "X",
# "kernel_size": "ksize",
# "out": "Out",
# },
"abs": {
"final_op_name": "final_state_abs",
"final_op_name": "abs",
"x": "X",
"out": "Out",
},
"digamma": {
"final_op_name": "final_state_digamma",
"final_op_name": "digamma",
"x": "X",
"out": "Out",
},
"diagonal": {
"final_op_name": "final_state_diagonal",
"final_op_name": "diagonal",
"x": "Input",
"offset": "offset",
"axis1": "axis1",
......@@ -73,7 +73,7 @@ final_state_name_mapping = {
"out": "Out",
},
"roi_align": {
"final_op_name": "final_state_roi_align",
"final_op_name": "roi_align",
"x": "X",
"boxes": "ROIs",
"boxes_num": "RoisNum",
......@@ -84,7 +84,7 @@ final_state_name_mapping = {
"aligned": "aligned",
},
# "one_hot": {
# "final_op_name": "final_state_one_hot",
# "final_op_name": "one_hot",
# "x": "X",
# "num_class": "depth",
# "out": "Out",
......@@ -110,22 +110,22 @@ class Tracer(core.Tracer):
self._train_mode = True
def eager_trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
function_ptr = _C_ops.__dict__[type]
def eager_legacy_trace_op(self,
op_type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
function_ptr = _legacy_C_ops.__dict__[op_type]
core_ops_args_info = _C_ops.get_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_core_ops_args_type_info()
core_ops_returns_info = _C_ops.get_core_ops_returns_info()
core_ops_args_info = _legacy_C_ops.get_core_ops_args_info()
core_ops_args_type_info = _legacy_C_ops.get_core_ops_args_type_info()
core_ops_returns_info = _legacy_C_ops.get_core_ops_returns_info()
op_args = core_ops_args_info[type]
op_args_type = core_ops_args_type_info[type]
op_returns = core_ops_returns_info[type]
op_args = core_ops_args_info[op_type]
op_args_type = core_ops_args_type_info[op_type]
op_returns = core_ops_returns_info[op_type]
arg_list = []
for i in range(len(op_args)):
......@@ -175,7 +175,7 @@ class Tracer(core.Tracer):
attrs_list.append(v)
returns = function_ptr(*arg_list, *attrs_list)
if type == 'load_combine':
if op_type == 'load_combine':
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
for j in range(len(returns)):
......@@ -211,34 +211,33 @@ class Tracer(core.Tracer):
else:
outputs[key].reconstruct_from_(returns, False)
def eager_final_state_trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
assert type in final_state_name_mapping.keys()
def eager_trace_op(self,
op_type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
assert op_type in name_mapping.keys()
final_state_type = final_state_name_mapping[type]["final_op_name"]
function_ptr = _C_ops.__dict__[final_state_type]
op_type = name_mapping[op_type]["final_op_name"]
function_ptr = _C_ops.__dict__[op_type]
core_ops_args_info = _C_ops.get_final_state_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_final_state_core_ops_args_type_info(
)
core_ops_returns_info = _C_ops.get_final_state_core_ops_returns_info()
core_ops_args_info = _C_ops.get_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_core_ops_args_type_info()
core_ops_returns_info = _C_ops.get_core_ops_returns_info()
op_args = core_ops_args_info[final_state_type]
op_args_type = core_ops_args_type_info[final_state_type]
op_returns = core_ops_returns_info[final_state_type]
op_args = core_ops_args_info[op_type]
op_args_type = core_ops_args_type_info[op_type]
op_returns = core_ops_returns_info[op_type]
arg_list = []
for i in range(len(op_args)):
eager_arg_name = op_args[i]
arg_type = op_args_type[i]
assert eager_arg_name in final_state_name_mapping[type].keys()
arg_name = final_state_name_mapping[type][eager_arg_name]
assert eager_arg_name in name_mapping[op_type].keys()
arg_name = name_mapping[op_type][eager_arg_name]
if arg_name in inputs.keys():
arg_to_append = inputs[arg_name]
......@@ -271,8 +270,8 @@ class Tracer(core.Tracer):
for i in range(len(op_returns)):
eager_retname = op_returns[i]
assert eager_retname in final_state_name_mapping[type].keys()
retname = final_state_name_mapping[type][eager_retname]
assert eager_retname in name_mapping[op_type].keys()
retname = name_mapping[op_type][eager_retname]
if retname in outputs.keys():
# Replaced outputs by function returns
if isinstance(returns[i], list):
......@@ -304,16 +303,15 @@ class Tracer(core.Tracer):
if not framework._in_legacy_dygraph():
# inputs : {"sum": [tensor], ...}
# outputs : {"sum": [tensor], ...}
if type in final_state_name_mapping.keys():
final_state_type = final_state_name_mapping[type][
"final_op_name"]
if type in name_mapping.keys():
type = name_mapping[type]["final_op_name"]
assert final_state_type in _C_ops.__dict__
self.eager_final_state_trace_op(type, inputs, outputs, attrs,
stop_gradient, inplace_map)
else:
assert type in _legacy_C_ops.__dict__
self.eager_trace_op(type, inputs, outputs, attrs, stop_gradient,
inplace_map)
else:
self.eager_legacy_trace_op(type, inputs, outputs, attrs,
stop_gradient, inplace_map)
else:
self.trace(type, inputs, outputs, attrs,
framework._current_expected_place(), self._has_grad
......
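
With the table renamed from final_state_name_mapping to name_mapping, the tracer's eager path resolves an operator in two steps: translate the old op type to the eager name, then look the function up on _C_ops. A minimal sketch of that lookup with a single table entry copied from the hunks above; resolve_eager_op is an illustrative name, not a method of the real Tracer.

from paddle import _C_ops

# one entry of the renamed table, as it appears above
name_mapping = {
    "matmul_v2": {
        "final_op_name": "matmul",
        "transpose_x": "trans_x",
        "transpose_y": "trans_y",
        "x": "X",
        "y": "Y",
        "out": "Out",
    },
}


def resolve_eager_op(op_type):
    # mirrors the first steps of Tracer.eager_trace_op
    assert op_type in name_mapping
    eager_name = name_mapping[op_type]["final_op_name"]
    return eager_name, _C_ops.__dict__[eager_name]

# resolve_eager_op("matmul_v2") should yield ("matmul", _C_ops.matmul)
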
......@@ -31,7 +31,7 @@ from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE
import paddle.utils.deprecated as deprecated
import paddle.profiler as profiler
from paddle.profiler.utils import in_profiler_mode
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
_grad_scalar = None
......@@ -818,13 +818,13 @@ def monkey_patch_varbase():
@framework.dygraph_only
def clone(self):
if in_dygraph_mode():
return _C_ops.final_state_assign(self)
return _C_ops.assign(self)
if _in_legacy_dygraph():
output = core.VarBase()
else:
output = core.eager.Tensor()
return _C_ops.assign(self, output)
return _legacy_C_ops.assign(self, output)
@framework.dygraph_only
def value(self):
......@@ -925,7 +925,7 @@ def monkey_patch_varbase():
"""
if self.is_sparse_coo() or self.is_sparse_csr():
return _C_ops.final_state_sparse_values(self)
return _C_ops.sparse_values(self)
else:
raise ValueError(
"only SparseCooTensor and SparseCsrTensor have method values")
......@@ -957,9 +957,9 @@ def monkey_patch_varbase():
"""
if self.is_sparse_coo():
return _C_ops.final_state_sparse_coo_to_dense(self)
return _C_ops.sparse_coo_to_dense(self)
elif self.is_sparse_csr():
return _C_ops.final_state_sparse_to_dense(self)
return _C_ops.sparse_to_dense(self)
else:
return self
......@@ -988,7 +988,7 @@ def monkey_patch_varbase():
"""
if self.is_sparse_csr():
return _C_ops.final_state_sparse_to_sparse_coo(self, sparse_dim)
return _C_ops.sparse_to_sparse_coo(self, sparse_dim)
elif self.is_sparse_coo():
return self
elif self.is_selected_rows():
......@@ -996,7 +996,7 @@ def monkey_patch_varbase():
"SelectedRows does not support to_sparse_coo method")
else:
#is dense tensor
return _C_ops.final_state_sparse_dense_to_coo(self, sparse_dim)
return _C_ops.sparse_dense_to_coo(self, sparse_dim)
if framework._in_eager_mode_ and not hasattr(core, "eager"):
return
......
......@@ -14,7 +14,7 @@
from . import core
from .framework import dygraph_only, in_dygraph_mode
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
@dygraph_only
......@@ -41,7 +41,7 @@ def _append_activation_in_dygraph(input,
if use_mkldnn:
attrs += ('use_mkldnn', use_mkldnn)
act_op = getattr(_C_ops, act)
act_op = getattr(_legacy_C_ops, act)
return act_op(input, *attrs)
......@@ -60,5 +60,5 @@ def _append_bias_in_dygraph(input, bias=None, axis=1, use_mkldnn=False):
if bias is None:
return input
return _C_ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn',
use_mkldnn)
return _legacy_C_ops.elementwise_add(input, bias, 'axis', axis,
'use_mkldnn', use_mkldnn)
......@@ -115,7 +115,7 @@ def _update_monkey_methods(is_eager):
Update monkey methods of VarBase or eager.Tensor while
switching eager mode and legacy mode.
"""
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from .dygraph.varbase_patch_methods import monkey_patch_varbase
from .dygraph import monkey_patch_math_varbase
......@@ -125,7 +125,7 @@ def _update_monkey_methods(is_eager):
assert isinstance(is_eager, bool)
# switch into eager mode
if is_eager:
_C_ops.switch_to_eager_ops()
_legacy_C_ops.switch_to_eager_ops()
if not _already_patch_eager_tensor:
monkey_patch_varbase()
monkey_patch_math_varbase()
......@@ -133,7 +133,7 @@ def _update_monkey_methods(is_eager):
_already_patch_eager_tensor = True
# switch back into legacy mode
else:
_C_ops.switch_to_core_ops()
_legacy_C_ops.switch_to_core_ops()
if not _already_patch_varbase:
monkey_patch_varbase()
monkey_patch_math_varbase()
......
......@@ -25,7 +25,7 @@ import numpy as np
from .core import VarDesc
from . import unique_name
from .data_feeder import check_variable_and_dtype, check_type, check_dtype
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import paddle
__all__ = [
......@@ -167,14 +167,15 @@ class ConstantInitializer(Initializer):
place = _current_expected_place()
if self._force_cpu:
place = core.CPUPlace()
_C_ops.final_state_full_(var, var.shape, str(float(self._value)),
var.dtype, place)
_C_ops.full_(var, var.shape, str(float(self._value)), var.dtype,
place)
return None
elif _in_legacy_dygraph():
_C_ops.fill_constant(var, 'value', float(self._value),
'force_cpu', self._force_cpu, 'dtype',
int(var.dtype), 'str_value',
str(float(self._value)), 'shape', var.shape)
_legacy_C_ops.fill_constant(var, 'value', float(self._value),
'force_cpu', self._force_cpu, 'dtype',
int(var.dtype), 'str_value',
str(float(self._value)), 'shape',
var.shape)
return None
else:
op = block.append_op(type="fill_constant",
......@@ -274,13 +275,13 @@ class UniformInitializer(Initializer):
out_var = var
if framework._non_static_mode():
out_var = _C_ops.uniform_random(
out_var = _legacy_C_ops.uniform_random(
'shape', var.shape, 'min', self._low, 'max', self._high, 'seed',
self._seed, 'dtype', out_dtype, 'diag_num', self._diag_num,
'diag_step', self._diag_step, 'diag_val', self._diag_val)
if var.dtype == VarDesc.VarType.FP16:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......@@ -378,26 +379,25 @@ class NormalInitializer(Initializer):
if in_dygraph_mode():
place = _current_expected_place()
out_var = _C_ops.final_state_gaussian_random(
var.shape, self._mean, self._std_dev, self._seed, out_dtype,
place)
out_var = _C_ops.gaussian_random(var.shape, self._mean,
self._std_dev, self._seed,
out_dtype, place)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
var_tmp = _C_ops.cast(out_var, var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
if _in_legacy_dygraph():
out_var = _C_ops.gaussian_random('shape', var.shape, 'dtype',
out_dtype, 'mean', self._mean,
'std', self._std_dev, 'seed',
self._seed, 'use_mkldnn', False)
out_var = _legacy_C_ops.gaussian_random(
'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,
'std', self._std_dev, 'seed', self._seed, 'use_mkldnn', False)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......@@ -486,25 +486,23 @@ class TruncatedNormalInitializer(Initializer):
out_var = var
if in_dygraph_mode():
out_var = _C_ops.final_state_truncated_gaussian_random(
out_var = _C_ops.truncated_gaussian_random(
var.shape, self._mean, self._std_dev, self._seed, out_dtype,
_current_expected_place())
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
var_tmp = _C_ops.cast(out_var, var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
if _in_legacy_dygraph():
out_var = _C_ops.truncated_gaussian_random('shape', var.shape,
'dtype', out_dtype,
'mean', self._mean,
'std', self._std_dev,
'seed', self._seed)
out_var = _legacy_C_ops.truncated_gaussian_random(
'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,
'std', self._std_dev, 'seed', self._seed)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......@@ -632,34 +630,34 @@ class XavierInitializer(Initializer):
if self._uniform:
limit = math.sqrt(6.0 / float(fan_in + fan_out))
if in_dygraph_mode():
out_var = _C_ops.final_state_uniform_random(
out_var.shape, out_dtype, -limit, limit, self._seed,
_current_expected_place())
out_var = _C_ops.uniform_random(out_var.shape, out_dtype,
-limit, limit, self._seed,
_current_expected_place())
elif _in_legacy_dygraph():
out_var = _C_ops.uniform_random('shape', out_var.shape,
'min', -limit, 'max', limit,
'seed', self._seed, 'dtype',
out_dtype)
out_var = _legacy_C_ops.uniform_random(
'shape', out_var.shape, 'min', -limit, 'max', limit,
'seed', self._seed, 'dtype', out_dtype)
else:
std = math.sqrt(2.0 / float(fan_in + fan_out))
if in_dygraph_mode():
place = _current_expected_place()
out_var = _C_ops.final_state_gaussian_random(
out_var.shape, 0.0, std, self._seed, out_dtype, place)
out_var = _C_ops.gaussian_random(out_var.shape, 0.0, std,
self._seed, out_dtype,
place)
else:
out_var = _C_ops.gaussian_random('shape', out_var.shape,
'dtype', out_dtype, 'mean',
0.0, 'std', std, 'seed',
self._seed)
out_var = _legacy_C_ops.gaussian_random(
'shape', out_var.shape, 'dtype', out_dtype, 'mean', 0.0,
'std', std, 'seed', self._seed)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
if in_dygraph_mode():
var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
var_tmp = _C_ops.cast(out_var, var.dtype)
elif _in_legacy_dygraph():
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype',
out_var.dtype, 'out_dtype',
var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......@@ -807,28 +805,28 @@ class MSRAInitializer(Initializer):
gain = calculate_gain(self._nonlinearity, self._negative_slope)
limit = gain * math.sqrt(3.0 / float(fan_in))
out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
-limit, 'max', limit, 'seed',
self._seed, 'dtype',
int(out_dtype))
out_var = _legacy_C_ops.uniform_random('shape', out_var.shape,
'min', -limit, 'max',
limit, 'seed',
self._seed, 'dtype',
int(out_dtype))
else:
gain = calculate_gain(self._nonlinearity, self._negative_slope)
std = gain / math.sqrt(float(fan_in))
if in_dygraph_mode():
place = _current_expected_place()
out_var = _C_ops.final_state_gaussian_random(
out_var.shape, 0.0, std, self._seed, out_dtype, place)
out_var = _C_ops.gaussian_random(out_var.shape, 0.0, std,
self._seed, out_dtype,
place)
else:
out_var = _C_ops.gaussian_random('shape',
out_var.shape, 'dtype',
int(out_dtype), 'mean',
0.0, 'std', std, 'seed',
self._seed)
out_var = _legacy_C_ops.gaussian_random(
'shape', out_var.shape, 'dtype', int(out_dtype), 'mean',
0.0, 'std', std, 'seed', self._seed)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......@@ -988,14 +986,14 @@ class BilinearInitializer(Initializer):
raise ValueError("The size of input is too big. ")
if framework._non_static_mode():
_C_ops.assign_value(out_var, 'shape', list(shape), 'dtype',
out_dtype, value_name, values)
_legacy_C_ops.assign_value(out_var, 'shape', list(shape), 'dtype',
out_dtype, value_name, values)
if var.dtype in [
VarDesc.VarType.FP16, VarDesc.VarType.BF16,
VarDesc.VarType.FP64
]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......@@ -1095,11 +1093,12 @@ class NumpyArrayInitializer(Initializer):
"saving it to file and 'load_op' to load it")
if framework._non_static_mode():
_C_ops.assign_value(out_var, 'shape', list(self._value.shape),
'dtype', out_dtype, value_name, values)
_legacy_C_ops.assign_value(out_var, 'shape',
list(self._value.shape), 'dtype',
out_dtype, value_name, values)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
......
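For readers following the rename, here is a minimal sketch (not part of the diff) of the dispatch pattern the initializer hunks above converge on, assuming a Paddle build where both the eager _C_ops and the legacy _legacy_C_ops bindings are importable; cast_to_var_dtype is a hypothetical helper name used only for illustration.

import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

def cast_to_var_dtype(out_var, var):
    # New eager API: attributes are positional and the result is returned.
    if in_dygraph_mode():
        return _C_ops.cast(out_var, var.dtype)
    # Legacy dygraph API: attributes are passed as ('name', value) pairs.
    if _in_legacy_dygraph():
        return _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                  'out_dtype', var.dtype)
    # The static-graph branch (block.append_op) is omitted in this sketch.
    raise NotImplementedError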
......@@ -16,7 +16,7 @@ from __future__ import print_function
from ..layer_helper import LayerHelper, unique_name
from ..framework import Variable, in_dygraph_mode, _in_legacy_dygraph
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
def _allreduce(x, out=None, reduce_type="sum", sync_mode=False):
......@@ -124,7 +124,7 @@ def _c_allgather(x, nranks, ring_id=0, use_calc_stream=False):
if _in_legacy_dygraph():
attrs = ('nranks', nranks, 'ring_id', ring_id, 'use_calc_stream',
use_calc_stream)
return _C_ops.c_allgather(x, *attrs)
return _legacy_C_ops.c_allgather(x, *attrs)
helper = LayerHelper(op_type, **locals())
out_shape = list(x.shape[:])
......
......@@ -29,7 +29,7 @@ from functools import reduce, partial
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ... import compat as cpt
from ..backward import _infer_var_data_type_shape_
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than',
......@@ -1567,7 +1567,7 @@ def increment(x, value=1.0, in_place=True):
fluid.layers.increment(counter) # [1.]
"""
if in_dygraph_mode():
return _C_ops.final_state_increment_(x, value)
return _C_ops.increment_(x, value)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'increment')
......@@ -1891,7 +1891,7 @@ def greater_than(x, y, cond=None, name=None):
attrs = dict()
if in_dygraph_mode():
return _C_ops.final_state_greater_than(x, y, -1)
return _C_ops.greater_than(x, y, -1)
else:
helper.append_op(type='greater_than',
inputs={
......@@ -1990,7 +1990,7 @@ def equal(x, y, cond=None, name=None):
"""
if in_dygraph_mode():
default_axis = -1
return _C_ops.final_state_equal(x, y, default_axis)
return _C_ops.equal(x, y, default_axis)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"equal")
......@@ -4050,9 +4050,9 @@ def is_empty(x, name=None):
"""
if in_dygraph_mode():
return _C_ops.final_state_is_empty(x)
if _in_legacy_dygraph():
return _C_ops.is_empty(x)
if _in_legacy_dygraph():
return _legacy_C_ops.is_empty(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'is_empty')
......
......@@ -36,7 +36,7 @@ import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from ..framework import in_dygraph_mode
__all__ = [
......@@ -951,16 +951,13 @@ def box_coder(prior_box,
'box_coder')
if in_dygraph_mode():
if isinstance(prior_box_var, Variable):
box_coder_op = _C_ops.final_state_box_coder(prior_box,
prior_box_var,
target_box, code_type,
box_normalized, axis,
[])
box_coder_op = _C_ops.box_coder(prior_box, prior_box_var,
target_box, code_type,
box_normalized, axis, [])
elif isinstance(prior_box_var, list):
box_coder_op = _C_ops.final_state_box_coder(prior_box, None,
target_box, code_type,
box_normalized, axis,
prior_box_var)
box_coder_op = _C_ops.box_coder(prior_box, None, target_box,
code_type, box_normalized, axis,
prior_box_var)
else:
raise TypeError(
"Input variance of box_coder must be Variable or lisz")
......@@ -1121,7 +1118,8 @@ def yolov3_loss(x,
class_num, "ignore_thresh", ignore_thresh, "downsample_ratio",
downsample_ratio, "use_label_smooth", use_label_smooth,
"scale_x_y", scale_x_y)
loss, _, _ = _C_ops.yolov3_loss(x, gt_box, gt_label, gt_score, *attrs)
loss, _, _ = _legacy_C_ops.yolov3_loss(x, gt_box, gt_label, gt_score,
*attrs)
return loss
helper = LayerHelper('yolov3_loss', **locals())
......@@ -1912,10 +1910,9 @@ def prior_box(
step_w, step_h = steps
if max_sizes == None:
max_sizes = []
return _C_ops.final_state_prior_box(input, image, min_sizes,
aspect_ratios, variance, max_sizes,
flip, clip, step_w, step_h, offset,
min_max_aspect_ratios_order)
return _C_ops.prior_box(input, image, min_sizes, aspect_ratios,
variance, max_sizes, flip, clip, step_w, step_h,
offset, min_max_aspect_ratios_order)
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input',
......@@ -3646,7 +3643,7 @@ def matrix_nms(bboxes,
attrs = (score_threshold, nms_top_k, keep_top_k, post_threshold,
use_gaussian, gaussian_sigma, background_label, normalized)
out, index = _C_ops.final_state_matrix_nms(bboxes, scores, *attrs)
out, index = _C_ops.matrix_nms(bboxes, scores, *attrs)
if return_index:
return out, index
else:
......@@ -3930,7 +3927,7 @@ def collect_fpn_proposals(multi_rois,
if _non_static_mode():
assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
attrs = ('post_nms_topN', post_nms_top_n)
output_rois, rois_num = _C_ops.collect_fpn_proposals(
output_rois, rois_num = _legacy_C_ops.collect_fpn_proposals(
input_rois, input_scores, rois_num_per_level, *attrs)
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
......
......@@ -24,7 +24,7 @@ from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'generate_layer_fn', 'generate_activation_fn', 'generate_inplace_fn',
......@@ -260,14 +260,13 @@ def generate_activation_fn(op_type):
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
def func(x, name=None):
final_state_op_type = "final_state_%s" % op_type
if in_dygraph_mode() and hasattr(_C_ops, final_state_op_type):
op = getattr(_C_ops, final_state_op_type)
if in_dygraph_mode() and hasattr(_C_ops, op_type):
op = getattr(_C_ops, op_type)
return op(x)
# TODO(dev): Because some ops' yaml has not been migrated.
# Replace it with _in_legacy_dygraph while all yaml work is done.
if _non_static_mode():
op = getattr(_C_ops, op_type)
op = getattr(_legacy_C_ops, op_type)
return op(x)
if op_type not in ["abs", "exp", "square"]:
......@@ -308,7 +307,7 @@ def generate_inplace_fn(inplace_op_type):
def func(x, name=None):
if _non_static_mode():
op = getattr(_C_ops, inplace_op_type)
op = getattr(_legacy_C_ops, inplace_op_type)
return op(x)
warnings.warn(
"In static mode, {}() is the same as {}() and does not perform inplace operation."
......
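A minimal sketch (not part of the diff) of the fallback logic that generate_activation_fn performs after this change, assuming the op is registered in at least one of the two bindings; call_activation is a hypothetical wrapper name used only for illustration.

from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import in_dygraph_mode, _non_static_mode

def call_activation(op_type, x):
    # Prefer the new eager op when its yaml has been migrated, i.e. the
    # attribute now exists on _C_ops without the final_state_ prefix.
    if in_dygraph_mode() and hasattr(_C_ops, op_type):
        return getattr(_C_ops, op_type)(x)
    # Otherwise fall back to the legacy binding in any dygraph mode.
    if _non_static_mode():
        return getattr(_legacy_C_ops, op_type)(x)
    raise NotImplementedError('static-graph path omitted in this sketch')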
......@@ -28,7 +28,7 @@ from ..param_attr import ParamAttr
from ..initializer import NumpyArrayInitializer, Constant
from .. import core
import warnings
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'center_loss',
......@@ -266,8 +266,9 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
return cross_entropy2(input, label, ignore_index)
if _non_static_mode():
return _C_ops.cross_entropy(input, label, "soft_label", soft_label,
"ignore_index", ignore_index)
return _legacy_C_ops.cross_entropy(input, label, "soft_label",
soft_label, "ignore_index",
ignore_index)
inputs = {'X': [input], 'Label': [label]}
attrs = {"soft_label": soft_label, "ignore_index": ignore_index}
......@@ -285,8 +286,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
if _non_static_mode():
loss, _, _ = _C_ops.cross_entropy2(input, label, 'ignore_index',
ignore_index)
loss, _, _ = _legacy_C_ops.cross_entropy2(input, label, 'ignore_index',
ignore_index)
return loss
inputs = {'X': [input], 'Label': [label]}
......@@ -549,16 +550,15 @@ def warpctc(input,
raise ValueError(
"input_length and label_length must not be None in dygraph mode!"
)
loss_out = _C_ops.final_state_warpctc(input, label, input_length,
label_length, blank,
norm_by_times)
loss_out = _C_ops.warpctc(input, label, input_length, label_length,
blank, norm_by_times)
return loss_out
if _non_static_mode():
if input_length is None or label_length is None:
raise ValueError(
"input_length and label_length must not be None in dygraph mode!"
)
grad, loss_out = _C_ops.warpctc(
grad, loss_out = _legacy_C_ops.warpctc(
input,
label,
input_length,
......@@ -1058,16 +1058,16 @@ def sampled_softmax_with_cross_entropy(logits,
'uniq', True, 'remove_accidental_hits',
remove_accidental_hits, 'num_samples',
num_samples, 'seed', seed)
_, _, _, _, sampled_logits_out, sampled_label_out = _C_ops.sample_logits(
_, _, _, _, sampled_logits_out, sampled_label_out = _legacy_C_ops.sample_logits(
logits, label, *sample_logits_attrs)
depth = num_samples + 1
sampled_softlabel_out = _C_ops.one_hot(sampled_label_out, 'depth',
depth)
sampled_softlabel_out = _legacy_C_ops.one_hot(sampled_label_out,
'depth', depth)
softmax_with_cross_entropy_attrs = ('soft_label', True,
'numeric_stable_mode', False)
_, loss = _C_ops.softmax_with_cross_entropy(
_, loss = _legacy_C_ops.softmax_with_cross_entropy(
sampled_logits_out, sampled_softlabel_out,
*softmax_with_cross_entropy_attrs)
return loss / num_true
......@@ -1280,7 +1280,7 @@ def identity_loss(x, reduction="none"):
raise Exception("Unsupported reduction type.")
if _non_static_mode():
return _C_ops.identity_loss(x, "reduction", reduction)
return _legacy_C_ops.identity_loss(x, "reduction", reduction)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "identity_loss")
attrs = {'reduction': reduction}
......@@ -1455,8 +1455,8 @@ def sigmoid_cross_entropy_with_logits(x,
"""
if in_dygraph_mode():
return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
x, label, normalize, int(ignore_index))
return _C_ops.sigmoid_cross_entropy_with_logits(x, label, normalize,
int(ignore_index))
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'sigmoid_cross_entropy_with_logits')
......@@ -1585,7 +1585,7 @@ def huber_loss(input, label, delta):
print(HuberLoss) #[[1.5], [0.5], [0.5], [0. ]], dtype=float32
"""
if in_dygraph_mode():
out, residual = _C_ops.final_state_huber_loss(input, label, delta)
out, residual = _C_ops.huber_loss(input, label, delta)
return out
helper = LayerHelper('huber_loss', **locals())
......
......@@ -26,7 +26,7 @@ from ..param_attr import ParamAttr
from . import nn
from . import tensor
from ..data_feeder import check_variable_and_dtype
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = ['accuracy', 'auc']
......@@ -76,10 +76,10 @@ def accuracy(input, label, k=1, correct=None, total=None):
total = _varbase_creator(dtype="int32")
_k = k.numpy().item(0) if isinstance(k, Variable) else k
topk_out, topk_indices = _C_ops.top_k_v2(input, 'k', _k, 'sorted',
False)
_acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
total)
topk_out, topk_indices = _legacy_C_ops.top_k_v2(input, 'k', _k,
'sorted', False)
_acc, _, _ = _legacy_C_ops.accuracy(topk_out, topk_indices, label,
correct, total)
return _acc
helper = LayerHelper("accuracy", **locals())
......
This diff is collapsed.
......@@ -19,7 +19,7 @@ from .. import core
from ..framework import convert_np_dtype_to_dtype_, Variable, in_dygraph_mode
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import paddle
__deprecated_func_name__ = {
......@@ -818,7 +818,7 @@ _erf_ = generate_layer_fn('erf')
def erf(x, name=None):
if in_dygraph_mode():
return _C_ops.final_state_erf(x)
return _C_ops.erf(x)
locals_var = locals().copy()
kwargs = dict()
......
......@@ -20,7 +20,7 @@ from ..framework import core, Variable, _non_static_mode, in_dygraph_mode, _in_l
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..core import VarDesc
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'sequence_conv',
......
......@@ -32,7 +32,7 @@ from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, con
from paddle.utils import deprecated
from .utils import check_shape
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'create_tensor',
......@@ -245,12 +245,12 @@ def cast(x, dtype):
if in_dygraph_mode():
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return _C_ops.final_state_cast(x, dtype)
return _C_ops.cast(x, dtype)
if _non_static_mode():
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return out
check_variable_and_dtype(x, 'x', [
......@@ -329,7 +329,7 @@ def concat(input, axis=0, name=None):
axis = axis.item(0)
if not isinstance(input, Variable):
input = [t for t in input if t.shape.count(0) == 0]
out = _C_ops.final_state_concat(input, axis)
out = _C_ops.concat(input, axis)
return out
if _in_legacy_dygraph():
......@@ -339,7 +339,7 @@ def concat(input, axis=0, name=None):
if not isinstance(input, Variable):
input = [t for t in input if t.shape.count(0) == 0]
out = _varbase_creator()
_C_ops.concat(input, out, 'axis', axis)
_legacy_C_ops.concat(input, out, 'axis', axis)
return out
check_type(input, 'input', (list, tuple, Variable), 'concat')
......@@ -633,14 +633,14 @@ def assign(input, output=None):
if isinstance(input, (Variable, core.VarBase)):
if _non_static_mode():
if in_dygraph_mode() and output is None:
output = _C_ops.final_state_assign(input)
output = _C_ops.assign(input)
else:
if output is None:
if _in_legacy_dygraph():
output = core.VarBase()
else:
output = core.eager.Tensor()
_C_ops.assign(input, output)
_legacy_C_ops.assign(input, output)
else:
check_dtype(input.dtype, 'input', [
'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
......@@ -690,13 +690,13 @@ def assign(input, output=None):
if in_dygraph_mode():
if output is None:
output = zeros(list(input.shape), dtype)
_C_ops.final_state_assign_value_(output, list(input.shape), dtype,
values, _current_expected_place())
_C_ops.assign_value_(output, list(input.shape), dtype, values,
_current_expected_place())
elif _in_legacy_dygraph():
if output is None:
output = core.VarBase()
_C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
dtype, value_name, values)
_legacy_C_ops.assign_value(output, 'shape', list(input.shape),
'dtype', dtype, value_name, values)
else:
if output is None:
output = helper.create_variable_for_type_inference(
......@@ -790,13 +790,13 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
dtype = convert_np_dtype_to_dtype_(dtype)
if out is None:
out = _C_ops.final_state_full(shape, float(value), dtype, place)
out = _C_ops.full(shape, float(value), dtype, place)
out.stop_gradient = True
return out
if out is not None:
# final state mode is support out is not None.
_C_ops.final_state_full_(out, shape, float(value), dtype, place)
_C_ops.full_(out, shape, float(value), dtype, place)
out.stop_gradient = True
return out
......@@ -811,9 +811,9 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
else:
attrs['str_value'] = str(float(value.numpy().item(0)))
_C_ops.fill_constant(out, 'value', float(value), 'force_cpu', force_cpu,
'dtype', out.dtype, 'str_value',
attrs['str_value'], 'shape', shape)
_legacy_C_ops.fill_constant(out, 'value', float(value), 'force_cpu',
force_cpu, 'dtype', out.dtype, 'str_value',
attrs['str_value'], 'shape', shape)
out.stop_gradient = True
return out
......@@ -903,9 +903,8 @@ def fill_constant_batch_size_like(input,
place = _current_expected_place()
if force_cpu:
place = core.CPUPlace()
out = _C_ops.final_state_full_batch_size_like(input, shape, dtype,
value, input_dim_idx,
output_dim_idx, place)
out = _C_ops.full_batch_size_like(input, shape, dtype, value,
input_dim_idx, output_dim_idx, place)
out.stop_gradient = True
return out
......@@ -1284,9 +1283,9 @@ def reverse(x, axis):
axis = [axis]
if in_dygraph_mode():
if x.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
return _C_ops.final_state_reverse_array(x, axis)
return _C_ops.reverse_array(x, axis)
else:
return _C_ops.final_state_reverse(x, axis)
return _C_ops.reverse(x, axis)
helper = LayerHelper("reverse", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='reverse',
......@@ -1390,7 +1389,7 @@ def has_inf(x):
"""
if _non_static_mode():
return _C_ops.isinf(x)
return _legacy_C_ops.isinf(x)
check_type(x, 'x', (Variable), 'has_inf')
helper = LayerHelper("isinf", **locals())
......@@ -1419,7 +1418,7 @@ def has_nan(x):
"""
if _non_static_mode():
return _C_ops.isnan(x)
return _legacy_C_ops.isnan(x)
check_type(x, 'x', (Variable), 'has_nan')
helper = LayerHelper("isnan", **locals())
......@@ -1536,11 +1535,10 @@ def range(start, end, step, dtype, name=None):
step = cast(step, dtype)
if in_dygraph_mode():
return _C_ops.final_state_arange(start, end, step, dtype,
_current_expected_place())
return _C_ops.arange(start, end, step, dtype, _current_expected_place())
if _in_legacy_dygraph():
out = _C_ops.range(start, end, step)
out = _legacy_C_ops.range(start, end, step)
out.stop_gradient = True
return out
......@@ -1609,11 +1607,10 @@ def linspace(start, stop, num, dtype=None, name=None):
with device_guard("cpu"):
tensor_num = fill_constant([1], 'int32', num)
if in_dygraph_mode():
return _C_ops.final_state_linspace(tensor_start, tensor_stop,
tensor_num, dtype)
return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
if _in_legacy_dygraph():
return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
dtype)
return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
'dtype', dtype)
helper = LayerHelper("linspace", **locals())
start_dtype = convert_dtype(tensor_start.dtype)
......@@ -1803,11 +1800,11 @@ def eye(num_rows,
num_columns = num_rows
if in_dygraph_mode():
out = _C_ops.final_state_eye(num_rows, num_columns, dtype,
_current_expected_place())
out = _C_ops.eye(num_rows, num_columns, dtype,
_current_expected_place())
elif _in_legacy_dygraph():
out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
num_columns)
out = _legacy_C_ops.eye('dtype', dtype, 'num_rows', num_rows,
'num_columns', num_columns)
else:
helper = LayerHelper("eye", **locals())
check_dtype(dtype, 'dtype',
......@@ -1830,8 +1827,8 @@ def eye(num_rows,
re_shape = re_shape + [num_rows, num_columns]
expand_times = batch_shape + [1, 1]
if _non_static_mode():
out = _C_ops.reshape(out, 'shape', re_shape)
return _C_ops.expand(out, None, 'expand_times', expand_times)
out = _legacy_C_ops.reshape(out, 'shape', re_shape)
return _legacy_C_ops.expand(out, None, 'expand_times', expand_times)
if not isinstance(batch_shape, list):
raise TypeError("batch_shape should be a list")
......
......@@ -43,7 +43,7 @@ from functools import cmp_to_key
from .wrapped_decorator import signature_safe_contextmanager
from .. import compat as cpt
import warnings
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _current_expected_place
__all__ = [
......@@ -445,14 +445,14 @@ class Optimizer(object):
if current_lr is not None:
if in_dygraph_mode():
place = _current_expected_place()
_C_ops.final_state_full_(current_lr, list(current_lr.shape),
float(value), current_lr.dtype,
place)
_C_ops.full_(current_lr, list(current_lr.shape),
float(value), current_lr.dtype, place)
elif _in_legacy_dygraph():
_C_ops.fill_constant(current_lr, 'value', float(value),
'dtype', current_lr.dtype, 'shape',
list(current_lr.shape))
_legacy_C_ops.fill_constant(current_lr, 'value',
float(value), 'dtype',
current_lr.dtype, 'shape',
list(current_lr.shape))
else:
global_block = framework.default_main_program(
).global_block()
......@@ -949,7 +949,7 @@ class Optimizer(object):
assert regularization_term is not None
if framework._non_static_mode():
return _C_ops.sum([grad, regularization_term])
return _legacy_C_ops.sum([grad, regularization_term])
new_grad = grad
if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
......@@ -1376,12 +1376,12 @@ class SGDOptimizer(Optimizer):
lr = self._create_param_lr(param_and_grad)
if in_dygraph_mode():
_C_ops.final_state_sgd_(param_and_grad[0], lr, param_and_grad[1],
master_weight, find_master)
_C_ops.sgd_(param_and_grad[0], lr, param_and_grad[1], master_weight,
find_master)
return None
if _in_legacy_dygraph():
_C_ops.sgd(param_and_grad[0], lr, param_and_grad[1], master_weight,
param_and_grad[0], master_weight)
_legacy_C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
master_weight, param_and_grad[0], master_weight)
return None
assert isinstance(block, framework.Block)
......@@ -1514,11 +1514,10 @@ class MomentumOptimizer(Optimizer):
lr = self._create_param_lr(param_and_grad)
master_weight = None
if framework._non_static_mode():
_, _, _ = _C_ops.momentum(param_and_grad[0], param_and_grad[1],
velocity_acc, lr, master_weight,
param_and_grad[0], velocity_acc,
master_weight, 'mu', self._momentum,
'use_nesterov', self._use_nesterov)
_, _, _ = _legacy_C_ops.momentum(
param_and_grad[0], param_and_grad[1], velocity_acc, lr,
master_weight, param_and_grad[0], velocity_acc, master_weight,
'mu', self._momentum, 'use_nesterov', self._use_nesterov)
return None
attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov}
......@@ -2171,7 +2170,7 @@ class LarsMomentumOptimizer(Optimizer):
outputs["MasterParamOut"] = master_weight
if framework._non_static_mode():
tmp, tmp2 = _C_ops.lars_momentum(
tmp, tmp2 = _legacy_C_ops.lars_momentum(
[param_and_grad[0]], [param_and_grad[1]], [velocity_acc], [lr],
[param_and_grad[0]], [velocity_acc], "mu", self._momentum,
"lars_coeff", self._lars_coeff, "lars_weight_decay",
......@@ -2286,16 +2285,16 @@ class AdagradOptimizer(Optimizer):
moment_acc = self._get_accumulator(self._moment_acc_str,
param_and_grad[0])
if in_dygraph_mode():
_C_ops.final_state_adagrad_(param_and_grad[0], param_and_grad[1],
moment_acc,
self._create_param_lr(param_and_grad),
self._epsilon)
_C_ops.adagrad_(param_and_grad[0], param_and_grad[1], moment_acc,
self._create_param_lr(param_and_grad),
self._epsilon)
return None
elif _in_legacy_dygraph():
_C_ops.adagrad(param_and_grad[0], param_and_grad[1], moment_acc,
self._create_param_lr(param_and_grad),
param_and_grad[0], moment_acc, "epsilon",
self._epsilon)
_legacy_C_ops.adagrad(param_and_grad[0], param_and_grad[1],
moment_acc,
self._create_param_lr(param_and_grad),
param_and_grad[0], moment_acc, "epsilon",
self._epsilon)
return None
else:
# Create the adagrad optimizer op
......@@ -2574,7 +2573,7 @@ class AdamOptimizer(Optimizer):
_beta2 = self._beta2 if not isinstance(
self._beta2, Variable) else self._beta2.numpy().item(0)
master_weight = None
_, _, _, _, _, _ = _C_ops.adam(
_, _, _, _, _, _ = _legacy_C_ops.adam(
param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
beta1_pow_acc, beta2_pow_acc, master_weight, param_and_grad[0],
moment1, moment2, beta1_pow_acc, beta2_pow_acc, master_weight,
......@@ -2813,16 +2812,16 @@ class AdamaxOptimizer(Optimizer):
param_and_grad[0])
if framework.in_dygraph_mode():
_C_ops.final_state_adamax_(param_and_grad[0], param_and_grad[1],
self._create_param_lr(param_and_grad),
moment, inf_norm, beta1_pow_acc,
self._beta1, self._beta2, self._epsilon)
_C_ops.adamax_(param_and_grad[0], param_and_grad[1],
self._create_param_lr(param_and_grad), moment,
inf_norm, beta1_pow_acc, self._beta1, self._beta2,
self._epsilon)
elif framework._in_legacy_dygraph():
_C_ops.adamax(param_and_grad[0], param_and_grad[1],
self._create_param_lr(param_and_grad), moment,
inf_norm, beta1_pow_acc, param_and_grad[0], moment,
inf_norm, "beta1", self._beta1, "beta2", self._beta2,
"epsilon", self._epsilon)
_legacy_C_ops.adamax(param_and_grad[0], param_and_grad[1],
self._create_param_lr(param_and_grad), moment,
inf_norm, beta1_pow_acc, param_and_grad[0],
moment, inf_norm, "beta1", self._beta1,
"beta2", self._beta2, "epsilon", self._epsilon)
else:
# create the adamax optimize op
adamax_op = block.append_op(
......@@ -2862,10 +2861,11 @@ class AdamaxOptimizer(Optimizer):
param)
if framework._non_static_mode():
if framework.in_dygraph_mode():
tmp = _C_ops.final_state_scale(beta1_pow_acc,
self._beta1, 0.0, True)
tmp = _C_ops.scale(beta1_pow_acc, self._beta1, 0.0,
True)
else:
tmp = _C_ops.scale(beta1_pow_acc, "scale", self._beta1)
tmp = _legacy_C_ops.scale(beta1_pow_acc, "scale",
self._beta1)
beta1_pow_acc.copy_(tmp, False)
else:
block.append_op(type="scale",
......@@ -2952,11 +2952,11 @@ class DpsgdOptimizer(Optimizer):
self._seed = 0
if framework._non_static_mode():
_C_ops.dpsgd(param_and_grad[0], param_and_grad[1],
self._create_param_lr(param_and_grad),
param_and_grad[0], "clip", self._clip, "batch_size",
self._batch_size, "sigma", self._sigma, "seed",
self._seed)
_legacy_C_ops.dpsgd(param_and_grad[0], param_and_grad[1],
self._create_param_lr(param_and_grad),
param_and_grad[0], "clip", self._clip,
"batch_size", self._batch_size, "sigma",
self._sigma, "seed", self._seed)
else:
dpsgd_op = block.append_op(type=self.type,
inputs={
......@@ -3072,11 +3072,12 @@ class DecayedAdagradOptimizer(Optimizer):
param_and_grad[0])
if framework._non_static_mode():
_C_ops.decayed_adagrad(param_and_grad[0], param_and_grad[1],
moment_acc,
self._create_param_lr(param_and_grad),
param_and_grad[0], moment_acc, "epsilon",
self._epsilon, "decay", self._decay)
_legacy_C_ops.decayed_adagrad(param_and_grad[0], param_and_grad[1],
moment_acc,
self._create_param_lr(param_and_grad),
param_and_grad[0], moment_acc,
"epsilon", self._epsilon, "decay",
self._decay)
else:
# Create the decayed adagrad optimizer op
decayed_adagrad_op = block.append_op(
......@@ -3198,16 +3199,15 @@ class AdadeltaOptimizer(Optimizer):
self._avg_squared_update_acc_str, param_and_grad[0])
if framework.in_dygraph_mode():
_C_ops.final_state_adadelta_(param_and_grad[0], param_and_grad[1],
avg_squared_grad_acc,
avg_squared_update_acc, self._rho,
self._epsilon)
_C_ops.adadelta_(param_and_grad[0], param_and_grad[1],
avg_squared_grad_acc, avg_squared_update_acc,
self._rho, self._epsilon)
elif framework._in_legacy_dygraph():
_C_ops.adadelta(param_and_grad[0], param_and_grad[1],
avg_squared_grad_acc, avg_squared_update_acc,
param_and_grad[0], avg_squared_grad_acc,
avg_squared_update_acc, "epsilon", self._epsilon,
"rho", self._rho)
_legacy_C_ops.adadelta(param_and_grad[0], param_and_grad[1],
avg_squared_grad_acc, avg_squared_update_acc,
param_and_grad[0], avg_squared_grad_acc,
avg_squared_update_acc, "epsilon",
self._epsilon, "rho", self._rho)
else:
# Create the adadelta optimizer op
adadelta_op = block.append_op(type=self.type,
......@@ -3399,20 +3399,20 @@ class RMSPropOptimizer(Optimizer):
mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,
param_and_grad[0])
if in_dygraph_mode():
_C_ops.final_state_rmsprop_(param_and_grad[0], mean_square_acc,
param_and_grad[1], momentum_acc,
self._create_param_lr(param_and_grad),
mean_grad_acc, self._epsilon, self._rho,
self._momentum, self._centered)
_C_ops.rmsprop_(param_and_grad[0], mean_square_acc,
param_and_grad[1], momentum_acc,
self._create_param_lr(param_and_grad),
mean_grad_acc, self._epsilon, self._rho,
self._momentum, self._centered)
return None
elif _in_legacy_dygraph():
_C_ops.rmsprop(param_and_grad[0], mean_square_acc,
self._create_param_lr(param_and_grad),
param_and_grad[1], momentum_acc, param_and_grad[0],
momentum_acc, mean_square_acc, mean_grad_acc,
"epsilon", self._epsilon, "decay", self._rho,
"momentum", self._momentum, "centered",
self._centered)
_legacy_C_ops.rmsprop(param_and_grad[0], mean_square_acc,
self._create_param_lr(param_and_grad),
param_and_grad[1], momentum_acc,
param_and_grad[0], momentum_acc,
mean_square_acc, mean_grad_acc, "epsilon",
self._epsilon, "decay", self._rho, "momentum",
self._momentum, "centered", self._centered)
return None
else:
rmsprop_op = block.append_op(
......@@ -3579,11 +3579,12 @@ class FtrlOptimizer(Optimizer):
linear_acc = self._get_accumulator(self._linear_acc_str,
param_and_grad[0])
if framework._non_static_mode():
_C_ops.ftrl(param_and_grad[0], squared_acc,
linear_acc, param_and_grad[1],
self._create_param_lr(param_and_grad),
param_and_grad[0], squared_acc, linear_acc, "l1",
self._l1, "l2", self._l2, "lr_power", self._lr_power)
_legacy_C_ops.ftrl(param_and_grad[0], squared_acc, linear_acc,
param_and_grad[1],
self._create_param_lr(param_and_grad),
param_and_grad[0], squared_acc, linear_acc, "l1",
self._l1, "l2", self._l2, "lr_power",
self._lr_power)
else:
ftrl_op = block.append_op(type=self.type,
......@@ -3741,12 +3742,13 @@ class LambOptimizer(AdamOptimizer):
lr = self._create_param_lr(param_and_grad)
master_weight = None
if framework._non_static_mode():
_C_ops.lamb(param_and_grad[0], param_and_grad[1], lr, moment1,
moment2, beta1_pow_acc, beta2_pow_acc, master_weight,
param_and_grad[0], moment1, moment2, beta1_pow_acc,
beta2_pow_acc, master_weight, 'beta1', self._beta1,
'beta2', self._beta2, 'epsilon', self._epsilon,
'weight_decay', weight_decay)
_legacy_C_ops.lamb(param_and_grad[0], param_and_grad[1], lr,
moment1, moment2, beta1_pow_acc, beta2_pow_acc,
master_weight, param_and_grad[0], moment1,
moment2, beta1_pow_acc, beta2_pow_acc,
master_weight, 'beta1', self._beta1, 'beta2',
self._beta2, 'epsilon', self._epsilon,
'weight_decay', weight_decay)
return None
# create the lamb optimize op
......
......@@ -18,7 +18,7 @@ import logging
from . import framework
from .framework import _non_static_mode, _varbase_creator, in_dygraph_mode
from . import core
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']
......@@ -135,11 +135,11 @@ class L2DecayRegularizer(WeightDecayRegularizer):
if framework._non_static_mode():
if framework.in_dygraph_mode():
return _C_ops.final_state_scale(param,
self._regularization_coeff, 0.0,
True)
return _C_ops.scale(param, self._regularization_coeff, 0.0,
True)
else:
return _C_ops.scale(param, "scale", self._regularization_coeff)
return _legacy_C_ops.scale(param, "scale",
self._regularization_coeff)
else:
decay = block.create_var(dtype=param.dtype,
shape=param.shape,
......@@ -253,9 +253,8 @@ class L1DecayRegularizer(WeightDecayRegularizer):
shape=param.shape,
lod_level=param.lod_level)
if in_dygraph_mode():
sign = _C_ops.final_state_sign(param)
return _C_ops.final_state_scale(sign, self._regularization_coeff,
0.0, True)
sign = _C_ops.sign(param)
return _C_ops.scale(sign, self._regularization_coeff, 0.0, True)
# Append sign op
block.append_op(type='sign', inputs={"X": param}, outputs={"Out": sign})
......
......@@ -30,7 +30,7 @@ from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
from paddle.fluid.dygraph import declarative, ProgramTranslator
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.fluid.framework import _non_static_mode
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
SEED = 2020
......@@ -176,7 +176,7 @@ class LinearChainCRF(fluid.dygraph.Layer):
def forward(self, input, label, length=None):
if _non_static_mode():
_, _, _, log_likelihood = _C_ops.linear_chain_crf(
_, _, _, log_likelihood = _legacy_C_ops.linear_chain_crf(
input, self._transition, label, length, "is_test",
self._is_test)
return log_likelihood
......@@ -234,8 +234,8 @@ class CRFDecoding(fluid.dygraph.Layer):
def forward(self, input, label=None, length=None):
if _non_static_mode():
return _C_ops.crf_decoding(input, self._transition, label, length,
"is_test", self._is_test)
return _legacy_C_ops.crf_decoding(input, self._transition, label,
length, "is_test", self._is_test)
viterbi_path = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
......@@ -268,11 +268,12 @@ class ChunkEval(fluid.dygraph.Layer):
def forward(self, input, label, seq_length=None):
if _non_static_mode():
return _C_ops.chunk_eval(input, label, seq_length,
"num_chunk_types", self.num_chunk_types,
"chunk_scheme", self.chunk_scheme,
"excluded_chunk_types",
self.excluded_chunk_types or [])
return _legacy_C_ops.chunk_eval(input, label, seq_length,
"num_chunk_types",
self.num_chunk_types,
"chunk_scheme", self.chunk_scheme,
"excluded_chunk_types",
self.excluded_chunk_types or [])
precision = self._helper.create_variable_for_type_inference(
dtype="float32")
......
......@@ -26,7 +26,7 @@ import paddle.fluid.dygraph as dg
import paddle.static as static
from numpy.random import random as rand
from paddle.fluid import Program, program_guard
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
sys.path.append("../")
from op_test import OpTest
......@@ -75,15 +75,15 @@ def class_name(cls, num, params_dict):
def fft_c2c_python_api(x, axes, norm, forward):
return _C_ops.final_state_fft_c2c(x, axes, norm, forward)
return _C_ops.fft_c2c(x, axes, norm, forward)
def fft_r2c_python_api(x, axes, norm, forward, onesided):
return _C_ops.final_state_fft_r2c(x, axes, norm, forward, onesided)
return _C_ops.fft_r2c(x, axes, norm, forward, onesided)
def fft_c2r_python_api(x, axes, norm, forward, last_dim_size=0):
return _C_ops.final_state_fft_c2r(x, axes, norm, forward, last_dim_size)
return _C_ops.fft_c2r(x, axes, norm, forward, last_dim_size)
@parameterize(
......
......@@ -22,7 +22,7 @@ import sys
sys.path.append('..')
from op_test import OpTest
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
paddle.enable_static()
......@@ -57,8 +57,8 @@ class TestL2LossDeterministic(unittest.TestCase):
with paddle.fluid.dygraph.guard(place):
x_np = np.random.rand(5, 11, 13).astype('float32')
x = paddle.to_tensor(x_np)
y1 = _C_ops.squared_l2_norm(x)
y2 = _C_ops.squared_l2_norm(x)
y1 = _legacy_C_ops.squared_l2_norm(x)
y2 = _legacy_C_ops.squared_l2_norm(x)
np.testing.assert_allclose(y1.numpy(), y2.numpy())
def test_main(self):
......
......@@ -80,7 +80,7 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase):
def gaussian_random_test_eager(self, place):
with fluid.dygraph.guard(place):
with _test_eager_guard():
out = paddle._C_ops.final_state_truncated_gaussian_random(
out = paddle._C_ops.truncated_gaussian_random(
self.attrs["shape"], self.attrs["mean"], self.attrs["std"],
self.attrs["seed"], core.VarDesc.VarType.FP32, place)
self.assertAlmostEqual(numpy.mean(out.numpy()), .0, delta=0.1)
......
......@@ -19,7 +19,7 @@ import sys
sys.path.append("..")
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle._C_ops as ops
import paddle._legacy_C_ops as ops
class TestGetFloatStatusOp(unittest.TestCase):
......
......@@ -24,7 +24,7 @@ sys.path.append("..")
from op_test import OpTest
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import paddle.fluid as fluid
from paddle import compat as cpt
from paddle.fluid import core, framework, executor
......@@ -201,9 +201,9 @@ class RunProgramNPUOpTest(unittest.TestCase):
inputs = self.prepare_dygraph_input(place)
outputs = self.prepare_dygraph_output()
_C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
outputs['OutScope'], outputs['DOut'], None,
*self.attrs)
_legacy_C_ops.run_program(inputs['X'], inputs['Params'],
outputs['Out'], outputs['OutScope'],
outputs['DOut'], None, *self.attrs)
return outputs['Out']
def calc_dygraph_grad(self, place):
......@@ -215,9 +215,9 @@ class RunProgramNPUOpTest(unittest.TestCase):
inputs, input_param_list = self.prepare_dygraph_input(place, True)
outputs = self.prepare_dygraph_output()
_C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
outputs['OutScope'], outputs['DOut'], None,
*self.attrs)
_legacy_C_ops.run_program(inputs['X'], inputs['Params'],
outputs['Out'], outputs['OutScope'],
outputs['DOut'], None, *self.attrs)
for param in input_param_list:
var_type = self._get_grad_vartype(param.name)
......
......@@ -3117,7 +3117,7 @@ class TestSwishAPI(unittest.TestCase):
for r in res:
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
def func_test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.swish(x)
......@@ -3128,9 +3128,10 @@ class TestSwishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_dygraph_final_state_api(self):
def test_dygraph_api(self):
with _test_eager_guard():
self.test_dygraph_api()
self.func_test_dygraph_api()
self.func_test_dygraph_api()
def test_fluid_api(self):
paddle.enable_static()
......
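A minimal sketch (not part of the diff) of the test-rename pattern used in the unit-test hunks: the original body becomes func_test_dygraph_api, and the public test_dygraph_api runs it once under _test_eager_guard() (eager mode) and once more in legacy dygraph. The test class and tensor values below are illustrative only.

import unittest
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

class ExampleDygraphAPITest(unittest.TestCase):  # hypothetical test case

    def func_test_dygraph_api(self):
        # The original test body, unchanged apart from the rename.
        x = paddle.to_tensor(np.array([1.0, 2.0, 3.0], dtype='float32'))
        np.testing.assert_allclose(x.numpy(), [1.0, 2.0, 3.0], rtol=1e-05)

    def test_dygraph_api(self):
        with _test_eager_guard():
            self.func_test_dygraph_api()  # runs under the eager mode guard
        self.func_test_dygraph_api()      # runs again in legacy dygraph


if __name__ == '__main__':
    unittest.main()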
......@@ -48,11 +48,10 @@ def bicubic_interp_test(x,
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return paddle._C_ops.final_state_bicubic_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method,
align_corners, align_mode)
return paddle._C_ops.bicubic_interp(x, OutSize, SizeTensor, Scale,
data_layout, out_d, out_h, out_w, scale,
interp_method, align_corners,
align_mode)
def cubic_1(x, a):
......
......@@ -46,11 +46,10 @@ def bilinear_interp_test(x,
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return paddle._C_ops.final_state_bilinear_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method,
align_corners, align_mode)
return paddle._C_ops.bilinear_interp(x, OutSize, SizeTensor, Scale,
data_layout, out_d, out_h, out_w,
scale, interp_method, align_corners,
align_mode)
def bilinear_interp_np(input,
......
......@@ -1470,7 +1470,7 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
def test_soft_1d_dygraph_final_state_api(self):
def test_soft_1d_dygraph_api(self):
with _test_eager_guard():
self.test_cross_entropy_loss_soft_1d()
self.test_cross_entropy_loss_soft_1d_weight()
......@@ -1478,12 +1478,12 @@ class CrossEntropyLoss(unittest.TestCase):
self.test_cross_entropy_loss_soft_1d_weight_mean()
# put all testcases in one test will be failed
def test_soft_2d_dygraph_final_state_api(self):
def test_soft_2d_dygraph_api(self):
with _test_eager_guard():
self.test_cross_entropy_loss_soft_2d()
self.test_cross_entropy_loss_soft_2d_weight_mean()
def test_other_dygraph_final_state_api(self):
def test_other_dygraph_api(self):
with _test_eager_guard():
self.test_cross_entropy_loss_1d_with_mean_ignore()
self.test_cross_entropy_loss_1d_with_mean_ignore_negative()
......
......@@ -23,7 +23,7 @@ import paddle.fluid.core as core
import sys
import warnings
import paddle.utils.deprecated as deprecated
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
LOWEST_WARNING_POSTION = 3
ERROR_WARNING_POSTION = sys.maxsize
......@@ -141,7 +141,7 @@ class TestDeprecatedDocorator(unittest.TestCase):
b = np.random.uniform(0.1, 1, [51, 76]).astype(np.float32)
x = paddle.to_tensor(a)
y = paddle.to_tensor(b)
res = _C_ops.elementwise_mul(x, y)
res = _legacy_C_ops.elementwise_mul(x, y)
# expected
expected = LOWEST_WARNING_POSTION
......
......@@ -22,7 +22,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.static import default_main_program
......@@ -39,12 +39,12 @@ def dropout_nd(x,
if default_main_program().random_seed != 0:
seed = default_main_program().random_seed
out, mask = _C_ops.dropout_nd(x, 'dropout_prob', p, 'is_test',
not training, 'fix_seed', seed
is not None, 'seed',
seed if seed is not None else 0,
'dropout_implementation', mode, 'axis',
drop_axes)
out, mask = _legacy_C_ops.dropout_nd(x, 'dropout_prob', p, 'is_test',
not training, 'fix_seed', seed
is not None, 'seed',
seed if seed is not None else 0,
'dropout_implementation', mode,
'axis', drop_axes)
return out
helper = LayerHelper('dropout_nd', **locals())
......
......@@ -25,7 +25,7 @@ from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard, _enable_legacy_dygraph
import os
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
class TestDropoutOp(OpTest):
......@@ -1031,8 +1031,8 @@ class TestDropoutBackward(unittest.TestCase):
with _test_eager_guard():
input = paddle.uniform([40, 40], dtype="float32")
input.stop_gradient = False
out, mask = _C_ops.final_state_dropout(
input, None, 0.5, False, "downgrade_in_infer", 0, False)
out, mask = _C_ops.dropout(input, None, 0.5, False,
"downgrade_in_infer", 0, False)
out.backward()
np.testing.assert_array_equal(
input.gradient(),
......@@ -1063,8 +1063,8 @@ class TestDropoutBackward(unittest.TestCase):
prob = 0.5
input = paddle.uniform([40, 40], dtype="float32")
input.stop_gradient = False
out, mask = _C_ops.final_state_dropout(
input, None, 0.5, False, "upscale_in_train", 0, False)
out, mask = _C_ops.dropout(input, None, 0.5, False,
"upscale_in_train", 0, False)
out.backward()
np.testing.assert_allclose(input.gradient(),
......@@ -1098,8 +1098,8 @@ class TestDropoutBackward(unittest.TestCase):
prob = 0.3
input = paddle.uniform([40, 40], dtype="float32")
input.stop_gradient = False
out, mask = _C_ops.final_state_dropout(
input, None, 0.3, False, "upscale_in_train", 0, False)
out, mask = _C_ops.dropout(input, None, 0.3, False,
"upscale_in_train", 0, False)
out.backward()
......
......@@ -14,7 +14,7 @@
import paddle
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard, Variable, _in_legacy_dygraph
from paddle.fluid import core
from paddle.fluid.layers.utils import _hash_with_id
......@@ -102,8 +102,8 @@ class TestRunProgram(unittest.TestCase):
'end_op_index', main_program.desc.block(0).op_size(),
'is_test', False, 'program_id', _hash_with_id(program))
_C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope],
[fake_var], None, *attrs)
_legacy_C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope],
[fake_var], None, *attrs)
loss = paddle.mean(out_t)
loss.backward()
......
......@@ -19,7 +19,7 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
......
......@@ -24,7 +24,7 @@ import paddle.nn as nn
from paddle.dataset.common import DATA_HOME
from paddle.fluid.framework import core, _non_static_mode, _test_eager_guard
from paddle.fluid.layer_helper import LayerHelper
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import sys
import tempfile
......@@ -79,7 +79,7 @@ class FasterTokenizer(nn.Layer):
is_split_into_words=False,
pad_to_max_seq_len=False):
if _non_static_mode():
input_ids, seg_ids = _C_ops.faster_tokenizer(
input_ids, seg_ids = _legacy_C_ops.faster_tokenizer(
self.vocab, text, text_pair, "do_lower_case", do_lower_case,
"max_seq_len", max_seq_len, "pad_to_max_seq_len",
pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
......
......@@ -19,7 +19,7 @@ import paddle
import numpy as np
import paddle.fluid.core as core
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard
......@@ -50,8 +50,8 @@ class TestBakcwardFunctionHookError(unittest.TestCase):
x = paddle.to_tensor(input_data.astype(np.float32), stop_gradient=False)
z = paddle.to_tensor(input_data.astype(np.float32), stop_gradient=False)
y = _C_ops.sigmoid(x)
out = _C_ops.matmul_v2(y, z, 'trans_x', False, 'trans_y', False)
y = _legacy_C_ops.sigmoid(x)
out = _legacy_C_ops.matmul_v2(y, z, 'trans_x', False, 'trans_y', False)
out._register_void_function_post_hook(test_hook)
y._register_void_function_post_hook(test_hook)
......
......@@ -25,7 +25,7 @@ from paddle import tensor
import unittest
from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float
from test_sparse_attention_op import get_cuda_version
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import default_main_program
from paddle.fluid import core
......@@ -232,7 +232,7 @@ class TestFusedGateAttentionOp(OpTest):
output_w = paddle.to_tensor(self.output_w, stop_gradient=False)
output_b = paddle.to_tensor(self.output_b, stop_gradient=False)
_, _, _, _, softmax_out, fmha_out, gate_out, out = _C_ops.fused_gate_attention(
_, _, _, _, softmax_out, fmha_out, gate_out, out = _legacy_C_ops.fused_gate_attention(
query, key, q_weight, k_weight, v_weight, qkv_weight,
nonbatched_bias, src_mask, gating_w, gating_b, output_w, output_b,
'has_gating', self.has_gating, 'merge_qkv', self.merge_qkv)
......
......@@ -32,7 +32,7 @@ from paddle.fluid.layer_helper import LayerHelper
from paddle.nn.initializer import Constant
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.fluid.framework import _non_static_mode, default_main_program
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.incubate.nn.functional import fused_multi_transformer
default_main_program().random_seed = 42
......
......@@ -303,7 +303,7 @@ class TestGroupNormException(unittest.TestCase):
class TestGroupNormEager(unittest.TestCase):
def test_dygraph_final_state_api(self):
def test_dygraph_api(self):
self.dtype = np.float64
self.shape = (8, 32, 32)
input = np.random.random(self.shape).astype(self.dtype)
......
......@@ -18,7 +18,7 @@ import unittest
import paddle.fluid as fluid
import numpy as np
import paddle
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard
......@@ -44,7 +44,7 @@ class MyLayer(fluid.Layer):
def forward(self, x):
for i, p in enumerate(self.params):
x = _C_ops.mul(x, p)
x = _legacy_C_ops.mul(x, p)
return x
......
......@@ -16,7 +16,7 @@ import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph, _in_eager_without_dygraph_check
if fluid.is_compiled_with_cuda():
......@@ -117,8 +117,9 @@ class InstanceNorm(fluid.dygraph.Layer):
def forward(self, input):
if fluid._non_static_mode():
out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
'epsilon', self.epsilon)
out, _, _ = _legacy_C_ops.instance_norm(input, self.scale,
self.bias, 'epsilon',
self.epsilon)
return out
else:
return fluid.layers.instance_norm(
......
......@@ -15,7 +15,7 @@
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
import unittest
paddle.disable_static()
......@@ -26,7 +26,7 @@ def clear_grad(w, a):
@paddle.no_grad()
def warp(*_):
assert w.grad is not None
_C_ops.scale_(w.grad, 'scale', 0.5)
_legacy_C_ops.scale_(w.grad, 'scale', 0.5)
w.clear_gradient(False)
return warp
......@@ -44,7 +44,7 @@ class TestInplaceAndClearGradient(unittest.TestCase):
w._register_backward_hook(_clear_grad)
for i in range(10):
out = _C_ops.scale(w, 'scale', 0.1)
out = _legacy_C_ops.scale(w, 'scale', 0.1)
out.backward()
......
......@@ -47,11 +47,9 @@ def linear_interp_test(x,
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return paddle._C_ops.final_state_linear_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method, align_corners,
align_mode)
return paddle._C_ops.linear_interp(x, OutSize, SizeTensor, Scale,
data_layout, out_d, out_h, out_w, scale,
interp_method, align_corners, align_mode)
def linear_interp_np(input,
......
......@@ -109,7 +109,7 @@ class TestMaxoutAPI(unittest.TestCase):
for r in res:
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
def func_test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.maxout(x, self.groups, self.axis)
......@@ -152,9 +152,10 @@ class TestMaxoutAPI(unittest.TestCase):
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
def test_dygraph_final_state_api(self):
def test_dygraph_api(self):
with _test_eager_guard():
self.test_dygraph_api()
self.func_test_dygraph_api()
self.func_test_dygraph_api()
if __name__ == '__main__':
......
......@@ -15,7 +15,7 @@
import unittest
import paddle
import numpy as np
from paddle import _C_ops
from paddle import _C_ops, _legacy_C_ops
def run_adam_op(params,
......@@ -55,7 +55,7 @@ def run_adam_op(params,
if not use_merged:
for i in range(len(param_vars)):
_, _, _, _, _, _ = _C_ops.adam(
_, _, _, _, _, _ = _legacy_C_ops.adam(
param_vars[i], grad_vars[i], lr_vars[i], moment1_vars[i],
moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i],
master_param_vars[i], param_vars[i], moment1_vars[i],
......@@ -63,7 +63,7 @@ def run_adam_op(params,
master_param_vars[i], 'epsilon', epsilon, 'beta1', beta1,
'beta2', beta2, 'multi_precision', multi_precision)
else:
_, _, _, _, _, _ = _C_ops.merged_adam(
_, _, _, _, _, _ = _legacy_C_ops.merged_adam(
param_vars, grad_vars, lr_vars, moment1_vars, moment2_vars,
beta1_pow_vars, beta2_pow_vars, master_param_vars, param_vars,
moment1_vars, moment2_vars, beta1_pow_vars, beta2_pow_vars,
......
......@@ -231,7 +231,7 @@ class TestMeshgridOp8(unittest.TestCase):
class TestMeshgridEager(unittest.TestCase):
def test_dygraph_final_state_api(self):
def test_dygraph_api(self):
input_1 = np.random.randint(0, 100, [
100,
]).astype('int32')
......
......@@ -282,7 +282,7 @@ class APITestMultiDot(unittest.TestCase):
expected_result = np.linalg.multi_dot([input_array1, input_array2])
np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05)
def test_dygraph_final_state_api(self):
def test_dygraph_api(self):
with _test_eager_guard():
self.test_dygraph_without_out()
......
This diff is collapsed. (The diffs of 27 more changed files are collapsed in the web view and not shown here.)