From 9ad800ebb2a8b32c28e5440d2145ff053219389d Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Fri, 4 Dec 2020 10:16:29 +0800 Subject: [PATCH] Support type promote for basic math ops (quantum required) (#29265) * basic impl of type promote * add comment & another testcase * fix complex bugs & support python op promote type * fix failed unittests & polish code * add unittest for coverage * change to only promote complex type * polish code details * polish several comments --- paddle/fluid/framework/data_type.cc | 53 ++++ paddle/fluid/framework/data_type.h | 9 + paddle/fluid/framework/operator.cc | 60 +++++ paddle/fluid/framework/operator.h | 20 +- paddle/fluid/framework/tensor_util.cc | 10 +- paddle/fluid/operators/cast_op.cc | 4 +- paddle/fluid/operators/cast_op.cu | 6 +- paddle/fluid/operators/cast_op.h | 11 + .../elementwise/elementwise_mul_op.h | 16 +- .../operators/elementwise/elementwise_op.h | 16 +- paddle/fluid/operators/matmul_op.cc | 16 +- paddle/fluid/operators/matmul_v2_op.cc | 19 +- paddle/fluid/platform/complex128.h | 21 +- paddle/fluid/platform/complex64.h | 27 +- paddle/fluid/pybind/pybind.cc | 3 + python/paddle/fluid/core.py | 2 + python/paddle/fluid/dygraph/math_op_patch.py | 39 ++- .../fluid/tests/unittests/test_cast_op.py | 15 ++ .../test_complex_elementwise_layers.py | 176 +++++++------ .../tests/unittests/test_complex_matmul.py | 233 ++++++++---------- .../unittests/test_math_op_patch_var_base.py | 9 + .../fluid/tests/unittests/test_multiply.py | 14 +- python/paddle/tensor/math.py | 12 +- 23 files changed, 536 insertions(+), 255 deletions(-) diff --git a/paddle/fluid/framework/data_type.cc b/paddle/fluid/framework/data_type.cc index e4be866dca1..0959a060515 100644 --- a/paddle/fluid/framework/data_type.cc +++ b/paddle/fluid/framework/data_type.cc @@ -98,5 +98,58 @@ size_t SizeOfType(proto::VarType::Type type) { DataTypeToString(type))); } +// Now only supports promotion of complex type +bool NeedPromoteTypes(const proto::VarType::Type a, + const proto::VarType::Type b) { + return (IsComplexType(a) || IsComplexType(b)); +} + +int DataTypeNumAlign(const proto::VarType::Type t) { + int cast_type_num = -1; + if (t == proto::VarType::FP32 || t == proto::VarType::FP64) { + cast_type_num = static_cast(t) - 5; + } else if (t == proto::VarType::COMPLEX64 || + t == proto::VarType::COMPLEX128) { + cast_type_num = static_cast(t) - 21; + } else { + PADDLE_THROW(platform::errors::Unavailable( + "Only supports to align data type include float32, float64, complex64 " + "and complex128, but received data type is `s`.", + DataTypeToString(t))); + } + return cast_type_num; +} + +// Now only supports promotion of complex type +proto::VarType::Type PromoteTypesIfComplexExists( + const proto::VarType::Type type_a, const proto::VarType::Type type_b) { + constexpr auto f4 = proto::VarType::FP32; // 5 + constexpr auto f8 = proto::VarType::FP64; // 6 + constexpr auto c4 = proto::VarType::COMPLEX64; // 23 + constexpr auto c8 = proto::VarType::COMPLEX128; // 24 + + if (!NeedPromoteTypes(type_a, type_b)) { + // NOTE(chenweihang): keep consistent with rule in original op's impl, + // kernel type based on the first input tensor's dtype + return type_a; + } + + int type_an = DataTypeNumAlign(type_a); + int type_bn = DataTypeNumAlign(type_b); + + // Here is a complete rules table, but some rules are not used. 
+ // It is still written this way because array accessing is still + // more efficient than if-else + static constexpr proto::VarType::Type promote_types_table[4][4] = { + /* f4 f8 c4 c8*/ + /* f4 */ {f4, f8, c4, c8}, + /* f8 */ {f8, f8, c8, c8}, + /* c4 */ {c4, c8, c4, c8}, + /* c8 */ {c8, c8, c8, c8}, + }; + + return promote_types_table[type_an][type_bn]; +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index d3cc0ac4e73..eafb8ade9e5 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -141,5 +141,14 @@ inline std::ostream& operator<<(std::ostream& out, out << DataTypeToString(type); return out; } + +extern inline bool IsComplexType(const proto::VarType::Type type) { + return (type == proto::VarType::COMPLEX64 || + type == proto::VarType::COMPLEX128); +} + +extern proto::VarType::Type PromoteTypesIfComplexExists( + const proto::VarType::Type type_a, const proto::VarType::Type type_b); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 026c1092eb3..7b40a5977a0 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1480,6 +1480,66 @@ proto::VarType::Type OperatorWithKernel::IndicateVarDataType( return data_type; } +Tensor* OperatorWithKernel::GetTensorFormInputSafely( + const ExecutionContext& ctx, const std::string& name) const { + // 1. get variable and check + // NOTE: only supports signal input var now + // NOTE: using const_cast is because we don't have method + // can get single mutable var, and here will not change + // the var's data, only use some attribute + Variable* var = const_cast(ctx.InputVar(name)); + PADDLE_ENFORCE_NOT_NULL( + var, + platform::errors::NotFound( + "The variable %s is not found when promote complex types.", name)); + // 2. get tensor and check + Tensor* t = nullptr; + if (var->IsType()) { + t = var->GetMutable(); + } else if (var->IsType()) { + t = var->GetMutable(); + } else if (var->IsType()) { + t = var->GetMutable()->mutable_value(); + } else { + PADDLE_THROW(platform::errors::Unimplemented( + "Unsupported input variable type in complex type promotion.")); + } + PADDLE_ENFORCE_NOT_NULL( + t, + platform::errors::InvalidArgument( + "The Tensor of variable %s is nullptr when promote complex types.")); + PADDLE_ENFORCE_EQ(t->IsInitialized(), true, + platform::errors::InvalidArgument( + "The Tensor in the %s Op's Input Variable %s(%s) is " + "not initialized.", + Type(), name, ctx.InputName(name))); + return t; +} + +/** NOTE(chenweihang): For safety reasons, we now only + * perform type promotes for binary operations with + * complex type inputs, which is used to support the + * paddle quantum function. + * In other cases, the first input data type is used as + * the kernel data type. + */ +proto::VarType::Type OperatorWithKernel::IndicateOrPromoteVarDataTypes( + const ExecutionContext& ctx, const std::string& name1, + const std::string& name2) const { + // 1. Get tensor + auto* tensor_a = GetTensorFormInputSafely(ctx, name1); + auto* tensor_b = GetTensorFormInputSafely(ctx, name2); + + // 2. Get two input types + auto type_a = tensor_a->type(); + auto type_b = tensor_b->type(); + + // 3. 
Get first input type or promote complex types + auto target_type = PromoteTypesIfComplexExists(type_a, type_b); + + return target_type; +} + OpKernelType OperatorWithKernel::GetExpectedKernelType( const ExecutionContext& ctx) const { return OpKernelType(IndicateDataType(ctx), ctx.GetPlace()); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index d5107ef5ca2..652d5330f2b 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -504,6 +504,10 @@ class OperatorWithKernel : public OperatorBase { proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx, const std::string& name) const; + proto::VarType::Type IndicateOrPromoteVarDataTypes( + const ExecutionContext& ctx, const std::string& name1, + const std::string& name2) const; + virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const; // change this to public so that in dygraph mode we can call it to check if we @@ -518,11 +522,6 @@ class OperatorWithKernel : public OperatorBase { } private: - void ParseInputDataType(const ExecutionContext& ctx, const std::string& name, - proto::VarType::Type* type) const; - // indicate kernel DataType by input data. By default all input data must be - // same. - proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const; void RunImpl(const Scope& scope, const platform::Place& place) const final; void RunImpl(const Scope& scope, const platform::Place& place, RuntimeContext* runtime_ctx) const; @@ -546,6 +545,17 @@ class OperatorWithKernel : public OperatorBase { void ChooseKernel(const RuntimeContext& ctx, const Scope& scope, const platform::Place& place) const; + /* Inner assist methods */ + // indicate kernel DataType by input data. + // By default all input data must be same. 
+ proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const; + // used for IndicateDataType + void ParseInputDataType(const ExecutionContext& ctx, const std::string& name, + proto::VarType::Type* type) const; + // used for IndicateOrPromoteVarDataTypes + Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx, + const std::string& name) const; + protected: mutable std::unique_ptr kernel_type_; mutable std::unique_ptr kernel_func_; diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 5e38309dfe9..6bc656851da 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -1000,9 +1000,10 @@ std::ostream& print_tensor( os << " - data: ["; if (element_num > 0) { - os << signed(inspect[0].real) << signed(inspect[0].imag) << "j"; + os << signed(inspect[0].real) << "+" << signed(inspect[0].imag) << "j"; for (int j = 1; j < element_num; ++j) { - os << signed(inspect[j].real) << signed(inspect[j].imag) << "j"; + os << " " << signed(inspect[j].real) << "+" << signed(inspect[j].imag) + << "j"; } } os << "]"; @@ -1017,9 +1018,10 @@ std::ostream& print_tensor( os << " - data: ["; if (element_num > 0) { - os << signed(inspect[0].real) << signed(inspect[0].imag) << "j"; + os << signed(inspect[0].real) << "+" << signed(inspect[0].imag) << "j"; for (int j = 1; j < element_num; ++j) { - os << signed(inspect[j].real) << signed(inspect[j].imag) << "j"; + os << " " << signed(inspect[j].real) << "+" << signed(inspect[j].imag) + << "j"; } } os << "]"; diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index eb4483c9c5c..c5cfa7a3baf 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -96,4 +96,6 @@ REGISTER_OP_CPU_KERNEL(cast, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, - ops::CastOpKernel); + ops::CastOpKernel, + ops::CastOpKernel, + ops::CastOpKernel); diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu index 422adfdbb50..f71af205766 100644 --- a/paddle/fluid/operators/cast_op.cu +++ b/paddle/fluid/operators/cast_op.cu @@ -25,4 +25,8 @@ REGISTER_OP_CUDA_KERNEL( ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel); + paddle::platform::float16>, + ops::CastOpKernel, + ops::CastOpKernel); diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index 66079243eb4..91276ba6e8b 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -82,6 +82,17 @@ class CastOpKernel : public framework::OpKernel { CastFunction(context); } else if (out_type == paddle::framework::proto::VarType::BOOL) { CastFunction(context); + } else if (out_type == paddle::framework::proto::VarType::COMPLEX64) { + CastFunction(context); + } else if (out_type == paddle::framework::proto::VarType::COMPLEX128) { + CastFunction(context); + } else { + // NOTE(chenweihang): if else branch do nothing, the output var will + // be non-initialized in dygraph, which will throw error if the + // non-initialized var is used as the next op's input + PADDLE_THROW(platform::errors::Unimplemented( + "Now does not support casting Tensor to `%s` data type.", + framework::DataTypeToString(out_type))); } } }; diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.h b/paddle/fluid/operators/elementwise/elementwise_mul_op.h index 49456149c2c..a5bd7221c75 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.h +++ 
b/paddle/fluid/operators/elementwise/elementwise_mul_op.h @@ -30,7 +30,8 @@ class ElementwiseMulOp : public ElementwiseOp { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); + auto input_data_type = + OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y"); #ifdef PADDLE_WITH_MKLDNN if (this->CanMKLDNNBeUsed(ctx)) { @@ -41,6 +42,19 @@ class ElementwiseMulOp : public ElementwiseOp { #endif return framework::OpKernelType(input_data_type, ctx.GetPlace()); } + + framework::OpKernelType GetKernelTypeForVar( + const std::string& var_name, const framework::Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const { + if (framework::IsComplexType(expected_kernel_type.data_type_)) { + // only promote inputs’s types when contains complex input + return framework::OpKernelType(tensor.type(), tensor.place(), + tensor.layout()); + } else { + return framework::OpKernelType(expected_kernel_type.data_type_, + tensor.place(), tensor.layout()); + } + } }; template diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index bbb240efaea..abafedf2057 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -105,7 +105,8 @@ class ElementwiseOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { - auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); + auto input_data_type = + OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y"); #ifdef PADDLE_WITH_MKLDNN if (this->CanMKLDNNBeUsed(ctx)) { @@ -116,6 +117,19 @@ class ElementwiseOp : public framework::OperatorWithKernel { #endif return framework::OpKernelType(input_data_type, ctx.GetPlace()); } + + framework::OpKernelType GetKernelTypeForVar( + const std::string &var_name, const framework::Tensor &tensor, + const framework::OpKernelType &expected_kernel_type) const { + if (framework::IsComplexType(expected_kernel_type.data_type_)) { + // only promote inputs’s types when contains complex input + return framework::OpKernelType(tensor.type(), tensor.place(), + tensor.layout()); + } else { + return framework::OpKernelType(expected_kernel_type.data_type_, + tensor.place(), tensor.layout()); + } + } }; class ElementwiseOpInferVarType diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index 639a6991a4f..d45669a9f07 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -655,7 +655,8 @@ class MatMulOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { - auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); + auto input_data_type = + OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y"); #ifdef PADDLE_WITH_MKLDNN using mkldnn::memory; @@ -667,6 +668,19 @@ class MatMulOp : public framework::OperatorWithKernel { #endif return framework::OpKernelType(input_data_type, ctx.GetPlace()); } + + framework::OpKernelType GetKernelTypeForVar( + const std::string &var_name, const framework::Tensor &tensor, + const framework::OpKernelType &expected_kernel_type) const { + if (framework::IsComplexType(expected_kernel_type.data_type_)) { + // only promote inputs’s types when contains 
complex input + return framework::OpKernelType(tensor.type(), tensor.place(), + tensor.layout()); + } else { + return framework::OpKernelType(expected_kernel_type.data_type_, + tensor.place(), tensor.layout()); + } + } }; class MatMulOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/fluid/operators/matmul_v2_op.cc b/paddle/fluid/operators/matmul_v2_op.cc index 27023ecd29c..7a3db793184 100644 --- a/paddle/fluid/operators/matmul_v2_op.cc +++ b/paddle/fluid/operators/matmul_v2_op.cc @@ -85,9 +85,22 @@ class MatMulV2Op : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - OperatorWithKernel::IndicateVarDataType(ctx, "X"), - ctx.device_context()); + auto data_type = + OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y"); + return framework::OpKernelType(data_type, ctx.device_context()); + } + + framework::OpKernelType GetKernelTypeForVar( + const std::string& var_name, const framework::Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const { + if (framework::IsComplexType(expected_kernel_type.data_type_)) { + // only promote inputs’s types when contains complex input + return framework::OpKernelType(tensor.type(), tensor.place(), + tensor.layout()); + } else { + return framework::OpKernelType(expected_kernel_type.data_type_, + tensor.place(), tensor.layout()); + } } }; diff --git a/paddle/fluid/platform/complex128.h b/paddle/fluid/platform/complex128.h index bc3f6cc0319..2a2cd3b7be2 100644 --- a/paddle/fluid/platform/complex128.h +++ b/paddle/fluid/platform/complex128.h @@ -70,10 +70,13 @@ struct PADDLE_ALIGN(16) complex128 { } #endif - HOSTDEVICE complex128(const float& val) { real = static_cast(val); } - HOSTDEVICE complex128(const double& val) { real = val; } - HOSTDEVICE complex128(const int& val) { real = static_cast(val); } - HOSTDEVICE complex128(const int64_t& val) { real = static_cast(val); } + HOSTDEVICE complex128(const float& val) + : real(static_cast(val)), imag(0) {} + HOSTDEVICE complex128(const double& val) : real(val), imag(0) {} + HOSTDEVICE complex128(const int& val) + : real(static_cast(val)), imag(0) {} + HOSTDEVICE complex128(const int64_t& val) + : real(static_cast(val)), imag(0) {} HOSTDEVICE inline explicit operator std::complex() { return static_cast>(std::complex(real, imag)); @@ -94,51 +97,61 @@ struct PADDLE_ALIGN(16) complex128 { HOSTDEVICE inline complex128& operator=(int8_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(uint8_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(int16_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(uint16_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(int32_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(uint32_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(int64_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(uint64_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(float val) { real = val; + imag = 0; return *this; } HOSTDEVICE inline complex128& operator=(double val) { real = static_cast(val); + imag = 0; return *this; 
} diff --git a/paddle/fluid/platform/complex64.h b/paddle/fluid/platform/complex64.h index d378f14e6f3..d4ab7f3fda4 100644 --- a/paddle/fluid/platform/complex64.h +++ b/paddle/fluid/platform/complex64.h @@ -70,14 +70,16 @@ struct PADDLE_ALIGN(8) complex64 { } #endif - HOSTDEVICE complex64(const float& val) { real = val; } - HOSTDEVICE complex64(const double& val) { real = static_cast(val); } - HOSTDEVICE complex64(const int& val) { real = static_cast(val); } - HOSTDEVICE complex64(const int64_t& val) { real = static_cast(val); } - HOSTDEVICE complex64(const complex128& val) { - real = static_cast(val.real); - imag = static_cast(val.imag); - } + HOSTDEVICE complex64(const float& val) : real(val), imag(0) {} + HOSTDEVICE complex64(const double& val) + : real(static_cast(val)), imag(0) {} + HOSTDEVICE complex64(const int& val) + : real(static_cast(val)), imag(0) {} + HOSTDEVICE complex64(const int64_t& val) + : real(static_cast(val)), imag(0) {} + HOSTDEVICE complex64(const complex128& val) + : real(static_cast(val.real)), + imag(static_cast(val.imag)) {} HOSTDEVICE inline explicit operator std::complex() { return static_cast>(std::complex(real, imag)); @@ -98,21 +100,25 @@ struct PADDLE_ALIGN(8) complex64 { HOSTDEVICE inline complex64& operator=(int8_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(uint8_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(int16_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(uint16_t val) { real = static_cast(val); + imag = 0; return *this; } @@ -123,26 +129,31 @@ struct PADDLE_ALIGN(8) complex64 { HOSTDEVICE inline complex64& operator=(uint32_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(int64_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(uint64_t val) { real = static_cast(val); + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(float val) { real = val; + imag = 0; return *this; } HOSTDEVICE inline complex64& operator=(double val) { real = static_cast(val); + imag = 0; return *this; } diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index b2d1cac37eb..778b670769a 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -514,6 +514,9 @@ PYBIND11_MODULE(core_noavx, m) { m.def("_set_paddle_lib_path", &paddle::platform::dynload::SetPaddleLibPath); + m.def("_promote_types_if_complex_exists", + &paddle::framework::PromoteTypesIfComplexExists); + BindImperative(&m); py::class_(m, "Tensor", py::buffer_protocol()) diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py index 224a021cd6a..69881dd4528 100644 --- a/python/paddle/fluid/core.py +++ b/python/paddle/fluid/core.py @@ -272,6 +272,7 @@ if avx_supported(): from .core_avx import _load_dygraph_dict from .core_avx import _create_loaded_parameter from .core_avx import _cuda_synchronize + from .core_avx import _promote_types_if_complex_exists if sys.platform != 'win32': from .core_avx import _set_process_pids from .core_avx import _erase_process_pids @@ -317,6 +318,7 @@ if load_noavx: from .core_noavx import _load_dygraph_dict from .core_noavx import _create_loaded_parameter from .core_noavx import _cuda_synchronize + from .core_noavx import _promote_types_if_complex_exists if sys.platform != 'win32': from .core_noavx import _set_process_pids from .core_noavx import 
_erase_process_pids diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py index 203a5e0f86a..4208d9a259f 100644 --- a/python/paddle/fluid/dygraph/math_op_patch.py +++ b/python/paddle/fluid/dygraph/math_op_patch.py @@ -30,6 +30,27 @@ _supported_int_dtype_ = [ core.VarDesc.VarType.INT64, ] +# NOTE(chenweihang): We currently do not fully support the type promotion +# between tensors. Parting support here is because the interoperation of +# real and complex numbers in paddle quantum is very frequent, such as the +# binary operation between `float` and `complex64`, so we must support the +# correct type promotion on the APIs paddle quantum used. +# Now only check in dygraph (paddle quantum based dygraph) +# Full type promotion support will need to be fully verified later. +_supported_promote_complex_types_ = [ + '__add__', + '__radd__', + '__sub__', + '__rsub__', + '__mul__', + '__rmul__', + '__div__', + '__truediv__', + '__rdiv__', + '__rtruediv__', + '__matmul__', +] + _already_patch_varbase = False @@ -197,10 +218,22 @@ def monkey_patch_math_varbase(): # add fill_op other_var = create_scalar(value=other_var, dtype=lhs_dtype) - # 3. unify right var type to left var + # 3. promote types or unify right var type to left var rhs_dtype = other_var.dtype if lhs_dtype != rhs_dtype: - other_var = astype(other_var, lhs_dtype) + if method_name in _supported_promote_complex_types_: + # only when lhs_dtype or rhs_dtype is complex type, + # the dtype will promote, in other cases, directly + # use lhs_dtype, this is consistent will original rule + promote_dtype = core._promote_types_if_complex_exists( + lhs_dtype, rhs_dtype) + self = self if lhs_dtype == promote_dtype else astype( + self, promote_dtype) + other_var = other_var if rhs_dtype == promote_dtype else astype( + other_var, promote_dtype) + else: + other_var = astype(other_var, lhs_dtype) + if reverse: tmp = self self = other_var @@ -266,6 +299,8 @@ def monkey_patch_math_varbase(): 'elementwise_floordiv', False, None)), ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False, None)), + ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False, + None)), ## for logical compare ('__eq__', _binary_creator_('__eq__', 'equal', False, None)), ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)), diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index 084efc94559..44fdd8c74bf 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -17,6 +17,8 @@ from __future__ import print_function import op_test import unittest import numpy as np + +import paddle import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard @@ -88,5 +90,18 @@ class TestCastOpError(unittest.TestCase): self.assertRaises(TypeError, test_dtype_type) +class TestCastOpErrorInDygraph(unittest.TestCase): + def test_non_support_out_dtype(self): + paddle.disable_static() + + with self.assertRaises(NotImplementedError): + tensor = paddle.randn([10, 10], 'float32') + core.ops.cast(tensor, 'in_dtype', core.VarDesc.VarType.FP32, + 'out_dtype', core.VarDesc.VarType.INT16) + + paddle.enable_static() + + if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py b/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py index 
25b885214cf..1b63ae2f681 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py +++ b/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py @@ -15,9 +15,11 @@ import unittest import numpy as np from numpy.random import random as rand -from paddle import complex as cpx + +import paddle import paddle.fluid as fluid import paddle.fluid.dygraph as dg +from paddle import complex as cpx layers = { "add": cpx.elementwise_add, @@ -26,121 +28,135 @@ layers = { "div": cpx.elementwise_div, } -fluid_layers = { - "add": fluid.layers.elementwise_add, - "sub": fluid.layers.elementwise_sub, - "mul": fluid.layers.elementwise_mul, - "div": fluid.layers.elementwise_div, +paddle_apis = { + "add": paddle.add, + "sub": paddle.subtract, + "mul": paddle.multiply, + "div": paddle.divide, } class TestComplexElementwiseLayers(unittest.TestCase): def setUp(self): - self._dtype = "float64" - self._places = [fluid.CPUPlace()] + self._dtypes = ["float32", "float64"] + self._places = [paddle.CPUPlace()] if fluid.core.is_compiled_with_cuda(): - self._places.append(fluid.CUDAPlace(0)) + self._places.append(paddle.CUDAPlace(0)) - def calc(self, x, y, layer_type, place): + def calc(self, x, y, op, place): with dg.guard(place): var_x = dg.to_variable(x) var_y = dg.to_variable(y) - return layers[layer_type](var_x, var_y).numpy() + return layers[op](var_x, var_y).numpy() - def fuild_calc(self, x, y, layer_type, place): + def paddle_calc(self, x, y, op, place): with dg.guard(place): - var_x = fluid.core.VarBase( + x_t = paddle.Tensor( value=x, - place=fluid.framework._current_expected_place(), + place=place, persistable=False, - zero_copy=None, - name='') - var_y = fluid.core.VarBase( + zero_copy=False, + stop_gradient=True) + y_t = paddle.Tensor( value=y, - place=fluid.framework._current_expected_place(), + place=place, persistable=False, - zero_copy=None, - name='') - return fluid_layers[layer_type](var_x, var_y).numpy() - - def compare(self, x, y): + zero_copy=False, + stop_gradient=True) + return paddle_apis[op](x_t, y_t).numpy() + + def assert_check(self, pd_result, np_result, place): + self.assertTrue( + np.allclose(pd_result, np_result), + "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n". 
+ format(place, pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)])) + + def compare_by_complex_api(self, x, y): for place in self._places: - self.assertTrue(np.allclose(self.calc(x, y, "add", place), x + y)) - self.assertTrue(np.allclose(self.calc(x, y, "sub", place), x - y)) - self.assertTrue(np.allclose(self.calc(x, y, "mul", place), x * y)) - self.assertTrue(np.allclose(self.calc(x, y, "div", place), x / y)) + self.assert_check(self.calc(x, y, "add", place), x + y, place) + self.assert_check(self.calc(x, y, "sub", place), x - y, place) + self.assert_check(self.calc(x, y, "mul", place), x * y, place) + self.assert_check(self.calc(x, y, "div", place), x / y, place) - def compare_1(self, x, y): + def compare_by_basic_api(self, x, y): for place in self._places: - self.assertTrue( - np.allclose(self.fuild_calc(x, y, "add", place), x + y)) - self.assertTrue( - np.allclose(self.fuild_calc(x, y, "sub", place), x - y)) - self.assertTrue( - np.allclose(self.fuild_calc(x, y, "mul", place), x * y)) - self.assertTrue( - np.allclose(self.fuild_calc(x, y, "div", place), x / y)) - - def compare_op(self, x, y): + self.assert_check( + self.paddle_calc(x, y, "add", place), x + y, place) + self.assert_check( + self.paddle_calc(x, y, "sub", place), x - y, place) + self.assert_check( + self.paddle_calc(x, y, "mul", place), x * y, place) + self.assert_check( + self.paddle_calc(x, y, "div", place), x / y, place) + + def compare_op_by_complex_api(self, x, y): for place in self._places: with dg.guard(place): var_x = dg.to_variable(x) var_y = dg.to_variable(y) - self.assertTrue(var_x + var_y, x + y) - self.assertTrue(var_x - var_y, x - y) - self.assertTrue(var_x * var_y, x * y) - self.assertTrue(var_x / var_y, x / y) + self.assert_check((var_x + var_y).numpy(), x + y, place) + self.assert_check((var_x - var_y).numpy(), x - y, place) + self.assert_check((var_x * var_y).numpy(), x * y, place) + self.assert_check((var_x / var_y).numpy(), x / y, place) - def compare_op_1(self, x, y): + def compare_op_by_basic_api(self, x, y): for place in self._places: with dg.guard(place): - var_x = fluid.core.VarBase( + x_t = paddle.Tensor( value=x, - place=fluid.framework._current_expected_place(), + place=place, persistable=False, - zero_copy=None, - name='') - var_y = fluid.core.VarBase( + zero_copy=False, + stop_gradient=True) + y_t = paddle.Tensor( value=y, - place=fluid.framework._current_expected_place(), + place=place, persistable=False, - zero_copy=None, - name='') - self.assertTrue(np.allclose((var_x + var_y).numpy(), x + y)) - self.assertTrue(np.allclose((var_x - var_y).numpy(), x - y)) - self.assertTrue(np.allclose((var_x * var_y).numpy(), x * y)) - self.assertTrue(np.allclose((var_x / var_y).numpy(), x / y)) + zero_copy=False, + stop_gradient=True) + self.assert_check((x_t + y_t).numpy(), x + y, place) + self.assert_check((x_t - y_t).numpy(), x - y, place) + self.assert_check((x_t * y_t).numpy(), x * y, place) + self.assert_check((x_t / y_t).numpy(), x / y, place) def test_complex_xy(self): - x = rand([2, 3, 4, 5]).astype(self._dtype) + 1j * rand( - [2, 3, 4, 5]).astype(self._dtype) - y = rand([2, 3, 4, 5]).astype(self._dtype) + 1j * rand( - [2, 3, 4, 5]).astype(self._dtype) - self.compare(x, y) - self.compare_op(x, y) - self.compare_1(x, y) - self.compare_op_1(x, y) + for dtype in self._dtypes: + x = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand( + [2, 3, 4, 5]).astype(dtype) + y = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand( + [2, 3, 4, 5]).astype(dtype) + + 
self.compare_by_complex_api(x, y) + self.compare_op_by_complex_api(x, y) + + self.compare_op_by_complex_api(x, y) + self.compare_op_by_basic_api(x, y) def test_complex_x_real_y(self): - x = rand([2, 3, 4, 5]).astype(self._dtype) + 1j * rand( - [2, 3, 4, 5]).astype(self._dtype) - y = rand([4, 5]).astype(self._dtype) - self.compare(x, y) - self.compare_op(x, y) + for dtype in self._dtypes: + x = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand( + [2, 3, 4, 5]).astype(dtype) + y = rand([4, 5]).astype(dtype) + + self.compare_by_complex_api(x, y) + self.compare_op_by_complex_api(x, y) + + # promote types cases + self.compare_by_basic_api(x, y) + self.compare_op_by_basic_api(x, y) def test_real_x_complex_y(self): - x = rand([2, 3, 4, 5]).astype(self._dtype) - y = rand([5]).astype(self._dtype) + 1j * rand([5]).astype(self._dtype) - self.compare(x, y) - self.compare_op(x, y) - - def test_complex64_xy(self): - x = rand([2, 3, 4, 5]).astype("float32") + 1j * rand( - [2, 3, 4, 5]).astype("float32") - y = rand([2, 3, 4, 5]).astype("float32") + 1j * rand( - [2, 3, 4, 5]).astype("float32") - self.compare_1(x, y) - self.compare_op_1(x, y) + for dtype in self._dtypes: + x = rand([2, 3, 4, 5]).astype(dtype) + y = rand([5]).astype(dtype) + 1j * rand([5]).astype(dtype) + + self.compare_by_complex_api(x, y) + self.compare_op_by_complex_api(x, y) + + # promote types cases + self.compare_by_basic_api(x, y) + self.compare_op_by_basic_api(x, y) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_complex_matmul.py b/python/paddle/fluid/tests/unittests/test_complex_matmul.py index 22861b07e3c..9f5a1d5fdd7 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_matmul.py +++ b/python/paddle/fluid/tests/unittests/test_complex_matmul.py @@ -21,21 +21,25 @@ import paddle.fluid.dygraph as dg class TestComplexMatMulLayer(unittest.TestCase): def setUp(self): + self._dtypes = ["float32", "float64"] self._places = [fluid.CPUPlace()] if fluid.core.is_compiled_with_cuda(): self._places.append(fluid.CUDAPlace(0)) - def compare_by_complex_api(self, x, y): - np_result = np.matmul(x, y) + def compare_by_complex_api(self, x, y, np_result): for place in self._places: with dg.guard(place): x_var = dg.to_variable(x) y_var = dg.to_variable(y) result = paddle.complex.matmul(x_var, y_var) - self.assertTrue(np.allclose(result.numpy(), np_result)) - - def compare_by_basic_api(self, x, y): - np_result = np.matmul(x, y) + pd_result = result.numpy() + self.assertTrue( + np.allclose(pd_result, np_result), + "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n". + format(place, pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)])) + + def compare_by_basic_api(self, x, y, np_result): for place in self._places: with dg.guard(place): x_var = fluid.core.VarBase( @@ -51,19 +55,27 @@ class TestComplexMatMulLayer(unittest.TestCase): zero_copy=None, name='') result = paddle.matmul(x_var, y_var) - self.assertTrue(np.allclose(result.numpy(), np_result)) - - def compare_op_by_complex_api(self, x, y): - np_result = np.matmul(x, y) + pd_result = result.numpy() + self.assertTrue( + np.allclose(pd_result, np_result), + "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n". 
+ format(place, pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)])) + + def compare_op_by_complex_api(self, x, y, np_result): for place in self._places: with dg.guard(place): x_var = dg.to_variable(x) y_var = dg.to_variable(y) result = x_var.matmul(y_var) - self.assertTrue(np.allclose(result.numpy(), np_result)) - - def compare_op_by_basic_api(self, x, y): - np_result = np.matmul(x, y) + pd_result = result.numpy() + self.assertTrue( + np.allclose(pd_result, np_result), + "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n". + format(place, pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)])) + + def compare_op_by_basic_api(self, x, y, np_result): for place in self._places: with dg.guard(place): x_var = fluid.core.VarBase( @@ -79,126 +91,89 @@ class TestComplexMatMulLayer(unittest.TestCase): zero_copy=None, name='') result = x_var.matmul(y_var) - self.assertTrue(np.allclose(result.numpy(), np_result)) + pd_result = result.numpy() + self.assertTrue( + np.allclose(pd_result, np_result), + "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n". + format(place, pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)])) def test_complex_xy(self): - x = np.random.random( - (2, 3, 4, 5)).astype("float32") + 1J * np.random.random( - (2, 3, 4, 5)).astype("float32") - y = np.random.random( - (2, 3, 5, 4)).astype("float32") + 1J * np.random.random( - (2, 3, 5, 4)).astype("float32") - self.compare_by_complex_api(x, y) - self.compare_op_by_complex_api(x, y) - self.compare_by_basic_api(x, y) - self.compare_op_by_basic_api(x, y) - - def test_complex_x(self): - x = np.random.random( - (2, 3, 4, 5)).astype("float32") + 1J * np.random.random( - (2, 3, 4, 5)).astype("float32") - y = np.random.random((2, 3, 5, 4)).astype("float32") - self.compare_by_complex_api(x, y) - self.compare_op_by_complex_api(x, y) - - def test_complex_y(self): - x = np.random.random((2, 3, 4, 5)).astype("float32") - y = np.random.random( - (2, 3, 5, 4)).astype("float32") + 1J * np.random.random( - (2, 3, 5, 4)).astype("float32") - self.compare_by_complex_api(x, y) - - def test_complex_xy_128(self): - x = np.random.random( - (2, 3, 4, 5)).astype("float64") + 1J * np.random.random( - (2, 3, 4, 5)).astype("float64") - y = np.random.random( - (2, 3, 5, 4)).astype("float64") + 1J * np.random.random( - (2, 3, 5, 4)).astype("float64") - self.compare_by_basic_api(x, y) - self.compare_op_by_basic_api(x, y) + for dtype in self._dtypes: + x = np.random.random( + (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random( + (2, 3, 4, 5)).astype(dtype) + y = np.random.random( + (2, 3, 5, 4)).astype(dtype) + 1J * np.random.random( + (2, 3, 5, 4)).astype(dtype) - def test_complex_xy_gemv(self): - x = np.random.random( - (2, 1, 100)).astype("float32") + 1J * np.random.random( - (2, 1, 100)).astype("float32") - y = np.random.random((100)).astype("float32") + 1J * np.random.random( - (100)).astype("float32") - self.compare_by_basic_api(x, y) - self.compare_op_by_basic_api(x, y) - - x = np.random.random( - (2, 1, 100)).astype("float64") + 1J * np.random.random( - (2, 1, 100)).astype("float64") - y = np.random.random((100)).astype("float64") + 1J * np.random.random( - (100)).astype("float64") - self.compare_by_basic_api(x, y) - self.compare_op_by_basic_api(x, y) - - def test_complex_xy_gemm_128(self): - x = np.random.random( - (1, 2, 50)).astype("float64") + 1J * np.random.random( - (1, 2, 50)).astype("float64") - y = 
np.random.random( - (1, 50, 2)).astype("float64") + 1J * np.random.random( - (1, 50, 2)).astype("float64") - self.compare_by_basic_api(x, y) - self.compare_op_by_basic_api(x, y) - - -class TestComplexMatMulLayerGEMM(unittest.TestCase): - def setUp(self): - self._places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self._places.append(fluid.CUDAPlace(0)) + np_result = np.matmul(x, y) - def compare_by_basic_api(self, x, y): - np_result = np.matmul(x, y) - for place in self._places: - with dg.guard(place): - x_var = fluid.core.VarBase( - value=x, - place=place, - persistable=False, - zero_copy=None, - name='') - y_var = fluid.core.VarBase( - value=y, - place=place, - persistable=False, - zero_copy=None, - name='') - result = paddle.matmul(x_var, y_var) - self.assertTrue(np.allclose(result.numpy(), np_result)) + self.compare_by_complex_api(x, y, np_result) + self.compare_op_by_complex_api(x, y, np_result) - def compare_op_by_basic_api(self, x, y): - np_result = np.matmul(x, y) - for place in self._places: - with dg.guard(place): - x_var = fluid.core.VarBase( - value=x, - place=place, - persistable=False, - zero_copy=None, - name='') - y_var = fluid.core.VarBase( - value=y, - place=place, - persistable=False, - zero_copy=None, - name='') - result = x_var.matmul(y_var) - self.assertTrue(np.allclose(result.numpy(), np_result)) - - def test_complex_xy_gemm_64(self): - x = np.random.random( - (1, 2, 50)).astype("float32") + 1J * np.random.random( - (1, 2, 50)).astype("float32") - y = np.random.random( - (1, 50, 2)).astype("float32") + 1J * np.random.random( - (1, 50, 2)).astype("float32") - self.compare_by_basic_api(x, y) - self.compare_op_by_basic_api(x, y) + self.compare_by_basic_api(x, y, np_result) + self.compare_op_by_basic_api(x, y, np_result) + + def test_complex_x_real_y(self): + for dtype in self._dtypes: + x = np.random.random( + (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random( + (2, 3, 4, 5)).astype(dtype) + y = np.random.random((2, 3, 5, 4)).astype(dtype) + + np_result = np.matmul(x, y) + + self.compare_by_complex_api(x, y, np_result) + self.compare_op_by_complex_api(x, y, np_result) + + # float -> complex type promotion + self.compare_by_basic_api(x, y, np_result) + self.compare_op_by_basic_api(x, y, np_result) + + def test_real_x_complex_y(self): + for dtype in self._dtypes: + x = np.random.random((2, 3, 4, 5)).astype(dtype) + y = np.random.random( + (2, 3, 5, 4)).astype(dtype) + 1J * np.random.random( + (2, 3, 5, 4)).astype(dtype) + + np_result = np.matmul(x, y) + + self.compare_by_complex_api(x, y, np_result) + + # float -> complex type promotion + self.compare_by_basic_api(x, y, np_result) + self.compare_op_by_basic_api(x, y, np_result) + + # for coverage + def test_complex_xy_gemv(self): + for dtype in self._dtypes: + x = np.random.random( + (2, 1, 100)).astype(dtype) + 1J * np.random.random( + (2, 1, 100)).astype(dtype) + y = np.random.random((100)).astype(dtype) + 1J * np.random.random( + (100)).astype(dtype) + + np_result = np.matmul(x, y) + + self.compare_by_basic_api(x, y, np_result) + self.compare_op_by_basic_api(x, y, np_result) + + # for coverage + def test_complex_xy_gemm(self): + for dtype in self._dtypes: + x = np.random.random( + (1, 2, 50)).astype(dtype) + 1J * np.random.random( + (1, 2, 50)).astype(dtype) + y = np.random.random( + (1, 50, 2)).astype(dtype) + 1J * np.random.random( + (1, 50, 2)).astype(dtype) + + np_result = np.matmul(x, y) + + self.compare_by_basic_api(x, y, np_result) + self.compare_op_by_basic_api(x, y, np_result) if __name__ 
== '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py index 4795b493015..e908f1a60a0 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py @@ -262,6 +262,15 @@ class TestMathOpPatchesVarBase(unittest.TestCase): res = a + b self.assertTrue(np.array_equal(res.numpy(), a_np + b_np)) + def test_floordiv_different_dtype(self): + a_np = np.full(self.shape, 10, np.int64) + b_np = np.full(self.shape, 2, np.int32) + with fluid.dygraph.guard(): + a = paddle.to_tensor(a_np) + b = paddle.to_tensor(b_np) + res = a // b + self.assertTrue(np.array_equal(res.numpy(), a_np // b_np)) + def test_astype(self): a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype) with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py index 72e5a4453f2..b839272ccf0 100755 --- a/python/paddle/fluid/tests/unittests/test_multiply.py +++ b/python/paddle/fluid/tests/unittests/test_multiply.py @@ -127,41 +127,41 @@ class TestMultiplyError(unittest.TestCase): y = paddle.to_tensor(y_data) self.assertRaises(ValueError, paddle.multiply, x, y) - # test dynamic computation graph: dtype must be same + # test dynamic computation graph: dtype must be same x_data = np.random.randn(200).astype(np.int64) y_data = np.random.randn(200).astype(np.float64) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) - self.assertRaises(TypeError, paddle.multiply, x, y) + self.assertRaises(ValueError, paddle.multiply, x, y) # test dynamic computation graph: dtype must be Tensor type x_data = np.random.randn(200).astype(np.int64) y_data = np.random.randn(200).astype(np.float64) y = paddle.to_tensor(y_data) - self.assertRaises(TypeError, paddle.multiply, x_data, y) + self.assertRaises(ValueError, paddle.multiply, x_data, y) # test dynamic computation graph: dtype must be Tensor type x_data = np.random.randn(200).astype(np.int64) y_data = np.random.randn(200).astype(np.float64) x = paddle.to_tensor(x_data) - self.assertRaises(TypeError, paddle.multiply, x, y_data) + self.assertRaises(ValueError, paddle.multiply, x, y_data) # test dynamic computation graph: dtype must be Tensor type x_data = np.random.randn(200).astype(np.float32) y_data = np.random.randn(200).astype(np.float32) x = paddle.to_tensor(x_data) - self.assertRaises(TypeError, paddle.multiply, x, y_data) + self.assertRaises(ValueError, paddle.multiply, x, y_data) # test dynamic computation graph: dtype must be Tensor type x_data = np.random.randn(200).astype(np.float32) y_data = np.random.randn(200).astype(np.float32) x = paddle.to_tensor(x_data) - self.assertRaises(TypeError, paddle.multiply, x_data, y) + self.assertRaises(ValueError, paddle.multiply, x_data, y) # test dynamic computation graph: dtype must be Tensor type x_data = np.random.randn(200).astype(np.float32) y_data = np.random.randn(200).astype(np.float32) - self.assertRaises(TypeError, paddle.multiply, x_data, y_data) + self.assertRaises(ValueError, paddle.multiply, x_data, y_data) if __name__ == '__main__': diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index cdb7561dba2..88af78bf993 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -504,19 +504,15 @@ def multiply(x, y, name=None): act = None axis = -1 + if in_dygraph_mode(): + return _elementwise_op_in_dygraph( + x, y, 
axis=axis, act=act, op_name=op_type) + if x.dtype != y.dtype: raise TypeError( 'Input tensors must be same type, but received type of x: %s, type of y: %s ' % (x.dtype, y.dtype)) - if in_dygraph_mode(): - if not isinstance(x, (paddle.Tensor)): - raise TypeError( - 'Input x must tensor type, but received type of x: %s' - % (x.dtype)) - - return _elementwise_op_in_dygraph( - x, y, axis=axis, act=act, op_name=op_type) return _elementwise_op(LayerHelper(op_type, **locals())) def maximum(x, y, name=None): -- GitLab
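The core of the patch is PromoteTypesIfComplexExists in paddle/fluid/framework/data_type.cc: both operand dtypes are mapped onto indices 0..3 and the result dtype is read from a 4x4 table, falling back to the first input's dtype when neither operand is complex. The following is a minimal pure-Python sketch of that rule, for reference only; it is not part of the patch and the helper names are ad hoc, but the enum values (FP32=5, FP64=6, COMPLEX64=23, COMPLEX128=24) and the table entries are taken from the C++ code above.

    # Illustrative re-statement of the C++ promotion rule added in this patch.
    # Enum values mirror the comments in data_type.cc; names here are ad hoc.
    FP32, FP64, COMPLEX64, COMPLEX128 = 5, 6, 23, 24
    _COMPLEX = {COMPLEX64, COMPLEX128}

    def _align(t):
        # Same mapping as DataTypeNumAlign: f4 -> 0, f8 -> 1, c4 -> 2, c8 -> 3.
        if t in (FP32, FP64):
            return t - 5
        if t in _COMPLEX:
            return t - 21
        raise ValueError("only float32/float64/complex64/complex128 are supported")

    _PROMOTE_TABLE = [
        #  f4          f8          c4          c8
        [FP32,       FP64,       COMPLEX64,  COMPLEX128],   # f4
        [FP64,       FP64,       COMPLEX128, COMPLEX128],   # f8
        [COMPLEX64,  COMPLEX128, COMPLEX64,  COMPLEX128],   # c4
        [COMPLEX128, COMPLEX128, COMPLEX128, COMPLEX128],   # c8
    ]

    def promote_types_if_complex_exists(type_a, type_b):
        if type_a not in _COMPLEX and type_b not in _COMPLEX:
            # No complex operand: keep the original behaviour and take the
            # kernel dtype from the first input.
            return type_a
        return _PROMOTE_TABLE[_align(type_a)][_align(type_b)]

    assert promote_types_if_complex_exists(FP32, COMPLEX64) == COMPLEX64
    assert promote_types_if_complex_exists(FP64, COMPLEX64) == COMPLEX128
    assert promote_types_if_complex_exists(FP32, FP64) == FP32  # first input wins

Reading the result from a static table rather than an if-else chain is the design choice called out in the data_type.cc comment; the unused float-only rows are kept so the lookup stays uniform.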
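At the Python level, the patched dygraph math-op methods promote both operands before dispatching, so mixed real/complex arithmetic works transparently. The snippet below is a usage sketch modeled on the unit tests in this patch; it assumes a PaddlePaddle build that already contains this change, and the tensor/variable names are illustrative.

    import numpy as np
    import paddle
    import paddle.fluid.dygraph as dg

    place = paddle.CPUPlace()
    x = np.random.rand(2, 3, 4, 5).astype("float32")
    y = (np.random.rand(2, 3, 4, 5)
         + 1j * np.random.rand(2, 3, 4, 5)).astype("complex64")

    with dg.guard(place):
        x_t = paddle.Tensor(value=x, place=place, persistable=False,
                            zero_copy=False, stop_gradient=True)
        y_t = paddle.Tensor(value=y, place=place, persistable=False,
                            zero_copy=False, stop_gradient=True)

        # float32 (*) complex64 is promoted to complex64 by the new rule.
        out = x_t * y_t
        assert np.allclose(out.numpy(), x * y)

        # matmul_v2 goes through IndicateOrPromoteVarDataTypes as well; the
        # newly added __matmul__ routes the '@' operator to the same op.
        a = np.random.rand(4, 5).astype("float64")
        b = (np.random.rand(5, 3) + 1j * np.random.rand(5, 3)).astype("complex128")
        a_t = paddle.Tensor(value=a, place=place, persistable=False,
                            zero_copy=False, stop_gradient=True)
        b_t = paddle.Tensor(value=b, place=place, persistable=False,
                            zero_copy=False, stop_gradient=True)
        out2 = a_t.matmul(b_t)  # expected dtype: complex128
        assert np.allclose(out2.numpy(), np.matmul(a, b))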
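Because cast now registers complex64/complex128 kernels, the promotion path can rely on astype/cast to lift real tensors into the complex domain. A quick dygraph check of that kernel, mirroring the core.ops call pattern used in TestCastOpErrorInDygraph above (illustrative only; assumes the same build as the previous sketch):

    import paddle
    import paddle.fluid.core as core

    paddle.disable_static()
    t = paddle.randn([4, 4], 'float32')
    # The complex64/complex128 branches added in cast_op.h handle this target;
    # still-unsupported targets (e.g. INT16 in the test above) now raise
    # instead of returning an uninitialized tensor.
    c = core.ops.cast(t, 'in_dtype', core.VarDesc.VarType.FP32,
                      'out_dtype', core.VarDesc.VarType.COMPLEX64)
    print(c.dtype)  # expected: VarType.COMPLEX64
    paddle.enable_static()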