From 10188e8f95d0295fd1d51fb2dc442b1603c5c956 Mon Sep 17 00:00:00 2001
From: xiaoguoguo626807 <100397923+xiaoguoguo626807@users.noreply.github.com>
Date: Tue, 13 Jun 2023 14:26:43 +0800
Subject: [PATCH] 【prim】delete multiply_triple_grad dygraph path  (#54558)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* mutiply_triple delete
* add case
* add timeout
---
 .../eager_manual/nodes/multiply_node.cc       | 398 +-----
 .../api/manual/eager_manual/nodes/nodes.h     |  63 ---
 test/prim/prim/vjp/CMakeLists.txt             |   2 +-
 test/prim/prim/vjp/test_comp_high_grad.py     |   3 -
 4 files changed, 17 insertions(+), 449 deletions(-)

diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
index bb0b8c08e78..44c646e5be4 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
@@ -349,35 +349,22 @@ MultiplyDoubleGradNode::operator()(
 
   // Call grad_api function
 
-  if (paddle::prim::PrimCommonUtils::IsEagerPrimEnabled()) {
-    bool original_global_grad = egr::Controller::Instance().HasGrad();
-    if (!create_graph) {
-      egr::Controller::Instance().SetHasGrad(create_graph);
-    }
-    paddle::prim::multiply_double_grad(x,
-                                       y,
-                                       fwd_grad_out,
-                                       fwd_grad_grad_x_optional,
-                                       fwd_grad_grad_y_optional,
-                                       axis,
-                                       api_output_0,
-                                       api_output_1,
-                                       api_output_2);
-    VLOG(4) << "Composite api multiply_double_grad is called ";
-    if (!create_graph) {
-      egr::Controller::Instance().SetHasGrad(original_global_grad);
-    }
-  } else {
-    paddle::experimental::multiply_double_grad(x,
-                                               y,
-                                               fwd_grad_out,
-                                               fwd_grad_grad_x_optional,
-                                               fwd_grad_grad_y_optional,
-                                               axis,
-                                               api_output_0,
-                                               api_output_1,
-                                               api_output_2);
-    VLOG(4) << "Fused api multiply_double_grad is called ";
+  bool original_global_grad = egr::Controller::Instance().HasGrad();
+  if (!create_graph) {
+    egr::Controller::Instance().SetHasGrad(create_graph);
+  }
+  paddle::prim::multiply_double_grad(x,
+                                     y,
+                                     fwd_grad_out,
+                                     fwd_grad_grad_x_optional,
+                                     fwd_grad_grad_y_optional,
+                                     axis,
+                                     api_output_0,
+                                     api_output_1,
+                                     api_output_2);
+  VLOG(4) << "Composite api multiply_double_grad is called ";
+  if (!create_graph) {
+    egr::Controller::Instance().SetHasGrad(original_global_grad);
   }
 
   // Check NaN and Inf id needed
@@ -417,56 +404,6 @@ MultiplyDoubleGradNode::operator()(
 
   // Create Grad Node
 
-  if (!paddle::prim::PrimCommonUtils::IsEagerPrimEnabled()) {
-    if (trace_backward) {
-      paddle::platform::RecordEvent node_creation_record_event(
-          "multiply_double_grad node_creation",
-          paddle::platform::TracerEventType::OperatorInner,
-          1);
-
-      // Node Construction
-      auto grad_node = std::shared_ptr<MultiplyTripleGradNode>(
-          new MultiplyTripleGradNode(3, 5));
-      // SetAttributes if needed
-      grad_node->SetAttributeaxis(-1);
-      // Set TensorWrappers for Forward Inputs if needed
-      grad_node->SetTensorWrapperx(x);
-      grad_node->SetTensorWrappery(y);
-      grad_node->SetTensorWrapperfwd_grad_out(fwd_grad_out);
-      grad_node->SetTensorWrapperfwd_grad_grad_x(fwd_grad_grad_x);
-      grad_node->SetTensorWrapperfwd_grad_grad_y(fwd_grad_grad_y);
-      // SetGradOutMeta & SetEdges
-      grad_node->SetGradOutMeta(x, 0);
-      grad_node->SetGradOutMeta(y, 1);
-      grad_node->SetGradOutMeta(fwd_grad_out, 2);
-      grad_node->SetGradOutMeta(fwd_grad_grad_x, 3);
-      grad_node->SetGradOutMeta(fwd_grad_grad_y, 4);
-      // SetOutRank & SetHistory & SetGradInMeta
-      if (grad_x_autograd_meta) {
-        egr::EagerUtils::SetOutRankWithSlot(grad_x_autograd_meta, 0);
-      }
-      if (grad_y_autograd_meta) {
-        egr::EagerUtils::SetOutRankWithSlot(grad_y_autograd_meta, 1);
-      }
-      if (grad_grad_out_autograd_meta) {
-        egr::EagerUtils::SetOutRankWithSlot(grad_grad_out_autograd_meta, 2);
-      }
-      if (grad_x_autograd_meta) {
-        egr::EagerUtils::SetHistory(grad_x_autograd_meta, grad_node);
-      }
-      if (grad_y_autograd_meta) {
-        egr::EagerUtils::SetHistory(grad_y_autograd_meta, grad_node);
-      }
-      if (grad_grad_out_autograd_meta) {
-        egr::EagerUtils::SetHistory(grad_grad_out_autograd_meta, grad_node);
-      }
-      grad_node->SetGradInMeta(grad_x, 0);
-      grad_node->SetGradInMeta(grad_y, 1);
-      grad_node->SetGradInMeta(grad_grad_out, 2);
-      // Set TensorWrappers for Forward Outputs if needed
-    }
-  }
-
   VLOG(4) << "Finish AD API GRAD: multiply_double_grad";
 
   // LOG IF DEBUG
@@ -521,309 +458,6 @@ MultiplyDoubleGradNode::operator()(
   return returns;
 }
 
-paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
-MultiplyTripleGradNode::operator()(
-    paddle::small_vector<std::vector<paddle::Tensor>,
-                         egr::kSlotSmallVectorSize>& grads,
-    bool create_graph,
-    bool is_new_grad) {
-  VLOG(3) << "Running AD API GRAD: "
-          << "multiply_triple_grad";
-  // Fill Zero For GradIn Tensors
-  const auto& input_metas = this->InputMeta();
-  egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[0][0],
-                                                     input_metas[0][0]);
-  egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[1][0],
-                                                     input_metas[1][0]);
-  egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[2][0],
-                                                     input_metas[2][0]);
-
-  // Apply Gradient Hooks
-  auto hooked_grads = ApplyGradientHooks(grads);
-
-  // Collect GradIn Tensors, Attrs and Recovered TensorWrappers
-  auto x = egr::EagerUtils::RecoverTensorWrapper(&this->x_);
-  auto y = egr::EagerUtils::RecoverTensorWrapper(&this->y_);
-  auto fwd_grad_out =
-      egr::EagerUtils::RecoverTensorWrapper(&this->fwd_grad_out_);
-  auto fwd_grad_grad_x =
-      egr::EagerUtils::RecoverTensorWrapper(&this->fwd_grad_grad_x_);
-
-  paddle::optional<paddle::Tensor> fwd_grad_grad_x_optional;
-  if (fwd_grad_grad_x.impl())
-    fwd_grad_grad_x_optional =
-        paddle::make_optional<paddle::Tensor>(fwd_grad_grad_x);
-
-  auto fwd_grad_grad_y =
-      egr::EagerUtils::RecoverTensorWrapper(&this->fwd_grad_grad_y_);
-
-  paddle::optional<paddle::Tensor> fwd_grad_grad_y_optional;
-  if (fwd_grad_grad_y.impl())
-    fwd_grad_grad_y_optional =
-        paddle::make_optional<paddle::Tensor>(fwd_grad_grad_y);
-
-  auto& grad_x_grad = hooked_grads[0][0];
-
-  paddle::optional<paddle::Tensor> grad_x_grad_optional;
-  if (grad_x_grad.initialized())
-    grad_x_grad_optional = paddle::make_optional<paddle::Tensor>(grad_x_grad);
-
-  auto& grad_y_grad = hooked_grads[1][0];
-
-  paddle::optional<paddle::Tensor> grad_y_grad_optional;
-  if (grad_y_grad.initialized())
-    grad_y_grad_optional = paddle::make_optional<paddle::Tensor>(grad_y_grad);
-
-  auto& grad_grad_out_grad = hooked_grads[2][0];
-
-  paddle::optional<paddle::Tensor> grad_grad_out_grad_optional;
-  if (grad_grad_out_grad.initialized())
-    grad_grad_out_grad_optional =
-        paddle::make_optional<paddle::Tensor>(grad_grad_out_grad);
-
-  auto& axis = this->axis_;
-  // Prepare Grad function call
-
-  const auto& out_metas = OutputMeta();
-  paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
-      returns(5);
-  for (int i = 0; i < 5; ++i) {
-    out_metas[i].size() == 0 ? returns[i].resize(1)
-                             : returns[i].resize(out_metas[i].size());
-  }
-
-  auto* api_output_0 =
-      (out_metas[0].empty() || out_metas[0][0].IsStopGradient())
-          ? nullptr
-          : &returns[0][0];
-  auto* api_output_1 =
-      (out_metas[1].empty() || out_metas[1][0].IsStopGradient())
-          ? nullptr
-          : &returns[1][0];
-  auto* api_output_2 =
-      (out_metas[2].empty() || out_metas[2][0].IsStopGradient())
-          ? nullptr
-          : &returns[2][0];
-  auto* api_output_3 =
-      (out_metas[3].empty() || out_metas[3][0].IsStopGradient())
-          ? nullptr
-          : &returns[3][0];
-  auto* api_output_4 =
-      (out_metas[4].empty() || out_metas[4][0].IsStopGradient())
-          ? nullptr
-          : &returns[4][0];
-  // Runtime check if we need next grad
-  bool trace_backward = egr::Controller::Instance().HasGrad() && create_graph;
-
-  // Inplace Check
-
-  // Inplace Strategy
-
-  VLOG(5) << "Running C++ API: "
-          << "multiply_triple_grad";
-  // Before log info
-
-  if (VLOG_IS_ON(3)) {
-    const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s]} ";
-
-    std::string input_str = "";
-    std::string output_str = "";
-    const char* TENSOR_GRAD_X_GRAD_TEMPLATE = " \n( grad_x_grad , [%s]), ";
-    std::string input_grad_x_grad_str = paddle::string::Sprintf(
-        TENSOR_GRAD_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_x_grad));
-    input_str += input_grad_x_grad_str;
-    const char* TENSOR_GRAD_Y_GRAD_TEMPLATE = " \n( grad_y_grad , [%s]), ";
-    std::string input_grad_y_grad_str = paddle::string::Sprintf(
-        TENSOR_GRAD_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_y_grad));
-    input_str += input_grad_y_grad_str;
-    const char* TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE =
-        " \n( grad_grad_out_grad , [%s]), ";
-    std::string input_grad_grad_out_grad_str =
-        paddle::string::Sprintf(TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE,
-                                egr::EagerUtils::TensorStr(grad_grad_out_grad));
-    input_str += input_grad_grad_out_grad_str;
-    const char* TENSOR_X_TEMPLATE = " \n( x , [%s]), ";
-    std::string input_x_str = paddle::string::Sprintf(
-        TENSOR_X_TEMPLATE, egr::EagerUtils::TensorStr(x));
-    input_str += input_x_str;
-    const char* TENSOR_Y_TEMPLATE = " \n( y , [%s]), ";
-    std::string input_y_str = paddle::string::Sprintf(
-        TENSOR_Y_TEMPLATE, egr::EagerUtils::TensorStr(y));
-    input_str += input_y_str;
-    const char* TENSOR_FWD_GRAD_OUT_TEMPLATE = " \n( fwd_grad_out , [%s]), ";
-    std::string input_fwd_grad_out_str = paddle::string::Sprintf(
-        TENSOR_FWD_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_out));
-    input_str += input_fwd_grad_out_str;
-    const char* TENSOR_FWD_GRAD_GRAD_X_TEMPLATE =
-        " \n( fwd_grad_grad_x , [%s]), ";
-    std::string input_fwd_grad_grad_x_str =
-        paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_X_TEMPLATE,
-                                egr::EagerUtils::TensorStr(fwd_grad_grad_x));
-    input_str += input_fwd_grad_grad_x_str;
-    const char* TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE =
-        " \n( fwd_grad_grad_y , [%s]), ";
-    std::string input_fwd_grad_grad_y_str =
-        paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE,
-                                egr::EagerUtils::TensorStr(fwd_grad_grad_y));
-    input_str += input_fwd_grad_grad_y_str;
-    VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
-  }
-
-  // Call grad_api function
-
-  paddle::experimental::multiply_triple_grad(x,
-                                             y,
-                                             fwd_grad_out,
-                                             fwd_grad_grad_x_optional,
-                                             fwd_grad_grad_y_optional,
-                                             grad_x_grad_optional,
-                                             grad_y_grad_optional,
-                                             grad_grad_out_grad_optional,
-                                             axis,
-                                             api_output_0,
-                                             api_output_1,
-                                             api_output_2,
-                                             api_output_3,
-                                             api_output_4);
-  // Check NaN and Inf id needed
-
-  if (FLAGS_check_nan_inf) {
-    try {
-      egr::CheckTensorHasNanOrInf("multiply_triple_grad", returns);
-    } catch (...) {
-      LOG(WARNING) << "There are nan/inf in (multiply_triple_grad)";
-      auto forward_trace = GetForwardTrace();
-      std::cout << forward_trace << std::endl;
-      std::rethrow_exception(std::current_exception());
-    }
-  }
-
-  // Get GradOut autograd_meta
-
-  auto& x_grad = returns[0][0];
-  egr::AutogradMeta* x_grad_autograd_meta =
-      returns[0][0].initialized() ? egr::EagerUtils::autograd_meta(&x_grad)
-                                  : nullptr;
-  if (x_grad_autograd_meta) x_grad_autograd_meta->SetStopGradient(false);
-
-  auto& y_grad = returns[1][0];
-  egr::AutogradMeta* y_grad_autograd_meta =
-      returns[1][0].initialized() ? egr::EagerUtils::autograd_meta(&y_grad)
-                                  : nullptr;
-  if (y_grad_autograd_meta) y_grad_autograd_meta->SetStopGradient(false);
-
-  auto& fwd_grad_out_grad = returns[2][0];
-  egr::AutogradMeta* fwd_grad_out_grad_autograd_meta =
-      returns[2][0].initialized()
-          ? egr::EagerUtils::autograd_meta(&fwd_grad_out_grad)
-          : nullptr;
-  if (fwd_grad_out_grad_autograd_meta)
-    fwd_grad_out_grad_autograd_meta->SetStopGradient(false);
-
-  auto& fwd_grad_grad_x_grad = returns[3][0];
-  egr::AutogradMeta* fwd_grad_grad_x_grad_autograd_meta =
-      returns[3][0].initialized()
-          ? egr::EagerUtils::autograd_meta(&fwd_grad_grad_x_grad)
-          : nullptr;
-  if (fwd_grad_grad_x_grad_autograd_meta)
-    fwd_grad_grad_x_grad_autograd_meta->SetStopGradient(false);
-
-  auto& fwd_grad_grad_y_grad = returns[4][0];
-  egr::AutogradMeta* fwd_grad_grad_y_grad_autograd_meta =
-      returns[4][0].initialized()
-          ? egr::EagerUtils::autograd_meta(&fwd_grad_grad_y_grad)
-          : nullptr;
-  if (fwd_grad_grad_y_grad_autograd_meta)
-    fwd_grad_grad_y_grad_autograd_meta->SetStopGradient(false);
-
-  // Create Grad Node
-  if (trace_backward) {
-    PADDLE_THROW(phi::errors::Unavailable(
-        "The Op multiply_triple_grad doesn't have any grad"
-        "op. If you don't intend calculating higher order"
-        "derivatives, please set `create_graph`to False."));
-  }
-  VLOG(4) << "Finish AD API GRAD: multiply_triple_grad";
-  // LOG IF DEBUG
-
-  if (VLOG_IS_ON(4)) {
-    const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
-
-    std::string input_str = "";
-    std::string output_str = "";
-    const char* TENSOR_GRAD_X_GRAD_TEMPLATE = " \n( grad_x_grad , [%s]), ";
-    std::string input_grad_x_grad_str = paddle::string::Sprintf(
-        TENSOR_GRAD_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_x_grad));
-    input_str += input_grad_x_grad_str;
-    const char* TENSOR_GRAD_Y_GRAD_TEMPLATE = " \n( grad_y_grad , [%s]), ";
-    std::string input_grad_y_grad_str = paddle::string::Sprintf(
-        TENSOR_GRAD_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_y_grad));
-    input_str += input_grad_y_grad_str;
-    const char* TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE =
-        " \n( grad_grad_out_grad , [%s]), ";
-    std::string input_grad_grad_out_grad_str =
-        paddle::string::Sprintf(TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE,
-                                egr::EagerUtils::TensorStr(grad_grad_out_grad));
-    input_str += input_grad_grad_out_grad_str;
-    const char* TENSOR_X_TEMPLATE = " \n( x , [%s]), ";
-    std::string input_x_str = paddle::string::Sprintf(
-        TENSOR_X_TEMPLATE, egr::EagerUtils::TensorStr(x));
-    input_str += input_x_str;
-    const char* TENSOR_Y_TEMPLATE = " \n( y , [%s]), ";
-    std::string input_y_str = paddle::string::Sprintf(
-        TENSOR_Y_TEMPLATE, egr::EagerUtils::TensorStr(y));
-    input_str += input_y_str;
-    const char* TENSOR_FWD_GRAD_OUT_TEMPLATE = " \n( fwd_grad_out , [%s]), ";
-    std::string input_fwd_grad_out_str = paddle::string::Sprintf(
-        TENSOR_FWD_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_out));
-    input_str += input_fwd_grad_out_str;
-    const char* TENSOR_FWD_GRAD_GRAD_X_TEMPLATE =
-        " \n( fwd_grad_grad_x , [%s]), ";
-    std::string input_fwd_grad_grad_x_str =
-        paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_X_TEMPLATE,
-                                egr::EagerUtils::TensorStr(fwd_grad_grad_x));
-    input_str += input_fwd_grad_grad_x_str;
-    const char* TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE =
-        " \n( fwd_grad_grad_y , [%s]), ";
-    std::string input_fwd_grad_grad_y_str =
-        paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE,
-                                egr::EagerUtils::TensorStr(fwd_grad_grad_y));
-    input_str += input_fwd_grad_grad_y_str;
-    const char* TENSOR_X_GRAD_TEMPLATE = " \n ( x_grad , [%s]), ";
-    std::string output_x_grad_str = paddle::string::Sprintf(
-        TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(x_grad));
-    output_str += output_x_grad_str;
-    const char* TENSOR_Y_GRAD_TEMPLATE = " \n ( y_grad , [%s]), ";
-    std::string output_y_grad_str = paddle::string::Sprintf(
-        TENSOR_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(y_grad));
-    output_str += output_y_grad_str;
-    const char* TENSOR_FWD_GRAD_OUT_GRAD_TEMPLATE =
-        " \n ( fwd_grad_out_grad , [%s]), ";
-    std::string output_fwd_grad_out_grad_str =
-        paddle::string::Sprintf(TENSOR_FWD_GRAD_OUT_GRAD_TEMPLATE,
-                                egr::EagerUtils::TensorStr(fwd_grad_out_grad));
-    output_str += output_fwd_grad_out_grad_str;
-    const char* TENSOR_FWD_GRAD_GRAD_X_GRAD_TEMPLATE =
-        " \n ( fwd_grad_grad_x_grad , [%s]), ";
-    std::string output_fwd_grad_grad_x_grad_str = paddle::string::Sprintf(
-        TENSOR_FWD_GRAD_GRAD_X_GRAD_TEMPLATE,
-        egr::EagerUtils::TensorStr(fwd_grad_grad_x_grad));
-    output_str += output_fwd_grad_grad_x_grad_str;
-    const char* TENSOR_FWD_GRAD_GRAD_Y_GRAD_TEMPLATE =
-        " \n ( fwd_grad_grad_y_grad , [%s]), ";
-    std::string output_fwd_grad_grad_y_grad_str = paddle::string::Sprintf(
-        TENSOR_FWD_GRAD_GRAD_Y_GRAD_TEMPLATE,
-        egr::EagerUtils::TensorStr(fwd_grad_grad_y_grad));
-    output_str += output_fwd_grad_grad_y_grad_str;
-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
-  }
-
-  // Return
-  if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
-  return returns;
-}
-
 namespace sparse {
 paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
 MultiplyGradNode::operator()(
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h b/paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h
index 6925c3de505..8f63f4fdfeb 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h
@@ -312,69 +312,6 @@ class MultiplyDoubleGradNode : public egr::GradNodeBase {
   int axis_ = -1;
 };
 
-class MultiplyTripleGradNode : public egr::GradNodeBase {
- public:
-  MultiplyTripleGradNode() : egr::GradNodeBase() {}
-  MultiplyTripleGradNode(size_t bwd_in_slot_num, size_t bwd_out_slot_num)
-      : egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {}
-  ~MultiplyTripleGradNode() override = default;
-
-  virtual paddle::small_vector<std::vector<paddle::Tensor>,
-                               egr::kSlotSmallVectorSize>
-  operator()(paddle::small_vector<std::vector<paddle::Tensor>,  // NOLINT
-                                  egr::kSlotSmallVectorSize>& grads,  // NOLINT
-             bool create_graph = false,
-             bool is_new_grad = false) override;
-  std::string name() override { return "MultiplyTripleGradNode"; }
-
-  void ClearTensorWrappers() override {
-    x_.clear();
-    y_.clear();
-    fwd_grad_out_.clear();
-    fwd_grad_grad_x_.clear();
-    fwd_grad_grad_y_.clear();
-
-    SetIsTensorWrappersCleared(true);
-  }
-
-  std::shared_ptr<GradNodeBase> Copy() const override {
-    auto copied_node = std::shared_ptr<MultiplyTripleGradNode>(
-        new MultiplyTripleGradNode(*this));
-    return copied_node;
-  }
-
-  // SetTensorWrapperX, SetTensorWrapperY, ...
-  void SetTensorWrapperx(const paddle::Tensor& x) {
-    x_ = egr::TensorWrapper(x, false);
-  }
-  void SetTensorWrappery(const paddle::Tensor& y) {
-    y_ = egr::TensorWrapper(y, false);
-  }
-  void SetTensorWrapperfwd_grad_out(const paddle::Tensor& fwd_grad_out) {
-    fwd_grad_out_ = egr::TensorWrapper(fwd_grad_out, false);
-  }
-  void SetTensorWrapperfwd_grad_grad_x(const paddle::Tensor& fwd_grad_grad_x) {
-    fwd_grad_grad_x_ = egr::TensorWrapper(fwd_grad_grad_x, false);
-  }
-  void SetTensorWrapperfwd_grad_grad_y(const paddle::Tensor& fwd_grad_grad_y) {
-    fwd_grad_grad_y_ = egr::TensorWrapper(fwd_grad_grad_y, false);
-  }
-
-  // SetAttributes
-  void SetAttributeaxis(const int& axis) { axis_ = axis; }
-
- private:
-  // TensorWrappers
-  egr::TensorWrapper x_;
-  egr::TensorWrapper y_;
-  egr::TensorWrapper fwd_grad_out_;
-  egr::TensorWrapper fwd_grad_grad_x_;
-  egr::TensorWrapper fwd_grad_grad_y_;
-
-  // Attributes
-  int axis_ = -1;
-};
-
 class SyncBatchNormGradNode : public egr::GradNodeBase {
  public:
   SyncBatchNormGradNode() : egr::GradNodeBase() {}
diff --git a/test/prim/prim/vjp/CMakeLists.txt b/test/prim/prim/vjp/CMakeLists.txt
index c7cae170629..84084b0b4d9 100644
--- a/test/prim/prim/vjp/CMakeLists.txt
+++ b/test/prim/prim/vjp/CMakeLists.txt
@@ -8,7 +8,7 @@ foreach(TEST_OP ${TEST_OPS})
   py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
 endforeach()
 
-set_tests_properties(test_comp_high_grad PROPERTIES TIMEOUT 50)
+set_tests_properties(test_comp_high_grad PROPERTIES TIMEOUT 100)
 
 add_subdirectory(eager)
 add_subdirectory(static)
diff --git a/test/prim/prim/vjp/test_comp_high_grad.py b/test/prim/prim/vjp/test_comp_high_grad.py
index e754454ae1b..c080d4d7231 100644
--- a/test/prim/prim/vjp/test_comp_high_grad.py
+++ b/test/prim/prim/vjp/test_comp_high_grad.py
@@ -226,7 +226,6 @@ class TestSubtractHighGradCheck(unittest.TestCase):
             self.func_triple(p)
 
 
-'''
 @param.parameterized_class(
     ('shape1', 'shape2'),
     [
@@ -330,8 +329,6 @@ class TestMultiplyHighGradCheck(unittest.TestCase):
             self.func_double(p)
             self.func_triple(p)
 
-'''
-
 @param.parameterized_class(
     ('shape1'),
--
GitLab
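With this patch, the eager double-grad path for multiply always calls the composite rule paddle::prim::multiply_double_grad, and the hand-written MultiplyTripleGradNode is removed; the re-enabled TestMultiplyHighGradCheck covers the higher-order case. The sketch below is illustrative only and is not part of the patch: it shows the nested-gradient usage pattern in dygraph mode through the public paddle.grad API, with shapes and the number of grad calls chosen purely for the example.

```python
import paddle

# Illustrative sketch (not part of the patch): nested dygraph gradients of
# out = x * y, the pattern served by the multiply double-grad path.
x = paddle.rand([3, 4])
y = paddle.rand([3, 4])
x.stop_gradient = False
y.stop_gradient = False

out = paddle.multiply(x, y)

# First-order gradient; create_graph=True keeps the backward graph
# differentiable so a higher-order gradient can be taken afterwards.
(dx,) = paddle.grad(out, [x], create_graph=True)  # dx == y elementwise

# Second-order gradient d(dx)/dy, which exercises the double-grad node;
# with the implicit all-ones grad_outputs this comes out as a tensor of ones.
(dxy,) = paddle.grad(dx, [y])
print(dxy)
```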