Unverified commit 81dec05a authored by J jakpiase, committed by GitHub

Fix for failing CI(test_activation_mkldnn_op.py) (#34329)

* fixed CI failing

* removed unnecessary imports
Parent: 577fdde5
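Context note (not part of the commit itself): the diff below moves the @OpTestTool.skip_if_not_cpu_bf16() decorator from the config() method onto the test class. A minimal sketch of the two placements, assuming the decorator behaves like a standard unittest skip decorator (that behaviour is my assumption; the class names OldPlacement and NewPlacement are hypothetical):

from paddle.fluid.tests.unittests.op_test import OpTestTool
from paddle.fluid.tests.unittests.test_activation_op import TestActivation

# Old placement (removed hunk below): only config() carries the skip marker,
# so skipping depends on config() actually being called during the test.
class OldPlacement(TestActivation):
    @OpTestTool.skip_if_not_cpu_bf16()
    def config(self):
        self.op_type = "sigmoid"

# New placement (added hunk below): the whole class is marked, so every test
# method in it is skipped up front on machines without CPU bf16 support.
@OpTestTool.skip_if_not_cpu_bf16()
class NewPlacement(TestActivation):
    def config(self):
        self.op_type = "sigmoid"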
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
import numpy as np
from scipy.special import expit, erf

import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_activation_op import TestActivation
from paddle.fluid.tests.unittests.test_gelu_op import gelu


@OpTestTool.skip_if_not_cpu_bf16()
class TestMKLDNNSigmoidBF16Op(TestActivation):
    def config(self):
        self.op_type = "sigmoid"

    def op_forward(self, x):
        return 1 / (1 + np.exp(-x))

    def op_grad(self, dout, x):
        return dout * self.op_forward(x) * (1 - self.op_forward(x))

    def set_attrs(self):
        self.attrs = {"use_mkldnn": True}

    def init_data(self):
        self.x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)

    def setUp(self):
        self.dtype = np.uint16
        self.init_data()
        self.config()
        self.out = self.op_forward(self.x)

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': self.out}
        self.set_attrs()

    def calculate_grads(self):
        self.dx = self.op_grad(self.out, self.x)

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        self.calculate_grads()
        self.check_grad_with_place(
            core.CPUPlace(), ["X"],
            "Out",
            user_defined_grads=[self.dx],
            user_defined_grad_outputs=[convert_float_to_uint16(self.out)])


class TestMKLDNNGeluErfBF16Op(TestMKLDNNSigmoidBF16Op):
    def config(self):
        self.op_type = "gelu"

    def op_forward(self, x):
        return gelu(x, False)

    def op_grad(self, dout, x):
        return (dout *
                (0.5 + 0.5 * erf(x / np.sqrt(2)) +
                 (x / np.sqrt(2 * np.pi) * np.exp(-0.5 * np.power(x, 2)))))


class TestMKLDNNGeluErfDim2BF16Op(TestMKLDNNGeluErfBF16Op):
    def init_data(self):
        self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)


class TestMKLDNNGeluTanhBF16Op(TestMKLDNNSigmoidBF16Op):
    def config(self):
        self.op_type = "gelu"

    def op_forward(self, x):
        return gelu(x, True)

    def op_grad(self, dout, x):
        grad_part = np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
        return dout * 0.5 * (1 + grad_part) * (1 + np.sqrt(2 / np.pi) *
                                               (x + 0.134145 * np.power(x, 3)) *
                                               (1 - grad_part))

    def set_attrs(self):
        self.attrs = {"use_mkldnn": True, "approximate": True}


class TestMKLDNNGeluTanhDim2BF16Op(TestMKLDNNGeluTanhBF16Op):
    def init_data(self):
        self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
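Reference note (my own derivation, not part of the commit): the user_defined_grads produced by op_grad above are the closed-form derivatives of the forward formulas, multiplied by the upstream gradient dout (which calculate_grads takes to be the forward output). In LaTeX:

\sigma(x) = \frac{1}{1+e^{-x}}, \qquad \frac{d\sigma}{dx} = \sigma(x)\bigl(1-\sigma(x)\bigr)

\operatorname{gelu}(x) = x\,\Phi(x) = \frac{x}{2}\Bigl(1+\operatorname{erf}\bigl(\tfrac{x}{\sqrt{2}}\bigr)\Bigr), \qquad \frac{d}{dx}\operatorname{gelu}(x) = \Phi(x) + x\,\varphi(x) = \frac{1}{2}\Bigl(1+\operatorname{erf}\bigl(\tfrac{x}{\sqrt{2}}\bigr)\Bigr) + \frac{x}{\sqrt{2\pi}}\,e^{-x^{2}/2}

For the tanh-approximate GELU, writing u = \sqrt{2/\pi}\,(x + 0.044715\,x^{3}) and t = \tanh u, differentiating \tfrac{x}{2}(1+t) and factoring out \tfrac{1}{2}(1+t) gives \tfrac{1}{2}(1+t)\bigl[1 + \sqrt{2/\pi}\,(x + 0.134145\,x^{3})(1-t)\bigr], which is exactly the expression in TestMKLDNNGeluTanhBF16Op.op_grad.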
@@ -79,90 +79,6 @@ class TestMKLDNNGeluDim2Approx(TestActivation):
self.attrs = {"use_mkldnn": True, "approximate": True}
#Use it as a base class for BF16 activation tests, just override necessary functions
class TestMKLDNNSigmoidBF16Op(TestActivation):
@OpTestTool.skip_if_not_cpu_bf16()
def config(self):
self.op_type = "sigmoid"
def op_forward(self, x):
return 1 / (1 + np.exp(-x))
def op_grad(self, dout, x):
return dout * self.op_forward(x) * (1 - self.op_forward(x))
def set_attrs(self):
self.attrs = {"use_mkldnn": True}
def init_data(self):
self.x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
def setUp(self):
self.dtype = np.uint16
self.init_data()
self.config()
self.out = self.op_forward(self.x)
self.inputs = {'X': convert_float_to_uint16(self.x)}
self.outputs = {'Out': self.out}
self.set_attrs()
def calculate_grads(self):
self.dx = self.op_grad(self.out, self.x)
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
def test_check_grad(self):
self.calculate_grads()
self.check_grad_with_place(
core.CPUPlace(), ["X"],
"Out",
user_defined_grads=[self.dx],
user_defined_grad_outputs=[convert_float_to_uint16(self.out)])
class TestMKLDNNGeluErfBF16Op(TestMKLDNNSigmoidBF16Op):
def config(self):
self.op_type = "gelu"
def op_forward(self, x):
return gelu(x, False)
def op_grad(self, dout, x):
return (dout *
(0.5 + 0.5 * erf(x / np.sqrt(2)) +
(x / np.sqrt(2 * np.pi) * np.exp(-0.5 * np.power(x, 2)))))
class TestMKLDNNGeluErfDim2BF16Op(TestMKLDNNGeluErfBF16Op):
def init_data(self):
self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
class TestMKLDNNGeluTanhBF16Op(TestMKLDNNSigmoidBF16Op):
def config(self):
self.op_type = "gelu"
def op_forward(self, x):
return gelu(x, True)
def op_grad(self, dout, x):
grad_part = np.tanh(
np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
return dout * 0.5 * (1 + grad_part) * (1 + np.sqrt(2 / np.pi) *
(x + 0.134145 * np.power(x, 3)) *
(1 - grad_part))
def set_attrs(self):
self.attrs = {"use_mkldnn": True, "approximate": True}
class TestMKLDNNGeluTanhDim2BF16Op(TestMKLDNNGeluTanhBF16Op):
def init_data(self):
self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
class TestMKLDNNTanhDim2(TestTanh):
def setUp(self):
super(TestMKLDNNTanhDim2, self).setUp()
......
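Side note, not part of the diff: convert_float_to_uint16 is used above to feed the operators bfloat16 data through a uint16 container. A minimal sketch of that idea, under the assumption that bfloat16 keeps the upper 16 bits of an IEEE float32 (the real helper may round to nearest rather than truncate; the function name below is hypothetical):

import numpy as np

def float32_to_bf16_bits(x):
    # View the float32 buffer as uint32 and keep the top 16 bits: the
    # truncating form of the float32 -> bfloat16 conversion.
    return (np.ascontiguousarray(x, dtype=np.float32).view(np.uint32) >> 16).astype(np.uint16)

x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
x_bf16_bits = float32_to_bf16_bits(x)  # plays the same role as convert_float_to_uint16(x)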