Commit ad09faca authored by Tomasz Patejko, committed by Michal Gallus

MKLDNN elementwise_mul: CPU tests initially refactored. MKLDNN mul test for broadcast added

Parent 2d73ad18
@@ -85,8 +85,6 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
   mul_func_t mul_func = (mul_func_t)mul.getCode();
-  auto ptr_x = x_data;
-
   for (int ni = 0; ni < n; ni++) {
     for (int ci = 0; ci < C; ci++) {
       for (int hi = 0; hi < h; hi++) {
...
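The hunk above shows only the outer ni/ci/hi loops; the innermost width loop and the call into the JIT-generated mul_func are truncated. Read together with the MKLDNN broadcast test added below (x of shape (1, 16, 2, 2), y of shape (1, 16)), the loop nest computes a per-channel scaling of a 4-D x by y. A minimal NumPy sketch of that arithmetic, under assumed shapes (the name w does not appear in the hunk; the real kernel works on raw pointers in MKL-DNN's blocked layout):

import numpy as np

# Hedged sketch of what the ni/ci/hi loop nest computes: every channel
# ci of x is scaled by the per-channel value y[ci]. The real kernel
# dispatches the innermost multiply to the JIT-compiled mul_func.
def channel_broadcast_mul(x, y):
    n, C, h, w = x.shape        # x: (n, C, h, w); y: (C,) -- assumed shapes
    out = np.empty_like(x)
    for ni in range(n):
        for ci in range(C):
            for hi in range(h):
                out[ni, ci, hi, :] = x[ni, ci, hi, :] * y[ci]
    return out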
@@ -43,19 +43,13 @@ class TestElementwiseAddOp(OpTest):
         self.check_output()

     def test_check_grad_normal(self):
-        if self.dtype == np.float16:
-            return
         self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)

     def test_check_grad_ingore_x(self):
-        if self.dtype == np.float16:
-            return
         self.check_grad(
             ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))

     def test_check_grad_ingore_y(self):
-        if self.dtype == np.float16:
-            return
         self.check_grad(
             ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
...
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from test_elementwise_mul_op import *


class ElementwiseMulMKLDNNOp(ElementwiseMulOp):
    def init_input_output(self):
        x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
        self.x = x.transpose(0, 2, 3, 1).reshape(1, 16, 2, 2)
        self.y = np.random.rand(1, 16).astype(self.dtype)

        self.out = x * self.y.reshape(1, 16, 1, 1)
        self.out = self.out.transpose(0, 2, 3, 1).reshape(1, 16, 2, 2)

    def init_kernel_type(self):
        self.use_mkldnn = True

    def init_axis(self):
        self.axis = 0

    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


if __name__ == '__main__':
    unittest.main()
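A note on the transpose/reshape pair in init_input_output above: transpose(0, 2, 3, 1) followed by reshape(1, 16, 2, 2) preserves the logical shape but permutes the underlying element order, which emulates a non-NCHW memory layout in plain NumPy. The reference output is computed on the original NCHW data and pushed through the same permutation, so it stays comparable element by element with the kernel's result. A standalone sketch of the idea (the variable names here are illustrative, not part of the test file):

import numpy as np

# Compute the per-channel product in plain NCHW order ...
nchw_x = np.random.rand(1, 16, 2, 2).astype(np.float32)
per_channel_y = np.random.rand(1, 16).astype(np.float32)
nchw_out = nchw_x * per_channel_y.reshape(1, 16, 1, 1)

# ... then permute input and expected output identically, so the test
# exercises a layout other than NCHW while shapes stay (1, 16, 2, 2).
permuted_x = nchw_x.transpose(0, 2, 3, 1).reshape(1, 16, 2, 2)
permuted_out = nchw_out.transpose(0, 2, 3, 1).reshape(1, 16, 2, 2)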
@@ -21,13 +21,24 @@ from paddle.fluid.op import Operator

 class ElementwiseMulOp(OpTest):
+    def init_kernel_type(self):
+        self.use_mkldnn = False
+
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.dtype = np.float32
+        self.axis = -1
+        self.init_dtype()
+        self.init_input_output()
+        self.init_kernel_type()
+        self.init_axis()
+
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
+            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
         }
-        self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
+        self.outputs = {'Out': self.out}
+        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}

     def test_check_output(self):
         self.check_output()
@@ -41,6 +52,17 @@ class ElementwiseMulOp(OpTest):
     def test_check_grad_ingore_y(self):
         self.check_grad(['X'], 'Out', no_grad_set=set('Y'))

+    def init_input_output(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
+        self.out = np.multiply(self.x, self.y)
+
+    def init_dtype(self):
+        pass
+
+    def init_axis(self):
+        pass
+

 class TestElementwiseMulOp_scalar(ElementwiseMulOp):
     def setUp(self):
@@ -63,17 +85,13 @@ class TestElementwiseMulOp_Vector(ElementwiseMulOp):

 class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
-    def setUp(self):
-        self.op_type = "elementwise_mul"
-        self.inputs = {
-            'X': np.random.rand(2, 3, 4).astype(np.float64),
-            'Y': np.random.rand(2).astype(np.float64)
-        }
-        self.attrs = {'axis': 0}
-        self.outputs = {
-            'Out': self.inputs['X'] * self.inputs['Y'].reshape(2, 1, 1)
-        }
+    def init_input_output(self):
+        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
+        self.y = np.random.rand(2).astype(self.dtype)
+        self.out = self.x * self.y.reshape(2, 1, 1)
+
+    def init_axis(self):
+        self.axis = 0


 class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
...
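The diff above turns setUp into a template method: the base ElementwiseMulOp fixes the call order (init_dtype, init_input_output, init_kernel_type, init_axis) and assembles inputs, outputs, and attrs from the results, so each subclass overrides only the hooks it needs. As a hedged illustration of the pattern (this class is hypothetical, not part of the patch; TestElementwiseMulOp_broadcast_1 itself is truncated in this view), a further broadcast case reduces to:

# Hypothetical subclass, shown only to illustrate the refactored
# template-method pattern; shapes and axis are example values.
class TestElementwiseMulOpBroadcastExample(ElementwiseMulOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(3).astype(self.dtype)
        self.out = self.x * self.y.reshape(1, 3, 1)

    def init_axis(self):
        self.axis = 1  # broadcast y over the second dimension of x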