Unverified commit 84b72c5f, authored by ykkk2333, committed by GitHub

add xpu pnorm op and fix pool op, *test=kunlun (#44214)

Parent 270ba570
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op_xpu.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
namespace paddle {
namespace operators {
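// GetDims flattens the input shape around the reduction axis into an
// (m, t, n) triple: m is the product of the dimensions before `axis`, t is
// the extent of `axis` itself (the reduced dimension), and n is the product
// of the dimensions after it. With asvector=true the whole tensor is reduced
// as one vector, so t becomes the total element count and m = n = 1.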
inline void GetDims(
    const phi::DDim& dim, int axis, int* m, int* t, int* n, bool asvector) {
  *m = 1;
  *n = 1;
  *t = dim[axis];
  if (asvector) {
    *t = product(dim);
  } else {
    for (int i = 0; i < axis; ++i) {
      (*m) *= dim[i];
    }
    for (int i = axis + 1; i < dim.size(); ++i) {
      (*n) *= dim[i];
    }
  }
}
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class P_NormXPUKernel : public framework::OpKernel<T> {
  using XPUType = typename XPUTypeTrait<T>::Type;

 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<framework::Tensor>("X");
    auto* out = ctx.Output<framework::Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());

    float porder = ctx.Attr<float>("porder");
    int axis = ctx.Attr<int>("axis");
    bool asvector = ctx.Attr<bool>("asvector");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto xdim = in->dims();
    if (axis < 0) axis = xdim.size() + axis;

    std::vector<int> r_dim;
    std::vector<int> x_dim;
    std::vector<int> y_dim;
    int m = 1;
    int n = 1;
    int t = 1;
    GetDims(xdim, axis, &m, &t, &n, asvector);
    x_dim.push_back(m);
    x_dim.push_back(t);
    x_dim.push_back(n);
    r_dim.push_back(1);
    y_dim.push_back(m);
    y_dim.push_back(n);

    int r = 0;
    xpu::ctx_guard RAII_GUARD(dev_ctx.x_context());
    XPUType* tmp_x = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
    PADDLE_ENFORCE_XDNN_NOT_NULL(tmp_x);
    // Every branch below reduces over |x|, so take the absolute value once.
    r = xpu::abs(dev_ctx.x_context(),
                 reinterpret_cast<const XPUType*>(in->data<T>()),
                 tmp_x,
                 m * t * n);
    PADDLE_ENFORCE_XDNN_SUCCESS(r, "abs");
    if (porder == INFINITY) {
      r = xpu::reduce_max(dev_ctx.x_context(),
                          tmp_x,
                          reinterpret_cast<XPUType*>(out->data<T>()),
                          x_dim,
                          r_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "reduce_max");
    } else if (porder == -INFINITY) {
      r = xpu::reduce_min(dev_ctx.x_context(),
                          tmp_x,
                          reinterpret_cast<XPUType*>(out->data<T>()),
                          x_dim,
                          r_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "reduce_min");
    } else if (porder == 0) {
      // 0-"norm": count the nonzero entries along the reduced axis.
      XPUType* zeros = RAII_GUARD.alloc_l3_or_gm<XPUType>(1);
      PADDLE_ENFORCE_XDNN_NOT_NULL(zeros);
      r = xpu::constant(dev_ctx.x_context(), zeros, 1, 0.0f);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
      std::vector<int> zeros_dim(1, 1);
      bool* tmp2_x = RAII_GUARD.alloc_l3_or_gm<bool>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(tmp2_x);
      r = xpu::broadcast_not_equal(
          dev_ctx.x_context(), tmp_x, zeros, tmp2_x, x_dim, zeros_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_not_equal");
      XPUType* x_mid = tmp_x;
      r = xpu::cast<bool, XPUType>(
          dev_ctx.x_context(), tmp2_x, x_mid, m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "cast");
      r = xpu::reduce_sum(dev_ctx.x_context(),
                          x_mid,
                          reinterpret_cast<XPUType*>(out->data<T>()),
                          x_dim,
                          r_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "reduce_sum");
    } else {
      // General p: out = (sum |x|^p)^(1/p), computed as pow -> sum -> pow.
      Tensor porder_tensor;
      framework::DDim pdim = phi::make_ddim({1});
      porder_tensor.mutable_data<float>(pdim, in->place());
      r = xpu::constant(
          dev_ctx.x_context(), porder_tensor.data<float>(), 1, porder);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
      std::vector<int> p_dim(1, 1);
      XPUType* tmp2_x = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(tmp2_x);
      r = xpu::broadcast_pow(
          dev_ctx.x_context(),
          reinterpret_cast<const XPUType*>(tmp_x),
          reinterpret_cast<const XPUType*>(porder_tensor.data<float>()),
          tmp2_x,
          x_dim,
          p_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_pow");
      XPUType* tmp_y = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(tmp_y);
      r = xpu::reduce_sum(dev_ctx.x_context(),
                          reinterpret_cast<const XPUType*>(tmp2_x),
                          tmp_y,
                          x_dim,
                          r_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "reduce_sum");
      r = xpu::constant(
          dev_ctx.x_context(), porder_tensor.data<float>(), 1, 1.0f / porder);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
      r = xpu::broadcast_pow(
          dev_ctx.x_context(),
          reinterpret_cast<const XPUType*>(tmp_y),
          reinterpret_cast<const XPUType*>(porder_tensor.data<float>()),
          reinterpret_cast<XPUType*>(out->data<T>()),
          y_dim,
          p_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_pow");
      dev_ctx.Wait();
    }
  }
};
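// Gradient of the p-norm. For finite nonzero p the subgradient used below is
//   dx = sign(x) * |x|^(p-1) / y^(p-1) * dy,
// broadcast over the reduced axis. For p = +/-inf the gradient flows only to
// the entries whose magnitude attains the norm, and for p = 0 it is zero,
// since the count of nonzeros is piecewise constant in x.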
template <typename DeviceContext, typename T>
class P_NormGradXPUKernel : public framework::OpKernel<T> {
  using XPUType = typename XPUTypeTrait<T>::Type;

 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Out");
    auto* dy = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    dx->mutable_data<T>(ctx.GetPlace());

    auto xdim = x->dims();
    float porder = ctx.Attr<float>("porder");
    bool asvector = ctx.Attr<bool>("asvector");
    int axis = ctx.Attr<int>("axis");
    axis = axis < 0 ? xdim.size() + axis : axis;
    auto& dev_ctx = ctx.template device_context<DeviceContext>();

    int m, t, n;
    GetDims(xdim, axis, &m, &t, &n, asvector);
    std::vector<int> r_dim;
    std::vector<int> x_dim;
    std::vector<int> y_dim;
    x_dim.push_back(m);
    x_dim.push_back(t);
    x_dim.push_back(n);
    y_dim.push_back(m);
    y_dim.push_back(1);
    y_dim.push_back(n);

    int r = 0;
    if (porder == 0) {
      // Zero gradient for the 0-"norm".
      r = xpu::constant(dev_ctx.x_context(),
                        reinterpret_cast<XPUType*>(dx->data<T>()),
                        m * t * n,
                        static_cast<T>(0));
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
    } else if (porder == INFINITY || porder == -INFINITY) {
      // dx = (|x| == y) * sign(x) * dy.
      xpu::ctx_guard RAII_GUARD(dev_ctx.x_context());
      XPUType* x_abs = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(x_abs);
      r = xpu::abs(dev_ctx.x_context(),
                   reinterpret_cast<const XPUType*>(x->data<T>()),
                   x_abs,
                   m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "abs");
      bool* dx_t = RAII_GUARD.alloc_l3_or_gm<bool>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(dx_t);
      XPUType* dx_mid = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(dx_mid);
      r = xpu::broadcast_equal<XPUType>(
          dev_ctx.x_context(),
          reinterpret_cast<const XPUType*>(x_abs),
          reinterpret_cast<const XPUType*>(y->data<T>()),
          dx_t,
          x_dim,
          y_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_equal");
      r = xpu::cast<bool, XPUType>(
          dev_ctx.x_context(), dx_t, dx_mid, m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "cast");
      XPUType* x_sign = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(x_sign);
      r = xpu::sign(dev_ctx.x_context(),
                    reinterpret_cast<const XPUType*>(x->data<T>()),
                    x_sign,
                    m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "sign");
      XPUType* dx_pre_dy = x_abs;
      r = xpu::mul(dev_ctx.x_context(),
                   reinterpret_cast<const XPUType*>(dx_mid),
                   reinterpret_cast<const XPUType*>(x_sign),
                   dx_pre_dy,
                   m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "mul");
      r = xpu::broadcast_mul(dev_ctx.x_context(),
                             dx_pre_dy,
                             reinterpret_cast<const XPUType*>(dy->data<T>()),
                             reinterpret_cast<XPUType*>(dx->data<T>()),
                             x_dim,
                             y_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_mul");
    } else {
      // dx = sign(x) * (|x|^(p-1) / y^(p-1)) * dy.
      xpu::ctx_guard RAII_GUARD(dev_ctx.x_context());
      XPUType* x_abs = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(x_abs);
      r = xpu::abs(dev_ctx.x_context(),
                   reinterpret_cast<const XPUType*>(x->data<T>()),
                   x_abs,
                   m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "abs");
      Tensor porder_tensor;
      framework::DDim pdim = phi::make_ddim({1});
      porder_tensor.mutable_data<float>(pdim, x->place());
      r = xpu::constant(
          dev_ctx.x_context(), porder_tensor.data<float>(), 1, porder - 1.0f);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
      std::vector<int> p_dim(1, 1);
      XPUType* x_pow = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(x_pow);
      r = xpu::broadcast_pow(
          dev_ctx.x_context(),
          reinterpret_cast<const XPUType*>(x_abs),
          reinterpret_cast<const XPUType*>(porder_tensor.data<float>()),
          x_pow,
          x_dim,
          p_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_pow");
      XPUType* y_pow = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(y_pow);
      r = xpu::broadcast_pow(
          dev_ctx.x_context(),
          reinterpret_cast<const XPUType*>(y->data<T>()),
          reinterpret_cast<const XPUType*>(porder_tensor.data<float>()),
          y_pow,
          y_dim,
          p_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_pow");
      dev_ctx.Wait();
      XPUType* dx_t = x_abs;
      r = xpu::broadcast_div(
          dev_ctx.x_context(), x_pow, y_pow, dx_t, x_dim, y_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_div");
      XPUType* x_sign = x_pow;
      r = xpu::sign(dev_ctx.x_context(),
                    reinterpret_cast<const XPUType*>(x->data<T>()),
                    x_sign,
                    m * t * n);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "sign");
      XPUType* dx_mid = RAII_GUARD.alloc_l3_or_gm<XPUType>(m * t * n);
      PADDLE_ENFORCE_XDNN_NOT_NULL(dx_mid);
      r = xpu::broadcast_mul(dev_ctx.x_context(),
                             reinterpret_cast<const XPUType*>(x_sign),
                             reinterpret_cast<const XPUType*>(dy->data<T>()),
                             dx_mid,
                             x_dim,
                             y_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_mul");
      r = xpu::broadcast_mul(dev_ctx.x_context(),
                             reinterpret_cast<const XPUType*>(dx_t),
                             reinterpret_cast<const XPUType*>(dx_mid),
                             reinterpret_cast<XPUType*>(dx->data<T>()),
                             x_dim,
                             x_dim);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_mul");
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
    p_norm, ops::P_NormXPUKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(
    p_norm_grad,
    ops::P_NormGradXPUKernel<paddle::platform::XPUDeviceContext, float>);
#endif
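For reference, the general-p branch of the forward kernel is equivalent to the
following NumPy sketch (illustrative only, not part of the commit; the helper
name p_norm_general is an assumption). The (m, t, n) reshape mirrors GetDims,
and each step maps to one XPU primitive:

import numpy as np

def p_norm_general(x, axis, porder, asvector=False):
    """Sketch of the general-p branch: (sum |x|^p)^(1/p) over `axis`."""
    if asvector:
        m, t, n = 1, x.size, 1
    else:
        m = int(np.prod(x.shape[:axis], dtype=np.int64))
        t = x.shape[axis]
        n = int(np.prod(x.shape[axis + 1:], dtype=np.int64))
    xv = x.reshape(m, t, n)              # x_dim = {m, t, n}
    powed = np.abs(xv) ** porder         # abs + broadcast_pow with p
    summed = powed.sum(axis=1)           # reduce_sum over the t axis
    return summed ** (1.0 / porder)      # broadcast_pow with 1/p

x = np.random.uniform(-10, 10, (2, 3, 4)).astype(np.float32)
assert np.allclose(p_norm_general(x, 1, 2.0),
                   np.linalg.norm(x, ord=2, axis=1), atol=1e-5)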
@@ -13,6 +13,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/tensor.h"
+#include "paddle/phi/kernels/funcs/pooling.h"
 #ifdef PADDLE_WITH_XPU
 namespace paddle {
@@ -51,6 +52,9 @@ class PoolXPUKernel : public framework::OpKernel<T> {
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
     bool exclusive = context.Attr<bool>("exclusive");
     bool adaptive = context.Attr<bool>("adaptive");
+    bool ceil_mode = context.Attr<bool>("ceil_mode");
+    std::string padding_algorithm =
+        context.Attr<std::string>("padding_algorithm");
     PADDLE_ENFORCE_EQ(
         ksize.size(),
         2,
@@ -70,10 +74,27 @@ class PoolXPUKernel : public framework::OpKernel<T> {
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
       }
     }
     const int n = in_x->dims()[0];
     const int c = in_x->dims()[1];
     const int in_h = in_x->dims()[2];
     const int in_w = in_x->dims()[3];
+    framework::DDim data_dims;
+    data_dims = phi::slice_ddim(in_x->dims(), 2, in_x->dims().size());
+    phi::funcs::UpdatePadding(&paddings,
+                              global_pooling,
+                              adaptive,
+                              padding_algorithm,
+                              data_dims,
+                              strides,
+                              ksize);
+    if (ceil_mode) {
+      paddings[1] += (strides[0] - 1);
+      paddings[3] += (strides[1] - 1);
+    }
     auto input = reinterpret_cast<const XPUType*>(in_x->data<T>());
     out->mutable_data<T>(context.GetPlace());
     auto output = reinterpret_cast<XPUType*>(out->data<T>());
@@ -135,6 +156,9 @@ class PoolGradXPUKernel : public framework::OpKernel<T> {
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
     bool exclusive = context.Attr<bool>("exclusive");
     bool adaptive = context.Attr<bool>("adaptive");
+    bool ceil_mode = context.Attr<bool>("ceil_mode");
+    std::string padding_algorithm =
+        context.Attr<std::string>("padding_algorithm");
     const int* index_data = nullptr;
     PADDLE_ENFORCE_EQ(
         ksize.size(),
@@ -163,6 +187,22 @@ class PoolGradXPUKernel : public framework::OpKernel<T> {
     const int c = in_x->dims()[1];
     const int in_h = in_x->dims()[2];
     const int in_w = in_x->dims()[3];
+    framework::DDim data_dims;
+    data_dims = phi::slice_ddim(in_x->dims(), 2, in_x->dims().size());
+    phi::funcs::UpdatePadding(&paddings,
+                              global_pooling,
+                              adaptive,
+                              padding_algorithm,
+                              data_dims,
+                              strides,
+                              ksize);
+    if (ceil_mode) {
+      paddings[1] += (strides[0] - 1);
+      paddings[3] += (strides[1] - 1);
+    }
     auto input = reinterpret_cast<const XPUType*>(in_x->data<T>());
     auto output = reinterpret_cast<const XPUType*>(out->data<T>());
     auto output_grad = reinterpret_cast<const XPUType*>(out_grad->data<T>());
...
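The ceil_mode fix above enlarges the trailing pads (after UpdatePadding the
paddings vector holds [top, bottom, left, right], so entries 1 and 3) by
stride - 1, which turns the kernel's floor-based output-size computation into
ceiling behavior. A minimal sketch of the arithmetic (illustrative only; the
function names are assumptions):

# Pooling output size along one dimension; shows why adding (stride - 1)
# to the trailing padding makes floor division match ceiling division.
def out_size_floor(in_size, ksize, pad_lo, pad_hi, stride):
    return (in_size + pad_lo + pad_hi - ksize) // stride + 1

def out_size_ceil(in_size, ksize, pad_lo, pad_hi, stride):
    return -(-(in_size + pad_lo + pad_hi - ksize) // stride) + 1

# ceil_mode via the padding trick used in the kernel:
assert out_size_floor(6, 3, 0, 0 + (2 - 1), stride=2) == \
       out_size_ceil(6, 3, 0, 0, stride=2)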
@@ -323,6 +323,8 @@ XPUOpMap& get_kl2_ops() {
     {"one_hot_v2",
      XPUKernelSet({pOpKernelType(vartype::INT32, XPUPlace()),
                    pOpKernelType(vartype::INT64, XPUPlace())})},
+    {"p_norm", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+    {"p_norm_grad", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
     {"pool2d_grad",
      XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                    pOpKernelType(vartype::FP16, XPUPlace())})},
...
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
import sys
import unittest
from functools import reduce
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
from operator import mul
from xpu.get_test_cover_info import (create_test_class,
                                     get_xpu_op_support_types,
                                     XPUOpTestWrapper)
paddle.enable_static()
def ref_p_norm(x, axis, porder, keepdims=False, reduce_all=False):
    r = []
    if axis is None or reduce_all:
        x = x.flatten()
        if porder == np.inf:
            r = np.amax(np.abs(x), keepdims=keepdims)
        elif porder == -np.inf:
            r = np.amin(np.abs(x), keepdims=keepdims)
        else:
            r = np.linalg.norm(x, ord=porder, keepdims=keepdims)
    elif isinstance(axis, (list, tuple)) and len(axis) == 2:
        if porder == np.inf:
            axis = tuple(axis)
            r = np.amax(np.abs(x), axis=axis, keepdims=keepdims)
        elif porder == -np.inf:
            axis = tuple(axis)
            r = np.amin(np.abs(x), axis=axis, keepdims=keepdims)
        elif porder == 0:
            axis = tuple(axis)
            r = x.astype(bool)
            r = np.sum(r, axis, keepdims=keepdims)
        elif porder == 1:
            axis = tuple(axis)
            r = np.sum(np.abs(x), axis, keepdims=keepdims)
        else:
            axis = tuple(axis)
            xp = np.power(np.abs(x), porder)
            s = np.sum(xp, axis=axis, keepdims=keepdims)
            r = np.power(s, 1.0 / porder)
    else:
        if isinstance(axis, list):
            axis = tuple(axis)
        r = np.linalg.norm(x, ord=porder, axis=axis, keepdims=keepdims)
    r = r.astype(x.dtype)
    return r
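# Example: along a single axis ref_p_norm matches np.linalg.norm, e.g. for
# x of shape (3, 20, 3), axis=2, porder=2.0:
#   ref_p_norm(x, 2, 2.0) == np.linalg.norm(x, ord=2.0, axis=2)
# Note that setUp below passes self.asvector as the reduce_all argument,
# so asvector=True flattens x before reducing.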
class XPUTestPNormOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'p_norm'
        self.use_dynamic_create_class = False

    class TestXPUPNormOp(XPUOpTest):

        def setUp(self):
            self.op_type = "p_norm"
            self.dtype = self.in_type
            self.shape = [2, 3, 4, 5]
            self.epsilon = 1e-12
            self.axis = 1
            self.porder = 2.0
            self.asvector = False
            self.keepdims = False
            self.set_attrs()
            np.random.seed(12345)
            x_np = np.random.uniform(-10, 10, self.shape).astype(self.dtype)
            ref_y_np = ref_p_norm(x_np, self.axis, self.porder, self.keepdims,
                                  self.asvector)
            self.inputs = {'X': x_np}
            self.outputs = {'Out': ref_y_np}
            self.attrs = {
                'epsilon': self.epsilon,
                'axis': self.axis,
                'porder': float(self.porder),
                'asvector': self.asvector
            }

        def set_attrs(self):
            pass

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)

        def test_check_grad(self):
            self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')
    class TestPnormOp2(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3]
            self.axis = 2
            self.porder = 2.0

    class TestPnormOp3(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3]
            self.axis = 2
            self.porder = np.inf

    class TestPnormOp4(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3]
            self.axis = 2
            self.porder = -np.inf

    class TestPnormOp5(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3]
            self.axis = 2
            self.porder = 0

    class TestPnormOp6(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3]
            self.axis = -1
            self.porder = 2

    class TestPnormOp7(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3, 10]
            self.axis = 2
            self.porder = 2.0

    class TestPnormOp8(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3]
            self.axis = 2
            self.porder = np.inf

    class TestPnormOp9(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3, 10]
            self.axis = 1
            self.porder = -np.inf

    class TestPnormOp10(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3, 10]
            self.axis = 2
            self.porder = 0

    class TestPnormOp11(TestXPUPNormOp):

        def set_attrs(self):
            self.shape = [3, 20, 3, 10]
            self.axis = -1
            self.porder = 2
support_types = get_xpu_op_support_types('p_norm')
for stype in support_types:
    create_test_class(globals(), XPUTestPNormOp, stype)
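# create_test_class registers one concrete TestCase per dtype reported by
# get_xpu_op_support_types('p_norm'), instantiating every nested
# TestPnormOp* variant for that dtype.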
if __name__ == "__main__":
    unittest.main()
@@ -297,6 +297,7 @@ class XPUTestPool2D_Op(XPUOpTestWrapper):
             'exclusive': self.exclusive,
             'adaptive': self.adaptive,
             "padding_algorithm": self.padding_algorithm,
+            'ceil_mode': self.ceil_mode
         }
         self.outputs = {'Out': output}
@@ -469,6 +470,77 @@ class XPUTestPool2D_Op(XPUOpTestWrapper):
         def init_shape(self):
             self.shape = [2, 3, 7, 7]
+    class TestCaseCeil1(TestPool2D_Op):
+
+        def init_test_case(self):
+            self.ksize = [3, 3]
+            self.strides = [1, 1]
+
+        def init_paddings(self):
+            self.paddings = [0, 0]
+
+        def init_pool_type(self):
+            self.pool_type = "avg"
+            self.pool2D_forward_naive = avg_pool2D_forward_naive
+
+        def init_global_pool(self):
+            self.global_pool = False
+
+        def init_shape(self):
+            self.shape = [2, 3, 7, 7]
+
+        def init_ceil_mode(self):
+            self.ceil_mode = True
+
+    class TestCaseCeil2(TestPool2D_Op):
+
+        def init_test_case(self):
+            self.ksize = [3, 3]
+            self.strides = [1, 1]
+
+        def init_paddings(self):
+            self.paddings = [1, 1]
+
+        def init_pool_type(self):
+            self.pool_type = "avg"
+            self.pool2D_forward_naive = avg_pool2D_forward_naive
+
+        def init_global_pool(self):
+            self.global_pool = False
+
+        def init_shape(self):
+            self.shape = [2, 3, 7, 7]
+
+        def init_ceil_mode(self):
+            self.ceil_mode = True
+
+    class TestCaseCeil3(TestPool2D_Op):
+
+        def init_pool_type(self):
+            self.pool_type = "max"
+            self.pool2D_forward_naive = max_pool2D_forward_naive
+
+        def init_ceil_mode(self):
+            self.ceil_mode = True
+
+    class TestCaseCeil4(TestCaseCeil1):
+
+        def init_pool_type(self):
+            self.pool_type = "max"
+            self.pool2D_forward_naive = max_pool2D_forward_naive
+
+        def init_ceil_mode(self):
+            self.ceil_mode = True
+
+    class TestCaseCeil5(TestCaseCeil2):
+
+        def init_pool_type(self):
+            self.pool_type = "max"
+            self.pool2D_forward_naive = max_pool2D_forward_naive
+
+        def init_ceil_mode(self):
+            self.ceil_mode = True
 support_types = get_xpu_op_support_types('pool2d')
 for stype in support_types:
...