Unverified commit 49461a02 authored by zhangyuqin1998, committed by GitHub

move elementwise_raw_kernel to new dir (#51965)

* move elementwise raw

* fix

* fix
Parent 2bd0a946
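Background for reviewers: the *_raw kernels touched by this diff are the variants that take an explicit broadcast `axis`; the public kernels without the `_raw` suffix typically just forward to them with `axis = -1`. A minimal sketch of that forwarding pattern is shown below for orientation only — it is not part of this commit, and the wrapper name and placement are assumed from the usual phi layout.

// Illustrative only: how a non-raw elementwise kernel usually delegates to the
// raw kernel moved in this commit (assumed wrapper, not taken from this diff).
template <typename T, typename Context>
void MaximumKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const DenseTensor& y,
                   DenseTensor* out) {
  int axis = -1;  // -1 means "align trailing dimensions" when broadcasting
  MaximumRawKernel<T>(dev_ctx, x, y, axis, out);
}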
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/elementwise.h"
#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"
namespace phi {
template <typename T, typename Context>
void MaximumRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
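// ElementwiseCompute broadcasts the lower-rank operand along `axis`
// (axis = -1 aligns trailing dimensions) and applies the functor element-wise.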
funcs::ElementwiseCompute<funcs::MaximumFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::MaximumFunctor<T>(), out);
}
template <typename T, typename Context>
void MinimumRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
funcs::ElementwiseCompute<funcs::MinimumFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::MinimumFunctor<T>(), out);
}
template <typename T, typename Context>
void RemainderRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
auto x_dims = x.dims();
auto y_dims = y.dims();
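// ElementwiseCompute expects the higher-rank operand first; when y outranks x
// the functor sees its arguments in swapped order, so the Inverse* functor is
// used to preserve x % y semantics. floor_divide and pow below follow the same
// pattern.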
if (x_dims.size() >= y_dims.size()) {
funcs::ElementwiseCompute<funcs::RemainderFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::RemainderFunctor<T>(), out);
} else {
funcs::ElementwiseCompute<funcs::InverseRemainderFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::InverseRemainderFunctor<T>(), out);
}
}
template <typename T, typename Context>
void FloorDivideRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
auto x_dims = x.dims();
auto y_dims = y.dims();
if (x_dims.size() >= y_dims.size()) {
funcs::ElementwiseCompute<funcs::FloorDivideFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::FloorDivideFunctor<T>(), out);
} else {
funcs::ElementwiseCompute<funcs::InverseFloorDivideFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::InverseFloorDivideFunctor<T>(), out);
}
}
template <typename T, typename Context>
void ElementwisePowRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
auto x_dims = x.dims();
auto y_dims = y.dims();
if (x_dims.size() >= y_dims.size()) {
funcs::ElementwiseCompute<funcs::ElementwisePowFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::ElementwisePowFunctor<T>(), out);
} else {
funcs::ElementwiseCompute<funcs::ElementwiseInversePowFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::ElementwiseInversePowFunctor<T>(), out);
}
}
} // namespace phi
PD_REGISTER_KERNEL(maximum_raw,
CPU,
ALL_LAYOUT,
phi::MaximumRawKernel,
float,
double,
int,
int64_t,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(minimum_raw,
CPU,
ALL_LAYOUT,
phi::MinimumRawKernel,
float,
double,
int,
int64_t,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(remainder_raw,
CPU,
ALL_LAYOUT,
phi::RemainderRawKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(floor_divide_raw,
CPU,
ALL_LAYOUT,
phi::FloorDivideRawKernel,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow_raw,
CPU,
ALL_LAYOUT,
phi::ElementwisePowRawKernel,
float,
double,
int,
int64_t) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#ifndef PADDLE_WITH_XPU_KP
#include "paddle/phi/common/complex.h"
#include "paddle/phi/common/float16.h"
#endif
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"
namespace phi {
template <typename T, typename Context>
void MaximumRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
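// On GPU the generic BroadcastKernel is used: collect pointers to the inputs
// and the output, then launch one fused broadcast + binary-functor kernel.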
std::vector<const DenseTensor*> inputs;
inputs.reserve(2);
std::vector<DenseTensor*> outputs;
outputs.reserve(1);
inputs.emplace_back(&x);
inputs.emplace_back(&y);
outputs.emplace_back(out);
dev_ctx.template Alloc<T>(out);
funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
dev_ctx, inputs, &outputs, axis, funcs::MaximumFunctor<T>());
}
template <typename T, typename Context>
void MinimumRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
std::vector<const DenseTensor*> inputs;
inputs.reserve(2);
std::vector<DenseTensor*> outputs;
outputs.reserve(1);
inputs.emplace_back(&x);
inputs.emplace_back(&y);
outputs.emplace_back(out);
dev_ctx.template Alloc<T>(out);
funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
dev_ctx, inputs, &outputs, axis, funcs::MinimumFunctor<T>());
}
template <typename T, typename Context>
void RemainderRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
std::vector<const DenseTensor*> inputs;
inputs.reserve(2);
std::vector<DenseTensor*> outputs;
outputs.reserve(1);
inputs.emplace_back(&x);
inputs.emplace_back(&y);
outputs.emplace_back(out);
dev_ctx.template Alloc<T>(out);
funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
dev_ctx, inputs, &outputs, axis, funcs::RemainderFunctor<T>());
}
template <typename T, typename Context>
void FloorDivideRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
std::vector<const DenseTensor*> inputs;
inputs.reserve(2);
std::vector<DenseTensor*> outputs;
outputs.reserve(1);
inputs.emplace_back(&x);
inputs.emplace_back(&y);
outputs.emplace_back(out);
dev_ctx.template Alloc<T>(out);
funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
dev_ctx, inputs, &outputs, axis, funcs::FloorDivideFunctor<T>());
}
template <typename T, typename Context>
void ElementwisePowRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
std::vector<const DenseTensor*> inputs;
inputs.reserve(2);
std::vector<DenseTensor*> outputs;
outputs.reserve(1);
inputs.emplace_back(&x);
inputs.emplace_back(&y);
outputs.emplace_back(out);
dev_ctx.template Alloc<T>(out);
funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
dev_ctx, inputs, &outputs, axis, funcs::ElementwisePowFunctor<T>());
}
} // namespace phi
#ifdef PADDLE_WITH_XPU_KP
PD_REGISTER_KERNEL(maximum_raw, KPS, ALL_LAYOUT, phi::MaximumRawKernel, float) {
}
PD_REGISTER_KERNEL(minimum_raw, KPS, ALL_LAYOUT, phi::MinimumRawKernel, float) {
}
PD_REGISTER_KERNEL(
floor_divide_raw, KPS, ALL_LAYOUT, phi::FloorDivideRawKernel, int) {}
PD_REGISTER_KERNEL(
elementwise_pow_raw, KPS, ALL_LAYOUT, phi::ElementwisePowRawKernel, float) {
}
#else
using float16 = phi::dtype::float16;
using bfloat16 = phi::dtype::bfloat16;
PD_REGISTER_KERNEL(maximum_raw,
KPS,
ALL_LAYOUT,
phi::MaximumRawKernel,
float,
double,
int,
int64_t,
float16,
bfloat16) {}
PD_REGISTER_KERNEL(minimum_raw,
KPS,
ALL_LAYOUT,
phi::MinimumRawKernel,
float,
double,
int,
int64_t,
float16,
bfloat16) {}
PD_REGISTER_KERNEL(remainder_raw,
KPS,
ALL_LAYOUT,
phi::RemainderRawKernel,
float,
double,
int,
float16,
int64_t) {}
PD_REGISTER_KERNEL(floor_divide_raw,
KPS,
ALL_LAYOUT,
phi::FloorDivideRawKernel,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow_raw,
KPS,
ALL_LAYOUT,
phi::ElementwisePowRawKernel,
float,
double,
int,
float16,
int64_t) {}
#endif
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"
#include "paddle/phi/kernels/xpu/elementwise.h"
namespace phi {
template <typename T, typename Context>
void MaximumRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
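// Wrap the xpu::broadcast_max primitive in a lambda; XPUElementwise derives the
// broadcast shapes from `axis` and invokes it. The remaining XPU kernels below
// follow the same pattern with their respective broadcast primitives.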
auto f = [](xpu::Context* ctx,
const XPUType* x,
const XPUType* y,
XPUType* z,
const std::vector<int>& xshape,
const std::vector<int>& yshape) {
return xpu::broadcast_max<XPUType>(ctx, x, y, z, xshape, yshape);
};
XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
}
template <typename T, typename Context>
void MinimumRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
auto f = [](xpu::Context* ctx,
const XPUType* x,
const XPUType* y,
XPUType* z,
const std::vector<int>& xshape,
const std::vector<int>& yshape) {
return xpu::broadcast_min<XPUType>(ctx, x, y, z, xshape, yshape);
};
XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
}
template <typename T, typename Context>
void RemainderRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
auto f = [](xpu::Context* ctx,
const XPUType* x,
const XPUType* y,
XPUType* z,
const std::vector<int>& xshape,
const std::vector<int>& yshape) {
return xpu::broadcast_mod<XPUType>(ctx, x, y, z, xshape, yshape);
};
XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
}
template <typename T, typename Context>
void FloorDivideRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
auto f = [](xpu::Context* ctx,
const XPUType* x,
const XPUType* y,
XPUType* z,
const std::vector<int>& xshape,
const std::vector<int>& yshape) {
return xpu::broadcast_floordiv<XPUType>(ctx, x, y, z, xshape, yshape);
};
XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
}
template <typename T, typename Context>
void ElementwisePowRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
auto f = [](xpu::Context* ctx,
const XPUType* x,
const XPUType* y,
XPUType* z,
const std::vector<int>& xshape,
const std::vector<int>& yshape) {
return xpu::broadcast_pow<XPUType>(ctx, x, y, z, xshape, yshape);
};
XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
}
} // namespace phi
PD_REGISTER_KERNEL(floor_divide_raw,
XPU,
ALL_LAYOUT,
phi::FloorDivideRawKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(maximum_raw,
XPU,
ALL_LAYOUT,
phi::MaximumRawKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(minimum_raw,
XPU,
ALL_LAYOUT,
phi::MinimumRawKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(remainder_raw,
XPU,
ALL_LAYOUT,
phi::RemainderRawKernel,
float,
phi::dtype::float16,
int32_t,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow_raw,
XPU,
ALL_LAYOUT,
phi::ElementwisePowRawKernel,
float,
phi::dtype::float16) {}
@@ -269,90 +269,6 @@ class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
)
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float64
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float64
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
)
}
class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
)
}
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -393,48 +309,6 @@ class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
}
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64)
y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
np.float64
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
)
}
class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
)
}
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
......
@@ -115,44 +115,6 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = broadcast_wrapper(shape=[100, 1, 1])
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float64
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.minimum(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = broadcast_wrapper(shape=[1, 100, 1])
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float64
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.minimum(
self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
)
}
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
@@ -171,25 +133,6 @@ class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
}
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = broadcast_wrapper(shape=[1, 25, 4, 1])
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64)
sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64)
y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype(
np.float64
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.minimum(
self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1)
)
}
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
@@ -246,10 +189,7 @@ class TestElementwiseMinOpFP16(unittest.TestCase):
self.check_main((13, 17), (13, 17))
self.check_main((10, 3, 4), (1,))
self.check_main((100,), (100,))
self.check_main((100, 3, 2), (100,), 0)
self.check_main((2, 100, 3), (100,), 1)
self.check_main((2, 3, 100), (100,))
self.check_main((2, 25, 4, 1), (25, 4), 1)
self.check_main((2, 10, 2, 5), (2, 10, 1, 5))
......
@@ -172,73 +172,6 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float64"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float64"),
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
}
def test_check_grad_normal(self):
if hasattr(self, 'attrs'):
self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
else:
self.check_grad(['X', 'Y'], 'Out')
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float64"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float64"),
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.power(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
def test_check_grad_normal(self):
if hasattr(self, 'attrs'):
self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
else:
self.check_grad(['X', 'Y'], 'Out')
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float64"),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float64"),
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(
self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, 1)
)
}
def test_check_grad_normal(self):
if hasattr(self, 'attrs'):
self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
else:
self.check_grad(['X', 'Y'], 'Out')
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
......
@@ -108,38 +108,6 @@ class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
self.dtype
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
self.dtype
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
)
}
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype)
@@ -155,22 +123,6 @@ class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
)
}
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
self.dtype
)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
)
}
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype)
......
@@ -107,36 +107,6 @@ class XPUTestElementwiseMinOp(XPUOpTestWrapper):
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
self.dtype
)
self.attrs = {'axis': 0}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
self.dtype
)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(
self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
)
}
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
@@ -151,21 +121,6 @@ class XPUTestElementwiseMinOp(XPUOpTestWrapper):
)
}
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype(
self.dtype
)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(
self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1)
)
}
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype)
......
@@ -72,26 +72,6 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper):
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
class TestElementwiseModOp_broadcast_1(ElementwiseModOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(self.dtype),
'Y': np.random.rand(2, 100, 3).astype(self.dtype),
}
self.attrs = {'axis': 1}
self.outputs = {'Out': self.inputs['X'] % self.inputs['Y']}
class TestElementwiseModOp_broadcast_2(ElementwiseModOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(22, 128, 3).astype(self.dtype),
'Y': np.random.rand(22, 128, 3).astype(self.dtype),
}
self.attrs = {'axis': 1}
self.outputs = {'Out': self.inputs['X'] % self.inputs['Y']}
class TestRemainderOp(unittest.TestCase):
def test_dygraph(self):
with fluid.dygraph.guard():
......
@@ -98,47 +98,6 @@ class XPUTestElementwisePowOp(XPUOpTestWrapper):
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(
self.inputs['X'], self.inputs['Y'].reshape(100, 1)
)
}
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.power(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(
self.dtype
),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype),
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(
self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, 1)
)
}
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
......