Unverified commit a5c95cd5, authored by TeslaZhao, committed by GitHub

Add xpu transpose2 op. test=kunlun (#28086)

Parent a5f65d51
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/transpose_op.h"
#include <memory>
#include <string>
#include <vector>
namespace paddle {
namespace operators {
using framework::Tensor;
bool XPUSupported(int ndims, const std::vector<int>& axis) {
  /*
   * XPU currently supports only these permutations:
   * rank 2: {1, 0}
   * rank 3: {0, 2, 1}, {1, 0, 2}, {2, 1, 0}
   * rank 4: {0, 2, 1, 3}, {0, 2, 3, 1}, {0, 3, 1, 2}, {3, 2, 0, 1}
   */
bool is_supported = false;
  std::vector<int> permute_10 = {1, 0};
  std::vector<int> permute_102 = {1, 0, 2};
  std::vector<int> permute_021 = {0, 2, 1};
  std::vector<int> permute_210 = {2, 1, 0};
  std::vector<int> permute_0213 = {0, 2, 1, 3};
  std::vector<int> permute_0231 = {0, 2, 3, 1};
  std::vector<int> permute_0312 = {0, 3, 1, 2};
  std::vector<int> permute_3201 = {3, 2, 0, 1};
switch (ndims) {
case 2:
if (axis == permute_10) {
is_supported = true;
}
break;
case 3:
if ((axis == permute_021) || (axis == permute_102) ||
(axis == permute_210)) {
is_supported = true;
}
break;
case 4:
if ((axis == permute_0213) || (axis == permute_0231) ||
(axis == permute_0312) || (axis == permute_3201)) {
is_supported = true;
}
break;
default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Only tensors of rank 2, 3 or 4 are supported on XPU"));
}
return is_supported;
}
template <typename DeviceContext, typename T>
class TransposeXPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto x = context.Input<framework::Tensor>("X");
auto out = context.Output<framework::Tensor>("Out");
    // The "axis" attribute holds the permutation to apply.
auto axis = context.Attr<std::vector<int>>("axis");
int ndims = axis.size();
const auto x_dims = x->dims();
const T* x_data = x->data<T>();
T* y_data = out->mutable_data<T>(context.GetPlace());
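    // mutable_data allocates the output on the current (XPU) place.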
if (!XPUSupported(ndims, axis)) {
VLOG(0) << "XPU does not support the permute, try to do on cpu";
framework::Tensor x_cpu;
framework::Tensor out_cpu;
auto x_cpu_data = x_cpu.mutable_data<T>(x->dims(), platform::CPUPlace());
auto out_cpu_data =
out_cpu.mutable_data<T>(out->dims(), platform::CPUPlace());
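      // Copy the input from XPU device memory into the host buffer.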
memory::Copy(platform::CPUPlace(), reinterpret_cast<void*>(x_cpu_data),
BOOST_GET_CONST(platform::XPUPlace, context.GetPlace()),
(const void*)x_data, x->numel() * sizeof(T));
const platform::CPUDeviceContext* cpu_dev_ctx =
static_cast<const platform::CPUDeviceContext*>(
platform::DeviceContextPool::Instance().Get(
platform::CPUPlace()));
TransCompute<platform::CPUDeviceContext, T>(ndims, *cpu_dev_ctx, x_cpu,
&out_cpu, axis);
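      // Copy the transposed result from the host buffer back to XPU device
      // memory.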
memory::Copy(BOOST_GET_CONST(platform::XPUPlace, context.GetPlace()),
reinterpret_cast<void*>(y_data), platform::CPUPlace(),
(const void*)out_cpu_data, out->numel() * sizeof(T));
return;
}
std::vector<int> x_shape_host(ndims, 0);
for (int i = 0; i < ndims; ++i) {
x_shape_host[i] = x_dims[i];
}
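    // xpu::transpose takes the input shape and the permutation as host-side
    // int arrays.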
int* permute_host = axis.data();
auto& dev_ctx = context.template device_context<DeviceContext>();
int r = xpu::transpose(dev_ctx.x_context(), x_data, y_data,
x_shape_host.data(), permute_host, ndims);
PADDLE_ENFORCE_EQ(
r, xpu::Error_t::SUCCESS,
platform::errors::External("XPU kernel error! error code=%d", r));
}
};
template <typename DeviceContext, typename T>
class TransposeGradXPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* out_grad =
context.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* x_grad =
context.Output<framework::Tensor>(framework::GradVarName("X"));
if (!x_grad) return;
x_grad->mutable_data<T>(context.GetPlace());
std::vector<int> axis = context.Attr<std::vector<int>>("axis");
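    // The gradient of a transpose is a transpose with the inverse permutation:
    // reversed_axis[axis[i]] = i, e.g. axis = {1, 2, 0} inverts to {2, 0, 1}.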
std::vector<int> reversed_axis(axis);
for (size_t i = 0; i < axis.size(); i++) {
reversed_axis[axis[i]] = i;
}
int ndims = axis.size();
if (!XPUSupported(ndims, reversed_axis)) {
      PADDLE_THROW(
          platform::errors::Unimplemented("XPU does not support this permutation"));
}
std::vector<int> out_shape_host(ndims, 0);
for (int i = 0; i < ndims; ++i) {
out_shape_host[i] = out_grad->dims()[i];
}
int* permute_host = reversed_axis.data();
auto& dev_ctx = context.template device_context<DeviceContext>();
int r = xpu::transpose(dev_ctx.x_context(), out_grad->data<T>(),
x_grad->data<T>(), out_shape_host.data(),
permute_host, ndims);
PADDLE_ENFORCE_EQ(
r, xpu::Error_t::SUCCESS,
platform::errors::External("XPU kernel error! error code=%d", r));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
transpose,
ops::TransposeXPUKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(
transpose_grad,
ops::TransposeGradXPUKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(
transpose2,
ops::TransposeXPUKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(
transpose2_grad,
ops::TransposeGradXPUKernel<paddle::platform::XPUDeviceContext, float>);
#endif // PADDLE_WITH_XPU
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestXPUTransposeOp(OpTest):
def setUp(self):
self.init_op_type()
self.initTestCase()
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {
'axis': list(self.axis),
'use_mkldnn': False,
'use_xpu': True
}
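        # XShape is a dummy output of transpose2 that only records the input
        # shape; it is excluded from checking (no_check_set below), so random
        # data is fine here.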
self.outputs = {
'XShape': np.random.random(self.shape).astype("float64"),
'Out': self.inputs['X'].transpose(self.axis)
}
def init_op_type(self):
self.op_type = "transpose2"
self.use_mkldnn = False
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place=place, no_check_set=['XShape'])
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
def initTestCase(self):
self.shape = (3, 40)
self.axis = (1, 0)
class TestCase0(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (100, )
self.axis = (0, )
class TestCase1(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (3, 4, 10)
self.axis = (0, 2, 1)
class TestCase2(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5)
self.axis = (0, 2, 3, 1)
class TestCase3(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.axis = (4, 2, 3, 1, 0)
class TestCase4(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6, 1)
self.axis = (4, 2, 3, 1, 0, 5)
class TestCase5(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 16, 96)
self.axis = (0, 2, 1)
class TestCase6(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 10, 12, 16)
self.axis = (3, 1, 2, 0)
class TestCase7(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 10, 2, 16)
self.axis = (0, 1, 3, 2)
class TestCase8(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (0, 1, 3, 2, 4, 5, 6, 7)
class TestCase9(TestXPUTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (6, 1, 3, 5, 0, 2, 4, 7)
class TestTransposeOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float64')
def test_x_Variable_check():
                # Input(x)'s type must be Variable
fluid.layers.transpose("not_variable", perm=[1, 0, 2])
self.assertRaises(TypeError, test_x_Variable_check)
def test_x_dtype_check():
                # Input(x)'s dtype must be one of [float16, float32, float64, int32, int64]
x1 = fluid.layers.data(
name='x1', shape=[10, 5, 3], dtype='bool')
fluid.layers.transpose(x1, perm=[1, 0, 2])
self.assertRaises(TypeError, test_x_dtype_check)
def test_perm_list_check():
# Input(perm)'s type must be list
fluid.layers.transpose(x, perm="[1, 0, 2]")
self.assertRaises(TypeError, test_perm_list_check)
def test_perm_length_and_x_dim_check():
                # Input(perm) is a permutation of the dimensions of Input(x),
                # so its length must equal the rank of Input(x)
fluid.layers.transpose(x, perm=[1, 0, 2, 3, 4])
self.assertRaises(ValueError, test_perm_length_and_x_dim_check)
def test_each_elem_value_check():
                # Each element of Input(perm) must be less than the rank of Input(x)
fluid.layers.transpose(x, perm=[3, 5, 7])
self.assertRaises(ValueError, test_each_elem_value_check)
class TestTAPI(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
data_t = paddle.t(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data_np = np.random.random([10]).astype("float64")
result, = exe.run(feed={"data": data_np}, fetch_list=[data_t])
expected_result = np.transpose(data_np)
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10, 5], dtype="float64", name="data")
data_t = paddle.t(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data_np = np.random.random([10, 5]).astype("float64")
result, = exe.run(feed={"data": data_np}, fetch_list=[data_t])
expected_result = np.transpose(data_np)
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[1, 5], dtype="float64", name="data")
data_t = paddle.t(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data_np = np.random.random([1, 5]).astype("float64")
result, = exe.run(feed={"data": data_np}, fetch_list=[data_t])
expected_result = np.transpose(data_np)
self.assertEqual((result == expected_result).all(), True)
with fluid.dygraph.guard():
np_x = np.random.random([10]).astype("float64")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
with fluid.dygraph.guard():
np_x = np.random.random([10, 5]).astype("float64")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
with fluid.dygraph.guard():
np_x = np.random.random([1, 5]).astype("float64")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
def test_errors(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64')
def test_x_dimension_check():
paddle.t(x)
self.assertRaises(ValueError, test_x_dimension_check)
if __name__ == "__main__":
unittest.main()