提交 59de8e12 编写于 作者: liym27 提交者: Aurelius84

Compatible int32 and int64 for attr in concat/split/unsqueeze. test=develop (#20912)

上级 7b4cb655
......@@ -116,13 +116,13 @@ class SplitOpKernel : public framework::OpKernel<T> {
bool need_resize_outs_dims = false;
if (ctx.HasInput("AxisTensor")) {
auto* axis_tensor = ctx.Input<framework::Tensor>("AxisTensor");
axis = GetDataFromTensor<int>(axis_tensor)[0];
axis = GetDataFromTensor(axis_tensor)[0];
need_resize_outs_dims = true;
}
auto sections_tensor_list =
ctx.MultiInput<framework::Tensor>("SectionsTensorList");
if (sections_tensor_list.size() > 0) {
sections = GetDataFromTensorList<int>(sections_tensor_list);
sections = GetDataFromTensorList(sections_tensor_list);
need_resize_outs_dims = true;
}
......
......@@ -19,43 +19,11 @@ limitations under the License. */
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace operators {
// Reads one scalar of type T out of every tensor in `list_tensor` and
// collects them into a std::vector<T>.
//
// Each list element must be a tensor of shape [1]; anything else trips the
// PADDLE_ENFORCE_EQ below. Tensors living on the GPU are first staged into a
// temporary CPU tensor via TensorCopySync, since device memory cannot be
// dereferenced from host code.
template <typename T>
inline std::vector<T> GetDataFromTensorList(
    const std::vector<const framework::Tensor *> &list_tensor) {
  std::vector<T> values;
  values.reserve(list_tensor.size());
  for (auto *item : list_tensor) {
    PADDLE_ENFORCE_EQ(
        item->dims(), framework::make_ddim({1}),
        "ShapeError: If the element type is Tensor, "
        "the element's shape must be [1]. But received the element's shape "
        "is [%s]",
        item->dims());
    if (platform::is_gpu_place(item->place())) {
      // Synchronous device-to-host copy so the scalar can be read directly.
      framework::Tensor host_copy;
      TensorCopySync(*item, platform::CPUPlace(), &host_copy);
      values.push_back(*host_copy.data<T>());
    } else {
      values.push_back(*item->data<T>());
    }
  }
  return values;
}
// Copies the full contents of tensor `x` into a host-side std::vector<T>.
//
// If `x` resides on the GPU it is first synchronously copied into a CPU
// tensor (host code cannot dereference device pointers); otherwise the
// tensor's buffer is read in place.
template <typename T>
inline std::vector<T> GetDataFromTensor(const framework::Tensor *x) {
  const T *src = x->data<T>();
  framework::Tensor host_copy;
  if (platform::is_gpu_place(x->place())) {
    // Stage device memory on the CPU before reading it.
    TensorCopySync(*x, platform::CPUPlace(), &host_copy);
    src = host_copy.data<T>();
  }
  return std::vector<T>(src, src + x->numel());
}
template <typename DeviceContext, typename T>
class UnsqueezeKernel : public framework::OpKernel<T> {
......
......@@ -20,35 +20,61 @@ limitations under the License. */
namespace paddle {
namespace operators {
template <typename T>
template <typename T = int32_t>
inline std::vector<T> GetDataFromTensor(const framework::Tensor* x) {
auto* data = x->data<T>();
framework::Tensor cpu_attr_tensor;
if (platform::is_gpu_place(x->place())) {
TensorCopySync(*x, platform::CPUPlace(), &cpu_attr_tensor);
data = cpu_attr_tensor.data<T>();
std::vector<T> vec_new_data;
if (x->type() == framework::proto::VarType::INT32) {
auto* data = x->data<int>();
if (platform::is_gpu_place(x->place())) {
framework::Tensor cpu_attr_tensor;
TensorCopySync(*x, platform::CPUPlace(), &cpu_attr_tensor);
data = cpu_attr_tensor.data<int>();
}
vec_new_data = std::vector<T>(data, data + x->numel());
} else if (x->type() == framework::proto::VarType::INT64) {
auto* data = x->data<int64_t>();
if (platform::is_gpu_place(x->place())) {
framework::Tensor cpu_attr_tensor;
TensorCopySync(*x, platform::CPUPlace(), &cpu_attr_tensor);
data = cpu_attr_tensor.data<int64_t>();
}
vec_new_data = std::vector<T>(data, data + x->numel());
} else {
PADDLE_THROW("The dtype of Tensor must be int32 or int64.");
}
auto vec_data = std::vector<T>(data, data + x->numel());
return vec_data;
return vec_new_data;
}
template <typename T>
template <typename T = int32_t>
inline std::vector<T> GetDataFromTensorList(
const std::vector<const framework::Tensor*>& list_tensor) {
std::vector<T> vec_new_data;
for (size_t i = 0; i < list_tensor.size(); ++i) {
auto tensor = list_tensor[i];
PADDLE_ENFORCE_EQ(
tensor->dims(), framework::make_ddim({1}),
"ShapeError: If the element type is Tensor, "
"the element's shape must be [1]. But received the element's shape "
"is [%s]",
tensor->dims());
if (platform::is_gpu_place(tensor->place())) {
framework::Tensor temp;
TensorCopySync(*tensor, platform::CPUPlace(), &temp);
vec_new_data.push_back((*temp.data<T>()));
PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
"ShapeError: The shape of Tensor in list must be [1]. "
"But received the shape "
"is [%s]",
tensor->dims());
if (tensor->type() == framework::proto::VarType::INT32) {
if (platform::is_gpu_place(tensor->place())) {
framework::Tensor temp;
TensorCopySync(*tensor, platform::CPUPlace(), &temp);
vec_new_data.push_back(static_cast<T>(*temp.data<int>()));
} else {
vec_new_data.push_back(static_cast<T>(*tensor->data<int>()));
}
} else if (tensor->type() == framework::proto::VarType::INT64) {
if (platform::is_gpu_place(tensor->place())) {
framework::Tensor temp;
TensorCopySync(*tensor, platform::CPUPlace(), &temp);
vec_new_data.push_back(static_cast<T>(*temp.data<int64_t>()));
} else {
vec_new_data.push_back(static_cast<T>(*tensor->data<int64_t>()));
}
} else {
vec_new_data.push_back((*tensor->data<T>()));
PADDLE_THROW("The dtype of Tensor in list must be int32 or int64.");
}
}
return vec_new_data;
......
......@@ -186,19 +186,22 @@ class TestConcatAPI(OpTest):
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1 = fluid.layers.fill_constant([1], "int32", 1)
positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
out_1 = fluid.layers.concat(input=[x_2, x_3], axis=1)
out_2 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1)
out_2 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int32)
out_3 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int64)
exe = fluid.Executor(place=fluid.CPUPlace())
[res_1, res_2] = exe.run(
[res_1, res_2, res_3] = exe.run(
fluid.default_main_program(),
feed={"x_1": input_2,
"x_2": input_2,
"x_3": input_3},
fetch_list=[out_1, out_2])
fetch_list=[out_1, out_2, out_3])
assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
if __name__ == '__main__':
......
......@@ -228,14 +228,19 @@ create_test_fp16(TestSplitOp)
class TestSplitAPI(OpTest):
def test_api(self):
input_1 = np.random.random([4, 5, 6]).astype("int32")
positive_1 = fluid.layers.fill_constant([1], "int32", 1)
positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
x_1 = fluid.data(shape=[4, 5, 6], dtype='int32', name='x_1')
x_2 = fluid.data(shape=[4, 5, None], dtype='int32', name='x_2')
out_0, out_1, out_2 = fluid.layers.split(
input=x_1, num_or_sections=[2, positive_1, -1], dim=1)
input=x_1,
num_or_sections=[positive_2_int64, positive_1_int32, -1],
dim=positive_1_int64)
out_3, out_4, out_5 = fluid.layers.split(
input=x_1, num_or_sections=[2, 1, 2], dim=positive_1)
input=x_1, num_or_sections=[2, 1, 2], dim=positive_1_int32)
fluid.layers.split(input=x_2, num_or_sections=2, dim=2)
exe = fluid.Executor(place=fluid.CPUPlace())
......
......@@ -207,27 +207,35 @@ class TestUnsqueezeAPI(OpTest):
def test_api(self):
    """Check fluid.layers.unsqueeze with axes given as Python ints,
    int32/int64 scalar tensors, and int32/int64 axes tensors; all forms
    must agree with the equivalent numpy reshape."""
    input = np.random.random([3, 2, 5]).astype("float32")
    x = fluid.data(name='x', shape=[3, 2, 5], dtype="float32")
    # Scalar axis values provided as 1-element tensors of both int dtypes.
    positive_3_int32 = fluid.layers.fill_constant([1], "int32", 3)
    positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
    # Whole axes lists provided as tensors of both int dtypes.
    axes_tensor_int32 = fluid.data(
        name='axes_tensor_int32', shape=[3], dtype="int32")
    axes_tensor_int64 = fluid.data(
        name='axes_tensor_int64', shape=[3], dtype="int64")
    out_1 = fluid.layers.unsqueeze(x, axes=[3, 1, 1])
    # Mixed list: int32 tensor, int64 tensor, and a plain int.
    out_2 = fluid.layers.unsqueeze(
        x, axes=[positive_3_int32, positive_1_int64, 1])
    out_3 = fluid.layers.unsqueeze(x, axes=axes_tensor_int32)
    out_4 = fluid.layers.unsqueeze(x, axes=3)
    out_5 = fluid.layers.unsqueeze(x, axes=axes_tensor_int64)
    exe = fluid.Executor(place=fluid.CPUPlace())
    res_1, res_2, res_3, res_4, res_5 = exe.run(
        fluid.default_main_program(),
        feed={
            "x": input,
            "axes_tensor_int32": np.array([3, 1, 1]).astype("int32"),
            "axes_tensor_int64": np.array([3, 1, 1]).astype("int64")
        },
        fetch_list=[out_1, out_2, out_3, out_4, out_5])
    assert np.array_equal(res_1, input.reshape([3, 1, 1, 2, 5, 1]))
    assert np.array_equal(res_2, input.reshape([3, 1, 1, 2, 5, 1]))
    assert np.array_equal(res_3, input.reshape([3, 1, 1, 2, 5, 1]))
    assert np.array_equal(res_4, input.reshape([3, 2, 5, 1]))
    assert np.array_equal(res_5, input.reshape([3, 1, 1, 2, 5, 1]))
def test_error(self):
def test_axes_type():
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册