Unverified commit c93451f4, authored by Aurelius84 and committed by GitHub

[OpAttr]Squeeze axes support Tensor (#45189)

* [OpAttr]Squeeze axes support Tensor

* add support_tensor

* fix unittest

* fix coverage
Parent: 41bdf41d
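In short: this commit lets the `axes` attribute of the squeeze op be carried by a runtime Tensor instead of only a compile-time list of ints. A minimal sketch of the new call forms, distilled from the unit tests added in this commit (static graph mode; shapes illustrative):

```python
import paddle

paddle.enable_static()

feat = paddle.unsqueeze(paddle.randn([2, 3, 10]), [0, 2])  # [1, 2, 1, 3, 10]

# Before this change: only a list/tuple of Python ints.
out_ints = paddle.squeeze(feat, [0, 2])

# Now also accepted: a single Tensor holding the axes ...
out_tensor = paddle.squeeze(feat, paddle.assign([0, 2]))

# ... or a list mixing in 1-D scalar Tensors.
out_mixed = paddle.squeeze(feat, [paddle.full([1], 0, dtype='int32'),
                                  paddle.full([1], 2, dtype='int32')])
```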
@@ -171,7 +171,8 @@ class SqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::vector<int>>("axes",
                               "(std::vector<int>). List of integers,"
                               " indicating the dimensions to squeeze.")
-        .SetDefault({});
+        .SetDefault({})
+        .SupportTensor();
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false)
......
@@ -2536,7 +2536,7 @@
   backward : squared_l2_norm_grad

 - api : squeeze
-  args : (Tensor x, int[] axes)
+  args : (Tensor x, IntArray axes)
   output : Tensor(out), Tensor(xshape)
   infer_meta :
     func : SqueezeWithXShapeInferMeta
......
@@ -2347,14 +2347,14 @@
   func : squared_l2_norm_grad

 - backward_api : squeeze_double_grad
-  forward : squeeze_grad(Tensor xshape, Tensor grad_out, int[] axes) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, int[] axes)
+  forward : squeeze_grad(Tensor xshape, Tensor grad_out, IntArray axes) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray axes)
   output : Tensor(grad_out_grad)
   invoke: squeeze(grad_x_grad, axes)

 - backward_api : squeeze_grad
-  forward : squeeze(Tensor x, int[] axes) -> Tensor(out), Tensor(xshape)
-  args : (Tensor xshape, Tensor out_grad, int[] axes)
+  forward : squeeze(Tensor x, IntArray axes) -> Tensor(out), Tensor(xshape)
+  args : (Tensor xshape, Tensor out_grad, IntArray axes)
   output : Tensor(x_grad)
   infer_meta :
     func : KernelWithXShapeInferMeta
......
@@ -3122,8 +3122,9 @@ void SquaredL2NormInferMeta(const MetaTensor& x, MetaTensor* out) {
 }

 void SqueezeInferMeta(const MetaTensor& x,
-                      const std::vector<int>& axes,
-                      MetaTensor* out) {
+                      const IntArray& axes,
+                      MetaTensor* out,
+                      MetaConfig config) {
   const auto& x_dims = x.dims();
   // Check input tensor dims (<6) Eigen limit.
   PADDLE_ENFORCE_LE(x_dims.size(),
@@ -3135,22 +3136,34 @@ void SqueezeInferMeta(const MetaTensor& x,
                     x_dims.size(),
                     x_dims));

-  auto out_dims = funcs::GetOutputSqueezeShape(axes, x_dims, false);
-  out->set_dims(out_dims);
-  if (x_dims[0] == out_dims[0]) {
-    // Only pass LoD when the first dimension of output and Input(X)
-    // are the same.
-    out->share_lod(x);
+  if (!config.is_runtime && axes.FromTensor()) {
+    // compile time infershape, set all elements to -1.
+    int output_size = x.dims().size() - axes.GetData().size();
+    std::vector<int64_t> vec_out_dims(output_size, -1);
+    out->set_dims(phi::make_ddim(vec_out_dims));
+  } else {
+    std::vector<int32_t> tmp;
+    tmp.reserve(axes.GetData().size());
+    std::for_each(axes.GetData().begin(),
+                  axes.GetData().end(),
+                  [&tmp](const int64_t& t) { tmp.push_back(t); });
+    auto out_dims = funcs::GetOutputSqueezeShape(tmp, x_dims, false);
+    out->set_dims(out_dims);
+    if (x_dims[0] == out_dims[0]) {
+      // Only pass LoD when the first dimension of output and Input(X)
+      // are the same.
+      out->share_lod(x);
+    }
   }
   out->set_dtype(x.dtype());
 }

 void SqueezeWithXShapeInferMeta(const MetaTensor& x,
-                                const std::vector<int>& axes,
+                                const IntArray& axes,
                                 MetaTensor* out,
-                                MetaTensor* xshape) {
-  SqueezeInferMeta(x, axes, out);
+                                MetaTensor* xshape,
+                                MetaConfig config) {
+  SqueezeInferMeta(x, axes, out, config);
   const auto& x_dims = x.dims();
   std::vector<int64_t> xshape_dims(x_dims.size() + 1);
   xshape_dims[0] = 0;
......
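Note on the infer-meta change above: when `axes` comes from a Tensor (`axes.FromTensor()`) and infershape runs at compile time, the squeezed shape cannot be resolved, so every output dimension is set to -1 and only the rank (input rank minus the number of axes) is kept. A small sketch of the observable effect, inferred from that compile-time branch rather than quoted from the commit:

```python
import paddle

paddle.enable_static()

x = paddle.static.data('x', shape=[1, 2, 1, 3])
axes = paddle.assign([0, 2])  # axes carried by a Tensor, unknown until runtime
out = paddle.squeeze(x, axes)

# Compile-time infershape: rank 4 - 2 = 2, every dim marked unknown.
print(out.shape)  # expected: [-1, -1]
```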
@@ -438,13 +438,15 @@ void SplitInferMeta(const MetaTensor& x_meta,
 void SquaredL2NormInferMeta(const MetaTensor& x, MetaTensor* out);

 void SqueezeInferMeta(const MetaTensor& x,
-                      const std::vector<int>& axes,
-                      MetaTensor* out);
+                      const IntArray& axes,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());

 void SqueezeWithXShapeInferMeta(const MetaTensor& x,
-                                const std::vector<int>& axes,
+                                const IntArray& axes,
                                 MetaTensor* out,
-                                MetaTensor* xshape);
+                                MetaTensor* xshape,
+                                MetaConfig config = MetaConfig());

 void StridedSliceRawInferMeta(const MetaTensor& x,
                               const std::vector<int>& axes,
......
@@ -13,6 +13,7 @@
 // limitations under the License.

 #pragma once

+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/tensor_utils.h"
@@ -21,7 +22,7 @@ template <typename T, typename Context>
 void SqueezeGradKernel(const Context& dev_ctx,
                        const DenseTensor& xshape,
                        const DenseTensor& dout,
-                       const std::vector<int>& axes,
+                       const IntArray& axes,
                        DenseTensor* dx) {
   auto xshape_dims = xshape.dims();
   auto x_dims = phi::slice_ddim(xshape_dims, 1, xshape_dims.size());
......
@@ -21,20 +21,22 @@ namespace phi {
 template <typename T, typename Context>
 void SqueezeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
-                   const std::vector<int>& axes,
+                   const IntArray& axes,
                    DenseTensor* out) {
   auto x_dims = x.dims();
-  auto out_dims = funcs::GetOutputSqueezeShape(axes, x_dims, true);
+  std::vector<int32_t> tmp(axes.GetData().begin(), axes.GetData().end());
+  auto out_dims = funcs::GetOutputSqueezeShape(tmp, x_dims, true);

   out->Resize(out_dims);
   dev_ctx.template Alloc<T>(out);
   phi::Copy(dev_ctx, x, dev_ctx.GetPlace(), false, out);
-  out->Resize(out_dims);
+  out->Resize(out_dims);  // copy will reset the dims.
 }

 template <typename T, typename Context>
 void SqueezeWithXShapeKernel(const Context& dev_ctx,
                              const DenseTensor& x,
-                             const std::vector<int>& axes,
+                             const IntArray& axes,
                              DenseTensor* out,
                              DenseTensor* xshape) {
   SqueezeKernel<T, Context>(dev_ctx, x, axes, out);
......
@@ -15,6 +15,7 @@

 #pragma once

+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {
@@ -23,6 +24,6 @@ template <typename T, typename Context>
 void SqueezeGradKernel(const Context& dev_ctx,
                        const DenseTensor& xshape,
                        const DenseTensor& dout,
-                       const std::vector<int>& axes,
+                       const IntArray& axes,
                        DenseTensor* dx);

 }  // namespace phi
@@ -15,6 +15,7 @@

 #pragma once

+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {
@@ -22,13 +23,13 @@ namespace phi {
 template <typename T, typename Context>
 void SqueezeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
-                   const std::vector<int>& axes,
+                   const IntArray& axes,
                    DenseTensor* out);

 template <typename T, typename Context>
 void SqueezeWithXShapeKernel(const Context& dev_ctx,
                              const DenseTensor& x,
-                             const std::vector<int>& axes,
+                             const IntArray& axes,
                              DenseTensor* out,
                              DenseTensor* xshape);
......
@@ -6612,12 +6612,22 @@ def squeeze(input, axes, name=None):
         'float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64',
         'complex64', 'complex128'
     ], 'squeeze')
-    check_type(axes, 'axis/axes', (list, tuple), 'squeeze')
+    check_type(axes, 'axis/axes', (list, tuple, Variable), 'squeeze')
+
+    attrs = {}
+    if isinstance(axes, Variable):
+        axes.stop_gradient = True
+        attrs["axes"] = axes
+    elif isinstance(axes, (list, tuple)):
+        if utils._contain_var(axes):
+            attrs["axes"] = utils._convert_to_tensor_list(axes)
+        else:
+            attrs["axes"] = axes
+
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(type="squeeze2",
                      inputs={"X": input},
-                     attrs={"axes": axes},
+                     attrs=attrs,
                      outputs={
                          "Out": out,
                          "XShape": x_shape
......
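The `attrs` dispatch added above accepts three forms for `axes`; which branch fires decides whether the op records a plain int-list attribute, a single tensor, or a tensor list. Illustrative values for each branch, mirroring the unit tests below (the `Var[...]`/`Vars[...]` renderings are how printed programs display tensor-backed attributes):

```python
import paddle

# A single Variable/Tensor: stop_gradient is forced to True and the tensor
# itself becomes the "axes" attribute (shown as "Var[...]" in printed programs).
axes_tensor = paddle.assign([0, 2])

# A list containing Tensors: converted with utils._convert_to_tensor_list
# into a tensor list (shown as "Vars[...]").
axes_tensor_list = [paddle.full([1], 0, dtype='int32'),
                    paddle.full([1], 2, dtype='int32')]

# Plain ints: stored directly as the attribute, as before.
axes_ints = [0, 2]
```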
@@ -16,9 +16,12 @@ from __future__ import print_function

 import unittest

 import numpy as np
+import os
 from op_test import OpTest

 import paddle
+from paddle.fluid.framework import program_guard, Program
+from test_attribute_var import UnittestBase

 paddle.enable_static()
@@ -82,5 +85,82 @@ class TestSqueezeOp3(TestSqueezeOp):
         self.new_shape = (6, 5, 1, 4)


+class TestSqueeze2AxesTensor(UnittestBase):
+
+    def init_info(self):
+        self.shapes = [[2, 3, 4]]
+        self.save_path = os.path.join(self.temp_dir.name, 'squeeze_tensor')
+
+    def test_static(self):
+        main_prog = Program()
+        startup_prog = Program()
+        with program_guard(main_prog, startup_prog):
+            fc = paddle.nn.Linear(4, 10)
+            x = paddle.randn([2, 3, 4])
+            x.stop_gradient = False
+            feat = fc(x)  # [2, 3, 10]
+            feat = paddle.unsqueeze(feat, [0, 2])  # [1, 2, 1, 3, 10]
+            # axes is a Variable
+            axes = paddle.assign([0, 2])
+            out = paddle.squeeze(feat, axes)
+            out2 = paddle.fluid.layers.squeeze(feat, axes)
+
+            sgd = paddle.optimizer.SGD()
+            sgd.minimize(paddle.mean(out))
+            self.assertTrue("Var[" in str(main_prog))
+
+            exe = paddle.static.Executor()
+            exe.run(startup_prog)
+            res = exe.run(fetch_list=[feat, out, out2])
+            self.assertEqual(res[0].shape, (1, 2, 1, 3, 10))
+            self.assertEqual(res[1].shape, (2, 3, 10))
+            self.assertEqual(res[2].shape, (2, 3, 10))
+
+            paddle.static.save_inference_model(self.save_path, [x], [out], exe)
+            # Test for Inference Predictor
+            infer_out = self.infer_prog()
+            self.assertEqual(infer_out.shape, (2, 3, 10))
+
+
+class TestSqueeze2AxesTensorList(UnittestBase):
+
+    def init_info(self):
+        self.shapes = [[2, 3, 4]]
+        self.save_path = os.path.join(self.temp_dir.name, 'squeeze_tensor')
+
+    def test_static(self):
+        main_prog = Program()
+        startup_prog = Program()
+        with program_guard(main_prog, startup_prog):
+            fc = paddle.nn.Linear(4, 10)
+            x = paddle.randn([2, 3, 4])
+            x.stop_gradient = False
+            feat = fc(x)  # [2, 3, 10]
+            feat = paddle.unsqueeze(feat, [0, 2])  # [1, 2, 1, 3, 10]
+            # axes is a list[Variable]
+            axes = [
+                paddle.full([1], 0, dtype='int32'),
+                paddle.full([1], 2, dtype='int32')
+            ]
+            out = paddle.squeeze(feat, axes)
+            out2 = paddle.fluid.layers.squeeze(feat, axes)
+
+            sgd = paddle.optimizer.SGD()
+            sgd.minimize(paddle.mean(out))
+            self.assertTrue("Vars[" in str(main_prog))
+
+            exe = paddle.static.Executor()
+            exe.run(startup_prog)
+            res = exe.run(fetch_list=[feat, out, out2])
+            self.assertEqual(res[0].shape, (1, 2, 1, 3, 10))
+            self.assertEqual(res[1].shape, (2, 3, 10))
+            self.assertEqual(res[2].shape, (2, 3, 10))
+
+            paddle.static.save_inference_model(self.save_path, [x], [out], exe)
+            # Test for Inference Predictor
+            infer_out = self.infer_prog()
+            self.assertEqual(infer_out.shape, (2, 3, 10))
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -2014,12 +2014,23 @@ def squeeze(x, axis=None, name=None):
         'float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64',
         'complex64', 'complex128'
     ], 'squeeze')
-    check_type(axes, 'axis/axes', (list, tuple), 'squeeze')
+    check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'squeeze')
+
+    attrs = {}
+    if isinstance(axes, Variable):
+        axes.stop_gradient = True
+        attrs["axes"] = axes
+    elif isinstance(axes, (list, tuple)):
+        if utils._contain_var(axes):
+            attrs["axes"] = utils._convert_to_tensor_list(axes)
+        else:
+            attrs["axes"] = axes
+
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(type="squeeze2",
                      inputs={"X": input},
-                     attrs={"axes": axes},
+                     attrs=attrs,
                      outputs={
                          "Out": out,
                          "XShape": x_shape
......
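For reference, a condensed version of the end-to-end scenario the new unit tests exercise: axes fed as a Tensor in a static program, with the squeezed shape only resolved at run time (a sketch, not code from the commit):

```python
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.randn([2, 3, 10])
    feat = paddle.unsqueeze(x, [0, 2])  # [1, 2, 1, 3, 10]
    axes = paddle.assign([0, 2])        # axes live in a Tensor
    out = paddle.squeeze(feat, axes)

exe = paddle.static.Executor()
exe.run(startup_prog)
res = exe.run(main_prog, fetch_list=[out])
print(res[0].shape)  # (2, 3, 10)
```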