Unverified · Commit db235bf0 · Authored by Aurelius84 · Committed by GitHub

[OpAttr]padding_value of Pad support Tensor type (#45514)

* [OpAttr]padding_value of Pad support Tensor type

* fix unittest

* fix unittest

* fix coverage
Parent 97f43a8e
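For context, a minimal sketch (not part of the commit) of what this change enables: in static graph mode, the constant padding value for `pad` can now be produced at runtime by another op and passed as a Tensor, instead of being fixed as a Python float when the program is built.

```python
# Sketch of the new capability, assuming static graph mode (the mode the
# new unit test below exercises).
import paddle

paddle.enable_static()

x = paddle.static.data(name='x', shape=[2, 4], dtype='float32')
value = paddle.assign([1.0])  # a 1-element Tensor carrying the pad value
# Previously `value` had to be a Python int/float; now a Tensor works too.
out = paddle.nn.functional.pad(x, pad=[1, 1, 1, 1], value=value, mode='constant')
```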
@@ -54,7 +54,8 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("pad_value",
                    "(float, default 0.0) "
                    "The value to fill the padded areas.")
-        .SetDefault(0.0f);
+        .SetDefault(0.0f)
+        .SupportTensor();
     AddComment(R"DOC(
 Pad Operator.
......
@@ -1931,7 +1931,7 @@
   backward : p_norm_grad

 - api : pad
-  args : (Tensor x, int[] paddings, float pad_value)
+  args : (Tensor x, int[] paddings, Scalar pad_value)
   output : Tensor
   infer_meta :
     func : PadInferMeta
......
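In the API yaml, `Scalar` is phi's polymorphic value type: it can be constructed from a Python int, float, or a 1-element Tensor, which is what lets a single `pad_value` argument cover all three cases. A small dygraph sketch of the number cases (the Tensor case is exercised by the static-mode unit test below):

```python
import paddle

x = paddle.randn([2, 4])
# Both calls reach the same pad kernel; the int is wrapped into the
# Scalar argument just like the float.
a = paddle.nn.functional.pad(x, [1, 1, 1, 1], mode='constant', value=0.5)
b = paddle.nn.functional.pad(x, [1, 1, 1, 1], mode='constant', value=2)
```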
@@ -1706,8 +1706,8 @@
   backward : pad3d_double_grad

 - backward_api : pad_double_grad
-  forward : pad_grad(Tensor x, Tensor grad_out, int[] paddings, float pad_value) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, int[] paddings, float pad_value)
+  forward : pad_grad(Tensor x, Tensor grad_out, int[] paddings, Scalar pad_value) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, int[] paddings, Scalar pad_value)
   output : Tensor(grad_out_grad)
   infer_meta :
     func : PadInferMeta
@@ -1715,8 +1715,8 @@
     func : pad

 - backward_api : pad_grad
-  forward : pad(Tensor x, int[] paddings, float pad_value) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, int[] paddings, float pad_value)
+  forward : pad(Tensor x, int[] paddings, Scalar pad_value) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int[] paddings, Scalar pad_value)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
......
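Because the backward signatures switch to `Scalar` as well, gradients flow through `pad` the same way regardless of how the value was supplied. A hedged dygraph sketch:

```python
import paddle

x = paddle.randn([2, 4])
x.stop_gradient = False
out = paddle.nn.functional.pad(x, [1, 1, 1, 1], mode='constant', value=1.0)
out.sum().backward()
print(x.grad.shape)  # [2, 4]; the gradient w.r.t. the interior is all ones
```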
@@ -2121,7 +2121,7 @@ void OverlapAddInferMeta(const MetaTensor& x,

 void PadInferMeta(const MetaTensor& input,
                   const std::vector<int>& paddings,
-                  float pad_value,
+                  const Scalar& padding_value,
                   MetaTensor* out,
                   MetaConfig config) {
   auto x_dim = input.dims();
......
@@ -305,7 +305,7 @@ void OverlapAddInferMeta(const MetaTensor& x,

 void PadInferMeta(const MetaTensor& input,
                   const std::vector<int>& paddings,
-                  float pad_value,
+                  const Scalar& padding_value,
                   MetaTensor* out,
                   MetaConfig config = MetaConfig());
......
@@ -13,6 +13,8 @@
 // limitations under the License.

 #pragma once

+#include "paddle/phi/common/scalar.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/funcs/padding.h"

 namespace phi {
@@ -20,7 +22,7 @@ template <typename T, typename Context>
 void PadGradKernel(const Context& dev_ctx,
                    const DenseTensor& d_out,
                    const std::vector<int>& paddings,
-                   float pad_value,
+                   const Scalar& pad_value,
                    DenseTensor* d_x) {
   if (d_x == nullptr) {
     return;
......
@@ -16,6 +16,7 @@
 #include <utility>
 #include <vector>

+#include "paddle/phi/common/scalar.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/funcs/padding.h"

 namespace phi {
@@ -23,11 +24,11 @@ template <typename T, typename Context>
 void PadKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const std::vector<int>& paddings,
-               float pad_value,
+               const Scalar& pad_value,
                DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   int rank = x.dims().size();
   funcs::PaddingFunctor<Context, T>(
-      rank, dev_ctx, paddings, static_cast<T>(pad_value), x, out);
+      rank, dev_ctx, paddings, pad_value.to<T>(), x, out);
 }

 }  // namespace phi
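`Scalar::to<T>()` replaces the old `static_cast<T>(float)`: the stored value, whichever concrete type it holds, is converted to the kernel's element type `T`. A hedged behavioral sketch of what that cast means at the Python level:

```python
# Assumption: the constant-pad call below routes into PadKernel, so the
# Scalar value 1 is converted to the input dtype (here float64) via to<T>().
import paddle

x = paddle.zeros([2, 2], dtype='float64')
out = paddle.nn.functional.pad(x, [1, 1, 1, 1], mode='constant', value=1)
print(out.dtype)  # paddle.float64
```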
@@ -15,6 +15,7 @@
 #pragma once

+#include "paddle/phi/common/scalar.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {
@@ -23,6 +24,6 @@ template <typename T, typename Context>
 void PadGradKernel(const Context& dev_ctx,
                    const DenseTensor& d_out,
                    const std::vector<int>& paddings,
-                   float pad_value,
+                   const Scalar& pad_value,
                    DenseTensor* d_x);

 }  // namespace phi
@@ -15,6 +15,7 @@
 #pragma once

+#include "paddle/phi/common/scalar.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {
@@ -23,6 +24,6 @@ template <typename T, typename Context>
 void PadKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const std::vector<int>& paddings,
-               float pad_value,
+               const Scalar& pad_value,
                DenseTensor* out);

 }  // namespace phi
@@ -7062,6 +7062,10 @@ def pad(x, paddings, pad_value=0., name=None):
         'complex128'
     ], "pad")
+    check_type(pad_value, 'pad_value', (float, int, Variable), 'pad')
+    if isinstance(pad_value, int):
+        pad_value = float(pad_value)
+
     helper = LayerHelper('pad', **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
@@ -7070,7 +7074,7 @@ def pad(x, paddings, pad_value=0., name=None):
         outputs={'Out': out},
         attrs={
             'paddings': paddings,
-            'pad_value': float(pad_value)
+            'pad_value': pad_value
         })
     return out
......
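With the relaxed `check_type`, `fluid.layers.pad` now accepts an int (normalized to float), a float, or a Variable for `pad_value`; only the plain numbers are coerced, while a Variable is forwarded untouched so the attribute can be resolved at run time. A sketch of the two newly accepted forms:

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()

x = fluid.data(name='x', shape=[2, 4], dtype='float32')
out_int = fluid.layers.pad(x, paddings=[1, 1, 1, 1], pad_value=1)  # int -> 1.0
out_var = fluid.layers.pad(x, paddings=[1, 1, 1, 1],
                           pad_value=paddle.assign([1.0]))         # runtime Variable
```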
@@ -14,6 +14,7 @@
 from __future__ import print_function

+import os
 import unittest
 import numpy as np
 from op_test import OpTest
@@ -22,6 +23,8 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+from test_attribute_var import UnittestBase


 class TestPadOp(OpTest):
@@ -123,5 +126,68 @@ class TestPadOpError(unittest.TestCase):
                 fluid.layers.pad(x=data, paddings=[0, 1])


+class TestPaddingValueTensor(UnittestBase):
+
+    def init_info(self):
+        self.shapes = [[2, 4]]
+        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())
+
+    def test_static(self):
+        main_prog = Program()
+        startup_prog = Program()
+        with program_guard(main_prog, startup_prog):
+            fc = paddle.nn.Linear(4, 10)
+            x = paddle.randn([2, 4])
+            x.stop_gradient = False
+            feat = fc(x)  # [2, 10]
+            out = self.call_func(feat)
+
+            sgd = paddle.optimizer.SGD()
+            sgd.minimize(paddle.mean(out))
+            self.assertTrue(self.var_prefix() in str(main_prog))
+
+            exe = paddle.static.Executor()
+            exe.run(startup_prog)
+            res = exe.run(fetch_list=[feat, out])
+            gt = np.pad(res[0], [1, 1], 'constant', constant_values=[1., 1.])
+            np.testing.assert_allclose(res[1], gt)
+            paddle.static.save_inference_model(self.save_path, [x], [feat, out],
+                                               exe)
+            # Test for Inference Predictor
+            infer_outs = self.infer_prog()
+            gt = np.pad(infer_outs[0], [1, 1],
+                        'constant',
+                        constant_values=[1., 1.])
+            np.testing.assert_allclose(infer_outs[1], gt)
+
+    def path_prefix(self):
+        return 'padding_value'
+
+    def var_prefix(self):
+        return "Var["
+
+    def call_func(self, x):
+        padding_value = paddle.assign([1.0])
+        out = paddle.nn.functional.pad(x,
+                                       pad=[1, 1, 1, 1],
+                                       value=padding_value,
+                                       mode='constant')
+        return out
+
+
+class TestPaddingValueTensor2(TestPaddingValueTensor):
+
+    def call_func(self, x):
+        padding_value = paddle.assign([1.0])
+        # test for int value
+        tmp = paddle.fluid.layers.pad(x, paddings=[1, 1, 1, 1], pad_value=1)
+        out = paddle.fluid.layers.pad(x,
+                                      paddings=[1, 1, 1, 1],
+                                      pad_value=padding_value)
+        return out
+
+
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -26,7 +26,7 @@ from ...tensor.manipulation import unsqueeze
 from ...tensor import clip
 from ...tensor import sum
 from ...tensor import sqrt
-from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
+from ...fluid.data_feeder import check_variable_and_dtype, check_dtype, check_type
 from ...fluid.framework import _varbase_creator, _in_legacy_dygraph, in_dygraph_mode, _non_static_mode
 from ...fluid import dygraph_utils
@@ -1477,6 +1477,10 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
             'complex128'
         ], "pad")
+        check_type(pad_value, 'pad_value', (float, int, Variable), 'pad')
+        if isinstance(pad_value, int):
+            pad_value = float(pad_value)
+
         helper = LayerHelper('pad', **locals())
         dtype = helper.input_dtype(input_param_name='x')
         out = helper.create_variable_for_type_inference(dtype)
@@ -1485,7 +1489,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
             outputs={'Out': out},
             attrs={
                 'paddings': paddings,
-                'pad_value': float(pad_value)
+                'pad_value': pad_value
             })
         return out
......