Commit 677e7144 authored by liym27, committed by Aurelius84

fix pow op, support tensor for argument factor. (#19313)

improve pow op according to reviews:
1. Delete unnecessary judgement statements in PowGradOpDescMaker;
2. Improve the test_api test;

overload GetKernelTypeForVar

add stop_gradient=True when attr(factor) is a tensor Variable; update the examples in the pow API docs.
test=develop,test=document_preview
Parent bd89a273
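For illustration, a minimal usage sketch of the new behavior (mirroring the examples added to the pow docstring and test_api in the diff below; the factor tensor must have shape [1]):

import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[3, 10, 32, 32], dtype="float32")
# factor given as a Python float -> passed through attr(factor)
y_1 = fluid.layers.pow(x, factor=2.0)
# factor given as a Variable of shape [1] -> passed through the new FactorTensor input
factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
y_2 = fluid.layers.pow(x, factor=factor_tensor)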
@@ -210,7 +210,7 @@ paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'],
paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '6fc86ed23b420c8a0f6c043563cf3937'))
paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308'))
paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
-paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '35fa2b79b1ae6968d4a69788051c1d27'))
+paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'ca34f88ff61cf2a7f4c97a493d6000d0'))
paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.6666666666666666, 1.7159, None)), ('document', '1e1efad868714425da15c785dfb533a1'))
paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4'))
......
@@ -483,6 +483,11 @@ class PowOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of Pow operator");
AddInput("FactorTensor",
         "(Tensor<float>, optional). If provided, pow will use this "
         "tensor as the exponential factor; it has higher priority than "
         "attr(factor). The shape of FactorTensor MUST BE [1].")
    .AsDispensable();
AddOutput("Out", "Output of Pow operator");
AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
AddComment(R"DOC(
@@ -778,6 +783,75 @@ DECLARE_INPLACE_OP_INFERER(ActivationGradOpInplaceInference,
{framework::GradVarName("Out"),
framework::GradVarName("X")});
class PowGradOpDescMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDesc> Apply() const override {
std::unique_ptr<framework::OpDesc> op(new framework::OpDesc());
op->SetType("pow_grad");
op->SetInput("X", Input("X"));
op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
op->SetInput("FactorTensor", Input("FactorTensor"));
op->SetAttrMap(Attrs());
return op;
}
};
class PowOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
ctx->ShareDim("X", /*->*/ "Out");
ctx->ShareLoD("X", /*->*/ "Out");
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return GetKernelType(ctx, *this, "X");
}
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name, const Tensor& tensor,
const framework::OpKernelType& expected_kernel_type) const override {
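// Returning the expected kernel type for FactorTensor skips the usual
// place/layout transform; the kernel reads the factor on the host and
// copies it to CPU itself when it lives on the GPU.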
if (var_name == "FactorTensor") {
return expected_kernel_type;
}
return framework::OpKernelType(expected_kernel_type.data_type_,
tensor.place(), tensor.layout());
}
};
class PowOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
auto out_grad_name = framework::GradVarName("Out");
ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return GetKernelType(ctx, *this, framework::GradVarName("Out"));
}
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name, const Tensor& tensor,
const framework::OpKernelType& expected_kernel_type) const override {
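// As in PowOp, skip the place/layout transform for the FactorTensor input.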
if (var_name == "FactorTensor") {
return expected_kernel_type;
}
return framework::OpKernelType(expected_kernel_type.data_type_,
tensor.place(), tensor.layout());
}
};
} // namespace operators
} // namespace paddle
@@ -907,3 +981,22 @@ REGISTER_OP_CPU_KERNEL(
ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
ops::SquareGradGradFunctor<plat::float16>>);
/* ========================================================================== */
/* ========================== pow register ============================ */
REGISTER_OPERATOR(
pow, ops::PowOp, ops::PowOpMaker, ops::ActivationOpInferVarType,
ops::PowGradOpDescMaker,
std::conditional<ops::CanInplaceAct<ops::PowGradFunctor<float>>(),
::paddle::framework::SingleOpInplaceInToOut, void>::type);
REGISTER_OPERATOR(pow_grad, ops::PowOpGrad,
ops::ActivationGradOpInplaceInference);
REGISTER_OP_CPU_KERNEL(
pow, ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<float>>,
ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<double>>);
REGISTER_OP_CPU_KERNEL(
pow_grad,
ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<float>>,
ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<double>>);
/* ========================================================================== */
@@ -86,3 +86,17 @@ REGISTER_OP_CUDA_KERNEL(
ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
ops::SquareGradGradFunctor<plat::float16>>);
/* ========================================================================== */
/* ========================== pow register ============================ */
REGISTER_OP_CUDA_KERNEL(
pow, ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<float>>,
ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<double>>,
ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
pow_grad,
ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<float>>,
ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<double>>,
ops::PowGradKernel<plat::CUDADeviceContext,
ops::PowGradFunctor<plat::float16>>);
/* ========================================================================== */
@@ -1589,6 +1589,97 @@ class SqrtDoubleGradKernel
}
};
template <typename DeviceContext, typename Functor>
class PowKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
public:
using T = typename Functor::ELEMENT_TYPE;
void Compute(const framework::ExecutionContext& context) const override {
const framework::Tensor* X = nullptr;
framework::Tensor* Out = nullptr;
ExtractActivationTensor(context, &X, &Out);
Out->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
auto* place =
context.template device_context<DeviceContext>().eigen_device();
Functor functor;
auto attrs = functor.GetAttrs();
for (auto& attr : attrs) {
*attr.second = context.Attr<float>(attr.first);
}
// get FactorTensor
auto* factor_tensor = context.HasInput("FactorTensor")
? context.Input<framework::Tensor>("FactorTensor")
: nullptr;
if (factor_tensor) {
auto* factor_data = factor_tensor->data<float>();
framework::Tensor cpu_factor_tensor;
if (platform::is_gpu_place(factor_tensor->place())) {
TensorCopySync(*factor_tensor, platform::CPUPlace(),
&cpu_factor_tensor);
factor_data = cpu_factor_tensor.data<float>();
}
auto factor =
std::vector<float>(factor_data, factor_data + factor_tensor->numel());
PADDLE_ENFORCE_EQ(factor.size(), 1,
"The shape of factor(tensor) MUST BE [1].");
for (auto& attr : attrs) {
*attr.second = factor[0];
}
}
functor(*place, x, out);
}
};
template <typename DeviceContext, typename Functor>
class PowGradKernel
: public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
public:
using T = typename Functor::ELEMENT_TYPE;
void Compute(const framework::ExecutionContext& context) const override {
const framework::Tensor *X, *Out, *dOut;
framework::Tensor* dX = nullptr;
X = Out = dOut = nullptr;
ExtractActivationGradTensor<Functor::FwdDeps()>(context, &X, &Out, &dOut,
&dX);
dX->mutable_data<T>(context.GetPlace());
auto dout = framework::EigenVector<T>::Flatten(detail::Ref(dOut));
auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
auto dx = framework::EigenVector<T>::Flatten(detail::Ref(dX));
auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
auto* place =
context.template device_context<DeviceContext>().eigen_device();
Functor functor;
auto attrs = functor.GetAttrs();
for (auto& attr : attrs) {
*attr.second = context.Attr<float>(attr.first);
}
// get FactorTensor
auto* factor_tensor =
context.HasInput("FactorTensor")
? context.Input<framework::LoDTensor>("FactorTensor")
: nullptr;
if (factor_tensor) {
auto* factor_data = factor_tensor->data<float>();
framework::Tensor cpu_factor_tensor;
if (platform::is_gpu_place(factor_tensor->place())) {
TensorCopySync(*factor_tensor, platform::CPUPlace(),
&cpu_factor_tensor);
factor_data = cpu_factor_tensor.data<float>();
}
auto factor =
std::vector<float>(factor_data, factor_data + factor_tensor->numel());
PADDLE_ENFORCE_EQ(factor.size(), 1,
"The shape of factor(tensor) MUST BE [1].");
for (auto& attr : attrs) {
*attr.second = factor[0];
}
}
functor(*place, x, out, dout, dx);
}
};
} // namespace operators
} // namespace paddle
@@ -1613,7 +1704,6 @@ class SqrtDoubleGradKernel
__macro(log, Log, LogFunctor, LogGradFunctor); \
__macro(brelu, BRelu, BReluFunctor, BReluGradFunctor); \
__macro(soft_relu, SoftRelu, SoftReluFunctor, SoftReluGradFunctor); \
-__macro(pow, Pow, PowFunctor, PowGradFunctor); \
__macro(stanh, STanh, STanhFunctor, STanhGradFunctor); \
__macro(softplus, Softplus, SoftplusFunctor, SoftplusGradFunctor); \
__macro(softsign, Softsign, SoftsignFunctor, SoftsignGradFunctor); \
......
@@ -9622,7 +9622,7 @@ def pow(x, factor=1.0, name=None):
${comment}
Args:
x(${x_type}): ${x_comment}
-factor(${factor_type}|1.0): ${factor_comment}
+factor(float|Variable|1.0): The exponential factor of Pow.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
@@ -9634,16 +9634,28 @@ def pow(x, factor=1.0, name=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32")
-y = fluid.layers.pow(x, factor=2.0)
# example 1: argument factor is float
y_1 = fluid.layers.pow(x, factor=2.0)
# example 2: argument factor is Variable
factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
y_2 = fluid.layers.pow(x, factor=factor_tensor)
""" """
helper = LayerHelper('pow', **locals()) helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {}
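# If factor is a Variable, feed it through the new FactorTensor input
# (with stop_gradient) instead of the float attr.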
if isinstance(factor, Variable):
factor.stop_gradient = True
inputs['FactorTensor'] = factor
else:
attrs['factor'] = factor
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
-type='pow',
-inputs={'X': x},
-outputs={'Out': out},
-attrs={'factor': factor})
+type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
......
@@ -259,7 +259,7 @@ class TestFloor(TestActivation):
self.outputs = {'Out': out}
# the gradient on floor, ceil, round is undefined.
# we return zero as gradient, but the numpy return nan
# The same reason with TestFloor
def test_check_grad(self):
pass
@@ -588,6 +588,51 @@ class TestPow(TestActivation):
self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestPow_factor_tensor(TestActivation):
def setUp(self):
self.op_type = "pow"
self.init_dtype()
x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
out = np.power(x, 3)
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(x),
'FactorTensor': np.array([3.0]).astype("float32")
}
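# attr(factor) is deliberately left empty; the exponent 3.0 comes from the FactorTensor input above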
self.attrs = {}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', max_relative_error=0.02)
def test_api(self):
import paddle.fluid as fluid
input = np.random.uniform(1, 2, [11, 17]).astype("float32")
x = fluid.layers.data(
name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
factor_1 = 2.0
factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
out_1 = fluid.layers.pow(x, factor=factor_1)
out_2 = fluid.layers.pow(x, factor=factor_2)
exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2 = exe.run(fluid.default_main_program(),
feed={"x": input},
fetch_list=[out_1, out_2])
assert np.array_equal(res_1, np.power(input, 2))
assert np.array_equal(res_2, np.power(input, 3))
class TestSTanh(TestActivation):
def setUp(self):
self.op_type = "stanh"
@@ -791,6 +836,7 @@ create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
......