Created by: T8T9
PR types
Function optimization
PR changes
OPs
Describe
合并 elementwise_pow 和 pow,命名为 power,完全兼容 elementwise_pow 和 pow。(Merge elementwise_pow and pow into a single `power` API, fully backward-compatible with both.)
测试代码
import paddle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
# x ** y
def run_power(mode, x, y):
    """Compute ``x ** y`` with ``paddle.power`` in the requested execution mode.

    Args:
        mode (str): ``'dynamic'`` for imperative execution, ``'static'`` for
            graph execution. Any other value is silently ignored.
        x (numpy.ndarray): base values; always fed to Paddle as a tensor.
        y (int | float | numpy.ndarray): exponent. A Python scalar is passed
            to ``paddle.power`` directly; an ndarray is fed as a second tensor.

    Side effects:
        Prints the computed result; static mode runs on CPU via an Executor.
    """
    if mode == 'dynamic':
        paddle.enable_imperative()
        # BUGFIX: the original checked isinstance(y, (int, long, float)),
        # but `long` was removed in Python 3 and raises NameError there.
        # (int, float) covers every scalar exponent this script passes.
        if isinstance(y, (int, float)):
            # y is a Python scalar: pass it to power() as-is.
            x_ = paddle.imperative.to_variable(x)
            res = paddle.power(x_, y)
            print("{}, scalar, {} ** {} = {}".format(mode, x, y, res.numpy()))
        else:
            # y is an ndarray: feed it as a second tensor.
            x_ = paddle.imperative.to_variable(x)
            y_ = paddle.imperative.to_variable(y)
            res = paddle.power(x_, y_)
            print("{}, tensor, {} ** {} = {}".format(mode, x, y, res.numpy()))
    elif mode == 'static':
        paddle.disable_imperative()
        if isinstance(y, (int, float)):
            # Scalar exponent: only x needs a feed placeholder.
            with program_guard(Program(), Program()):
                x_ = paddle.nn.data(name="x", shape=x.shape, dtype=x.dtype)
                res = paddle.power(x_, y)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                outs = exe.run(feed={'x': x}, fetch_list=[res])
                print("{}, scalar, {} ** {} = {}".format(mode, x, y, outs[0]))
        else:
            # Tensor exponent: both x and y need feed placeholders.
            with program_guard(Program(), Program()):
                x_ = paddle.nn.data(name="x", shape=x.shape, dtype=x.dtype)
                y_ = paddle.nn.data(name="y", shape=y.shape, dtype=y.dtype)
                res = paddle.power(x_, y_)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                outs = exe.run(feed={'x': x, 'y': y}, fetch_list=[res])
                print("{}, tensor, {} ** {} = {}".format(mode, x, y, outs[0]))
def batch_run(x, y):
    """Run paddle.power twice in each execution mode for the inputs *x*, *y*.

    Each mode is invoked twice on purpose, to confirm repeated execution
    yields identical results. Prints a separator line afterwards.
    """
    for mode in ('dynamic', 'dynamic', 'static', 'static'):
        run_power(mode, x, y)
    print('-' * 10)
# Case 1: int32 base, Python-float exponent.
# Note: constructing an int32 array from [1.5, 2.5, 3.5] truncates to [1 2 3].
x = np.array([1.5, 2.5, 3.5], dtype=np.int32)
y = 2.5
batch_run(x, y)

# Case 2: float32 base, Python-float exponent.
x = np.array([1.5, 2.5, 3.5], dtype=np.float32)
y = 2.5
batch_run(x, y)

# Case 3: float32 base, float32 tensor exponent (broadcast from shape [1]).
x = np.array([1.5, 2.5, 3.5], dtype=np.float32)
y = np.array([2.5], dtype=np.float32)
batch_run(x, y)

# Case 4: int32 base with a float32 tensor exponent — expected to fail with a
# dtype-mismatch error from the elementwise_pow kernel; print it for the PR log.
x = np.array([1.5, 2.5, 3.5], dtype=np.int32)
y = np.array([2.5], dtype=np.float32)
try:
    batch_run(x, y)
except Exception as e:
    print(e)
输出 (Output):
dynamic, scalar, [1 2 3] ** 2.5 = [1 4 9]
dynamic, scalar, [1 2 3] ** 2.5 = [1 4 9]
static, scalar, [1 2 3] ** 2.5 = [1 4 9]
static, scalar, [1 2 3] ** 2.5 = [1 4 9]
----------
dynamic, scalar, [1.5 2.5 3.5] ** 2.5 = [ 2.755676 9.882117 22.917652]
dynamic, scalar, [1.5 2.5 3.5] ** 2.5 = [ 2.755676 9.882117 22.917652]
static, scalar, [1.5 2.5 3.5] ** 2.5 = [ 2.755676 9.882117 22.917652]
static, scalar, [1.5 2.5 3.5] ** 2.5 = [ 2.755676 9.882117 22.917652]
----------
dynamic, tensor, [1.5 2.5 3.5] ** [2.5] = [ 2.755676 9.882117 22.917652]
dynamic, tensor, [1.5 2.5 3.5] ** [2.5] = [ 2.755676 9.882117 22.917652]
static, tensor, [1.5 2.5 3.5] ** [2.5] = [ 2.755676 9.882117 22.917652]
static, tensor, [1.5 2.5 3.5] ** [2.5] = [ 2.755676 9.882117 22.917652]
----------
--------------------------------------------
C++ Call Stacks (More useful to developers):
--------------------------------------------
0 std::string paddle::platform::GetTraceBackString<std::string const&>(std::string const&, char const*, int)
1 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
2 int const* paddle::framework::Tensor::data<int>() const
3 void paddle::operators::ElementwiseComputeEx<paddle::operators::PowFunctor<int>, paddle::platform::CPUDeviceContext, int, int>(paddle::framework::ExecutionContext const&, paddle::framework::Tensor const*, paddle::framework::Tensor const*, int, paddle::operators::PowFunctor<int>, paddle::framework::Tensor*)
4 paddle::operators::ElementwisePowKernel<paddle::platform::CPUDeviceContext, int>::Compute(paddle::framework::ExecutionContext const&) const
5 std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 2ul, paddle::operators::ElementwisePowKernel<paddle::platform::CPUDeviceContext, float>, paddle::operators::ElementwisePowKernel<paddle::platform::CPUDeviceContext, double>, paddle::operators::ElementwisePowKernel<paddle::platform::CPUDeviceContext, int>, paddle::operators::ElementwisePowKernel<paddle::platform::CPUDeviceContext, long> >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
6 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&)
7 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&)
8 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool)
9 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap)
----------------------
Error Message Summary:
----------------------
InvalidArgumentError: Tensor holds the wrong type, it holds float, but desires to be int.
[Hint: Expected valid == true, but received valid:0 != true:1.] at (/paddle/paddle/fluid/framework/tensor_impl.h:33)
[operator < elementwise_pow > error]