diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.h b/paddle/fluid/operators/elementwise/elementwise_pow_op.h
index 535d838209d0e418554d7f6facc932af6fff6185..8cc4b166fc491e56d011560e02cc102f845ebf64 100755
--- a/paddle/fluid/operators/elementwise/elementwise_pow_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.h
@@ -23,17 +23,17 @@ namespace operators {
 template <typename T>
 struct PowFunctor {
   inline HOSTDEVICE T operator()(T a, T b) const {
-    // TODO(wujionghao): A potential speed improvement is supporting different
-    // types in C++.
-    // #ifdef __CUDA_ARCH__
-    //   // On CUDAPlace, std::pow(3, 1) calls pow(float, float), and
-    //   // it will return a float number like 2.99... , which floor to 2
-    //   // when cast to int by default and it is wrong.
-    //   // Use llrint to cast it to the nearest integer, which is 3.
-    //   if (std::is_integral<T>::value) {
-    //     return std::llrint(std::pow(a, b));
-    //   }
-    // #endif
+// TODO(wujionghao): A potential speed improvement is supporting different
+// types in C++.
+#ifdef __CUDA_ARCH__
+    // On CUDAPlace, std::pow(3, 1) calls pow(float, float), and
+    // it will return a float number like 2.99... , which floor to 2
+    // when cast to int by default and it is wrong.
+    // Use llrint to cast it to the nearest integer, which is 3.
+    if (std::is_integral<T>::value) {
+      return std::llrint(std::pow(a, b));
+    }
+#endif
     return std::pow(a, b);
   }
 };
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index f6d5c83ef20ff7a52cc8cf7477065efe05d7132e..3a5dcd02fd786594ba1d858eee4d5e220becaada 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -172,12 +172,12 @@ def pow(x, y, name=None):
             # example 1: y is a scalar
             x = paddle.to_tensor([1, 2, 3])
             y = 2
             res = paddle.pow(x, y)
-            print(res.numpy()) # [1 4 9]
+            print(res) # [1 4 9]
             # example 2: y is a Tensor
             y = paddle.full(shape=[1], fill_value=2, dtype='float32')
             res = paddle.pow(x, y)
-            print(res.numpy()) # [1 4 9]
+            print(res) # [1 4 9]
 
     """
     # in dynamic graph mode
@@ -185,14 +185,9 @@ def pow(x, y, name=None):
         if isinstance(y, (int, float)):
             return core.ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
-            if x.dtype != y.dtype:
-                y = cast(y, dtype='float64')
-                x = cast(x, dtype='float64')
-                out_dygraph = _elementwise_op_in_dygraph(
-                    x, y, axis=-1, act=None, op_name='elementwise_pow')
-                return out_dygraph
-
+            y = cast(y, dtype='float64')
+            x = cast(x, dtype='float64')
             return _elementwise_op_in_dygraph(
                 x, y, axis=-1, act=None, op_name='elementwise_pow')
         else:
             raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
@@ -213,9 +208,7 @@ def pow(x, y, name=None):
         if x.dtype != y.dtype:
             y = cast(y, dtype='float64')
             x = cast(x, dtype='float64')
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        else:
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
         return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
     else:
         raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))