Unverified commit 5bd84b22, authored by ShenLiang, committed by GitHub

revert divide (#27202)

Parent 60c3ef3a
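
Summary: this commit reverts the `paddle.divide` / `paddle.floor_divide` / `paddle.remainder` based dispatch (and its scalar/ndarray/dtype-promotion rules) and rebinds the Python operators `/`, `//`, and `%` directly to the `elementwise_div`, `elementwise_floordiv`, and `elementwise_mod` ops; the float/double kernels registered for `elementwise_floordiv` are dropped as well. The sketch below is illustrative only (not part of the diff) and assumes a Paddle 2.0-beta build with this revert applied; the expected values mirror the updated unit tests further down.

```python
# Illustrative dygraph sketch (assumption: a Paddle 2.0-beta build with this
# revert applied). Expected values mirror the updated unit tests in this diff.
import numpy as np
import paddle

paddle.disable_static()  # dygraph mode

# `/` maps straight to elementwise_div again.
x = paddle.to_tensor(np.array([2., 3., 4.], dtype='float64'))
y = paddle.to_tensor(np.array([1., 5., 2.], dtype='float64'))
print((x / y).numpy())   # [2.  0.6 2. ]

# `//` and `%` map to elementwise_floordiv / elementwise_mod; the tests
# exercise them with int64 inputs (the float/double floordiv kernels are
# removed by this commit).
a = paddle.to_tensor(np.array([2, 3, 8, 7], dtype='int64'))
b = paddle.to_tensor(np.array([1, 5, 3, 3], dtype='int64'))
print((a // b).numpy())  # [2 0 2 2]
print((a % b).numpy())   # [0 3 2 1]
```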
@@ -49,8 +49,6 @@ REGISTER_OP_WITHOUT_GRADIENT(elementwise_floordiv, ops::ElementwiseOp,
REGISTER_OP_CPU_KERNEL(
    elementwise_floordiv,
-    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, double>,
    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, int>,
    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext,
                                   int64_t>);
@@ -19,7 +19,5 @@ namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
    elementwise_floordiv,
-    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, float>,
-    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, double>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, int>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, int64_t>);
@@ -14,7 +14,6 @@ limitations under the License. */
#pragma once
-#include <math.h>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
@@ -62,15 +61,8 @@ void elementwise_floor_div(const framework::ExecutionContext &ctx,
                           const framework::Tensor *x,
                           const framework::Tensor *y, framework::Tensor *z) {
  int axis = ctx.Attr<int>("axis");
-  auto x_dims = x->dims();
-  auto y_dims = y->dims();
-  if (x_dims.size() >= y_dims.size()) {
-    ElementwiseComputeEx<FloorDivFunctor<T>, DeviceContext, T>(
-        ctx, x, y, axis, FloorDivFunctor<T>(), z);
-  } else {
-    ElementwiseComputeEx<InverseFloorDivFunctor<T>, DeviceContext, T>(
-        ctx, x, y, axis, InverseFloorDivFunctor<T>(), z);
-  }
+  ElementwiseComputeEx<FloorDivFunctor<T>, DeviceContext, T>(
+      ctx, x, y, axis, FloorDivFunctor<T>(), z);
}

template <typename DeviceContext, typename T>
......
@@ -19,7 +19,6 @@ from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from ..layers import common_methods
from . import to_variable, no_grad
-import paddle
import numpy as np
import six
@@ -163,26 +162,6 @@ def monkey_patch_math_varbase():
    def _scalar_div_(var, value):
        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
# TODO(shenliang03): currently, it supports divide, floor_divide, remainder
# for binary operator by using the api to achieve the type promotion
def _binary_method_creator_(op_type, reverse=False):
import paddle
def __impl__(self, other_var):
import paddle
op = getattr(paddle, op_type)
if reverse:
return op(other_var, self)
else:
return op(self, other_var)
__impl__.__doc__ = """
See paddle.{}""".format(op_type)
__impl__.__name__ = op_type
return __impl__
    # for binary operator such as elementwise, compare
    def _binary_creator_(method_name,
                         op_type,
@@ -281,20 +260,22 @@ def monkey_patch_math_varbase():
    ## a*b == b*a. Do not need to reverse explicitly
    ('__rmul__',
     _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
+    ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                 _scalar_div_)),
+    ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
+                                     False, _scalar_div_)),
+    ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                  None)),
    ('__rtruediv__', _binary_creator_('rtruediv__', 'elementwise_div', True,
                                      None)),
    ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                 None)),
    ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                  None)),
-    # These binary use paddle.optype
-    ('__div__', _binary_method_creator_('divide', False)),
-    ('__truediv__', _binary_method_creator_('divide', False)),
-    ('__rtruediv__', _binary_method_creator_('divide', True)),
-    ('__rdiv__', _binary_method_creator_('divide', True)),
-    ('__floordiv__', _binary_method_creator_('floor_divide', False)),
-    ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
-    ('__mod__', _binary_method_creator_('remainder', False)),
+    ('__floordiv__', _binary_creator_('__floordiv__',
+                                      'elementwise_floordiv', False, None)),
+    ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
+                                 None)),
    ## for logical compare
    ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
    ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
......
@@ -16,7 +16,6 @@ from __future__ import print_function
import warnings
import inspect
-import paddle
from .. import core
from ..framework import Variable, unique_name
@@ -46,7 +45,6 @@ EXPRESSION_MAP = {
    "__pow__": "A ** B",
    "__rpow__": "A **= B",
    "__floordiv__": "A //B",
-    "__rfloordiv__": "A //= B",
    "__mod__": "A % B",
    "__eq__": "A == B",
    "__ne__": "A != B",
@@ -235,25 +233,6 @@ def monkey_patch_variable():
    def _scalar_div_(var, value):
        return _scalar_op_(var, 1.0 / value, 0.0)
# TODO(shenliang03): currently, it supports divide, floor_divide, remainder
# for binary operator by using the api to achieve the type promotion
def _binary_method_creator_(op_type, reverse=False):
import paddle
def __impl__(self, other_var):
op = getattr(paddle, op_type)
if reverse:
return op(other_var, self)
else:
return op(self, other_var)
__impl__.__doc__ = """
See paddle.{}""".format(op_type)
__impl__.__name__ = op_type
return __impl__
    def _binary_creator_(method_name,
                         op_type,
                         reverse=False,
@@ -360,18 +339,22 @@ def monkey_patch_variable():
    # a*b == b*a. Do not need to reverse explicitly
    ('__rmul__',
     _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
+    ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                 _scalar_div_)),
+    ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
+                                     False, _scalar_div_)),
+    ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                  None)),
+    ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
+                                      True, None)),
    ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                 None)),
    ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                  None)),
-    # These binary use paddle.optype
-    ('__div__', _binary_method_creator_('divide', False)),
-    ('__rdiv__', _binary_method_creator_('divide', True)),
-    ('__truediv__', _binary_method_creator_('divide', False)),
-    ('__rtruediv__', _binary_method_creator_('divide', True)),
-    ('__floordiv__', _binary_method_creator_('floor_divide', False)),
-    ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
-    ('__mod__', _binary_method_creator_('remainder', False)),
+    ('__floordiv__', _binary_creator_('__floordiv__',
+                                      'elementwise_floordiv', False, None)),
+    ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
+                                 None)),
    # for logical compare
    ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
    ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
......
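
For context on the two math_op_patch hunks above: after the revert, the dunder operators are again created by `_binary_creator_` and append the elementwise ops directly, rather than routing through the `paddle.*` type-promoting APIs. Below is a minimal static-graph sketch (illustrative only, not part of the diff; the program and variable names are made up):

```python
# Hedged static-graph sketch: with the reverted bindings, `//` and `%` on
# Variables append elementwise_floordiv / elementwise_mod ops directly.
import paddle.fluid as fluid

main_prog = fluid.Program()
with fluid.program_guard(main_prog, fluid.Program()):
    x = fluid.data(name="x", shape=[3], dtype="int64")
    y = fluid.data(name="y", shape=[3], dtype="int64")
    fdiv = x // y   # Variable.__floordiv__ -> elementwise_floordiv
    mod = x % y     # Variable.__mod__      -> elementwise_mod

# Expected to list the two elementwise ops appended by the operators.
print([op.type for op in main_prog.global_block().ops])
# e.g. ['elementwise_floordiv', 'elementwise_mod']
```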
@@ -113,8 +113,8 @@ class TranspilerAsyncLRDecayTest(unittest.TestCase):
                         ["listen_and_serv"])
        # block1: sum,cast,scale,floor,fill_constant,elementwise_pow,scale
        self.assertEqual([op.type for op in pserver.blocks[1].ops], [
-            "sum", "cast", "fill_constant", "elementwise_div", "floor",
-            "fill_constant", "elementwise_pow", "scale"
+            "sum", "cast", "scale", "floor", "fill_constant", "elementwise_pow",
+            "scale"
        ])
        # block1~2: optimize pass
......
@@ -240,124 +240,25 @@ class TestElementwiseDivBroadcast(unittest.TestCase):
        self.assertEqual((out_result == (2 / x)).all(), True)

+class TestDivideOp(unittest.TestCase):
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
+            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

class TestDivideAPI(unittest.TestCase):
    def setUp(self):
        paddle.set_default_dtype("float64")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
# rule 1
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = np.array([1, 2, 3])
self.assertRaises(TypeError, paddle.divide, x=x, y=y)
# rule 2: both the inputs are not Tensor
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = 2
y = 4
res = paddle.divide(x, y)
exe = fluid.Executor(place)
np_z = exe.run(fluid.default_main_program(),
feed={},
fetch_list=[res])
self.assertEqual(np_z[0] == 0.5, True)
# rule 3:
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = fluid.data(name="y", shape=[3], dtype="float32")
self.assertRaises(TypeError, paddle.divide, x=x, y=y)
# rule 4: x is Tensor, y is scalar
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = 2
exe = fluid.Executor(place)
res = x / y
np_z = exe.run(fluid.default_main_program(),
feed={"x": np.array([2, 3, 4]).astype('float64')},
fetch_list=[res])
z_expected = np.array([1., 1.5, 2.])
self.assertEqual((np_z[0] == z_expected).all(), True)
# rule 5: y is Tensor, x is scalar
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = 2
exe = fluid.Executor(place)
res = y / x
np_z = exe.run(fluid.default_main_program(),
feed={"x": np.array([2, 8, 4]).astype('float64')},
fetch_list=[res])
z_expected = np.array([1., 0.25, 0.5])
self.assertEqual((np_z[0] == z_expected).all(), True)
# rule 6: y is Tensor, x is Tensor
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = fluid.data(name="y", shape=[3], dtype="float64")
exe = fluid.Executor(place)
res = x / y
np_z = exe.run(fluid.default_main_program(),
feed={
"x": np.array([2, 3, 4]).astype('float64'),
"y": np.array([1, 5, 2]).astype('float64')
},
fetch_list=[res])
z_expected = np.array([2., 0.6, 2.])
self.assertEqual((np_z[0] == z_expected).all(), True)
+            y_1 = paddle.divide(x, y, name='div_res')
+            self.assertEqual(('div_res' in y_1.name), True)
+
+    def test_dygraph(self):
+        with fluid.dygraph.guard():
+            np_x = np.array([2, 3, 4]).astype('float64')
+            np_y = np.array([1, 5, 2]).astype('float64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.divide(x, y)
+            np_z = z.numpy()
+            z_expected = np.array([2., 0.6, 2.])
+            self.assertEqual((np_z == z_expected).all(), True)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.divide, x=x, y=np_y)

                # rule 2: both the inputs are not Tensor
z = paddle.divide(3, 2)
self.assertEqual(z.numpy()[0] == 1.5, True)
# rule 3: both the inputs are Tensor
np_x = np.array([2, 3, 4])
np_y = np.array([1, 5, 2])
x = paddle.to_tensor(np_x, dtype="float32")
y = paddle.to_tensor(np_y, dtype="float64")
self.assertRaises(TypeError, paddle.divide, x=x, y=y)
# rule 4: x is Tensor, y is scalar
np_x = np.array([2, 3, 4])
x = paddle.to_tensor(np_x, dtype="int32")
y = 2
z = x / y
z_expected = np.array([1., 1.5, 2.])
self.assertEqual((z_expected == z.numpy()).all(), True)
# rule 5: y is Tensor, x is scalar
np_x = np.array([2, 1, 4])
x = paddle.to_tensor(np_x, dtype="int32")
y = 2
z = y / x
z_expected = np.array([1., 2., 0.5])
self.assertEqual((z_expected == z.numpy()).all(), True)
# rule 6: y is Tensor, x is Tensor
np_x = np.array([2, 3, 4])
np_y = np.array([1, 5, 2])
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = x / y
z_expected = np.array([2., 0.6, 2.])
self.assertEqual((z_expected == z.numpy()).all(), True)
if __name__ == '__main__':
......
@@ -58,13 +58,6 @@ class TestElementwiseModOp(OpTest):
        pass
class TestElementwiseModOpInverse(TestElementwiseModOp):
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
class TestElementwiseModOp_scalar(TestElementwiseModOp):
    def init_input_output(self):
        scale_x = random.randint(0, 100000000)
@@ -74,124 +67,25 @@ class TestElementwiseModOp_scalar(TestElementwiseModOp):
        self.out = np.floor_divide(self.x, self.y)

+class TestFloorDivideOp(unittest.TestCase):
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
+            y = fluid.data(name='y', shape=[2, 3], dtype='int64')
+
+            y_1 = paddle.floor_divide(x, y, name='div_res')
+            self.assertEqual(('div_res' in y_1.name), True)

class TestFloorDivideAPI(unittest.TestCase):
    def setUp(self):
        paddle.set_default_dtype("float64")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
# rule 1
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = np.array([1, 2, 3])
self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)
# rule 2: both the inputs are not Tensor
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = 2
y = 4
res = paddle.floor_divide(x, y)
exe = fluid.Executor(place)
np_z = exe.run(fluid.default_main_program(),
feed={},
fetch_list=[res])
self.assertEqual(np_z[0] == 0., True)
# rule 3:
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = fluid.data(name="y", shape=[3], dtype="float32")
self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)
# rule 4: x is Tensor, y is scalar
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = 2
exe = fluid.Executor(place)
res = x // y
np_z = exe.run(fluid.default_main_program(),
feed={"x": np.array([2, 3, 4]).astype('float64')},
fetch_list=[res])
z_expected = np.array([1., 1., 2.])
self.assertEqual((np_z[0] == z_expected).all(), True)
# rule 5: y is Tensor, x is scalar
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = 2
exe = fluid.Executor(place)
res = y // x
np_z = exe.run(fluid.default_main_program(),
feed={"x": np.array([2, 8, 4]).astype('float64')},
fetch_list=[res])
z_expected = np.array([1., 0., 0.])
self.assertEqual((np_z[0] == z_expected).all(), True)
# rule 6: y is Tensor, x is Tensor
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = fluid.data(name="y", shape=[3], dtype="float64")
exe = fluid.Executor(place)
res = x // y
np_z = exe.run(fluid.default_main_program(),
feed={
"x": np.array([2, 3, 4]).astype('float64'),
"y": np.array([1, 5, 2]).astype('float64')
},
fetch_list=[res])
z_expected = np.array([2., 0., 2.])
self.assertEqual((np_z[0] == z_expected).all(), True)
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
+    def test_dygraph(self):
+        with fluid.dygraph.guard():
+            np_x = np.array([2, 3, 8, 7]).astype('int64')
+            np_y = np.array([1, 5, 3, 3]).astype('int64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.floor_divide(x, y)
+            np_z = z.numpy()
+            z_expected = np.array([2, 0, 2, 2])
+            self.assertEqual((np_z == z_expected).all(), True)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.floor_divide, x=x, y=np_y)

                # rule 2: both the inputs are not Tensor
z = paddle.floor_divide(3, 2)
self.assertEqual(z.numpy()[0] == 1., True)
# rule 3: both the inputs are Tensor
np_x = np.array([2, 3, 4])
np_y = np.array([1, 5, 2])
x = paddle.to_tensor(np_x, dtype="float32")
y = paddle.to_tensor(np_y, dtype="float64")
self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)
# rule 4: x is Tensor, y is scalar
np_x = np.array([2, 3, 4])
x = paddle.to_tensor(np_x, dtype="int32")
y = 2
z = x // y
z_expected = np.array([1, 1, 2])
self.assertEqual((z_expected == z.numpy()).all(), True)
# rule 5: y is Tensor, x is scalar
np_x = np.array([2, 1, 4])
x = paddle.to_tensor(np_x, dtype="int32")
y = 2
z = y // x
z_expected = np.array([1, 2, 0])
self.assertEqual((z_expected == z.numpy()).all(), True)
# rule 6: y is Tensor, x is Tensor
np_x = np.array([2, 3, 4])
np_y = np.array([1, 5, 2])
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = x // y
z_expected = np.array([2., 0., 2.])
self.assertEqual((z_expected == z.numpy()).all(), True)
        with fluid.dygraph.guard(fluid.CPUPlace()):
            # divide by zero
......
@@ -84,149 +84,41 @@ class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
        self.dtype = np.float64

+class TestRemainderOp(unittest.TestCase):
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
+            y = fluid.data(name='y', shape=[2, 3], dtype='int64')
+
+            y_1 = paddle.remainder(x, y, name='div_res')
+            self.assertEqual(('div_res' in y_1.name), True)

class TestRemainderAPI(unittest.TestCase):
    def setUp(self):
        paddle.set_default_dtype("float64")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
# rule 1
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = np.array([1, 2, 3])
self.assertRaises(TypeError, paddle.remainder, x=x, y=y)
# rule 3:
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = fluid.data(name="y", shape=[3], dtype="float32")
self.assertRaises(TypeError, paddle.remainder, x=x, y=y)
# rule 4: x is Tensor, y is scalar
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = 2
exe = fluid.Executor(place)
res = x % y
np_z = exe.run(fluid.default_main_program(),
feed={"x": np.array([2, 3, 4]).astype('float64')},
fetch_list=[res])
z_expected = np.array([0., 1., 0.])
self.assertEqual((np_z[0] == z_expected).all(), True)
# rule 5: y is Tensor, x is scalar
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = 3
y = fluid.data(name="y", shape=[3], dtype="float32")
self.assertRaises(TypeError, paddle.remainder, x=x, y=y)
# rule 6: y is Tensor, x is Tensor
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float64")
y = fluid.data(name="y", shape=[1], dtype="float64")
exe = fluid.Executor(place)
res = x % y
np_z = exe.run(fluid.default_main_program(),
feed={
"x": np.array([1., 2., 4]).astype('float64'),
"y": np.array([1.5]).astype('float64')
},
fetch_list=[res])
z_expected = np.array([1., 0.5, 1.0])
self.assertEqual((np_z[0] == z_expected).all(), True)
# rule 6: y is Tensor, x is Tensor
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.data(name="x", shape=[6], dtype="float64")
y = fluid.data(name="y", shape=[1], dtype="float64")
exe = fluid.Executor(place)
res = x % y
np_z = exe.run(
fluid.default_main_program(),
feed={
"x": np.array([-3., -2, -1, 1, 2, 3]).astype('float64'),
"y": np.array([2]).astype('float64')
},
fetch_list=[res])
z_expected = np.array([1., 0., 1., 1., 0., 1.])
self.assertEqual((np_z[0] == z_expected).all(), True)
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
+    def test_dygraph(self):
+        with fluid.dygraph.guard():
+            np_x = np.array([2, 3, 8, 7]).astype('int64')
+            np_y = np.array([1, 5, 3, 3]).astype('int64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.remainder(x, y)
+            np_z = z.numpy()
+            z_expected = np.array([0, 3, 2, 1])
+            self.assertEqual((np_z == z_expected).all(), True)
+
+            np_x = np.array([-3.3, 11.5, -2, 3.5])
+            np_y = np.array([-1.2, 2., 3.3, -2.3])
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = x % y
+            z_expected = np.array([-0.9, 1.5, 1.3, -1.1])
+            self.assertEqual(np.allclose(z_expected, z.numpy()), True)
+
+            np_x = np.array([-3, 11, -2, 3])
+            np_y = np.array([-1, 2, 3, -2])
+            x = paddle.to_tensor(np_x, dtype="int64")
+            y = paddle.to_tensor(np_y, dtype="int64")
+            z = x % y
+            z_expected = np.array([0, 1, 1, -1])
+            self.assertEqual(np.allclose(z_expected, z.numpy()), True)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.remainder, x=x, y=np_y)

                # rule 3: both the inputs are Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x, dtype="float32")
                y = paddle.to_tensor(np_y, dtype="float64")
                self.assertRaises(TypeError, paddle.remainder, x=x, y=y)

                # rule 4: x is Tensor, y is scalar
                np_x = np.array([2, 3, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = x % y
                z_expected = np.array([0, 1, 0])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 5: y is Tensor, x is scalar
                np_x = np.array([2, 3, 4])
x = paddle.to_tensor(np_x)
self.assertRaises(TypeError, paddle.remainder, x=3, y=x)
# rule 6: y is Tensor, x is Tensor
np_x = np.array([1., 2., 4])
np_y = np.array([1.5])
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = x % y
z_expected = np.array([1., 0.5, 1.0])
self.assertEqual((z_expected == z.numpy()).all(), True)
# rule 6: y is Tensor, x is Tensor
np_x = np.array([-3., -2, -1, 1, 2, 3])
np_y = np.array([2.])
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = x % y
z_expected = np.array([1., 0., 1., 1., 0., 1.])
self.assertEqual((z_expected == z.numpy()).all(), True)
np_x = np.array([-3.3, 11.5, -2, 3.5])
np_y = np.array([-1.2, 2., 3.3, -2.3])
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = x % y
z_expected = np.array([-0.9, 1.5, 1.3, -1.1])
self.assertEqual(np.allclose(z_expected, z.numpy()), True)
np_x = np.array([-3, 11, -2, 3])
np_y = np.array([-1, 2, 3, -2])
x = paddle.to_tensor(np_x, dtype="int64")
y = paddle.to_tensor(np_y, dtype="int64")
z = x % y
z_expected = np.array([0, 1, 1, -1])
self.assertEqual(np.allclose(z_expected, z.numpy()), True)
np_x = np.array([-3, 3])
np_y = np.array([[2, 3], [-2, -1]])
x = paddle.to_tensor(np_x, dtype="int64")
y = paddle.to_tensor(np_y, dtype="int64")
z = x % y
z_expected = np.array([[1, 0], [-1, 0]])
self.assertEqual(np.allclose(z_expected, z.numpy()), True)
if __name__ == '__main__':
......
@@ -189,15 +189,15 @@ class TestMathOpPatches(unittest.TestCase):
    @prog_scope()
    def test_integer_div(self):
        a = fluid.layers.data(name="a", shape=[1], dtype='int64')
-        b = a / 2
+        b = a / 7
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
-        a_np = numpy.array([3, 4, 10, 14, 9, 18])
+        a_np = numpy.array([3, 4, 10, 14, 9, 18]).astype('int64')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        # for paddle2.0, use true_divide
-        b_np_actual = (a_np / 2.0)
+        b_np_actual = (a_np / 7).astype('int64')
        self.assertTrue(numpy.array_equal(b_np, b_np_actual))

    @prog_scope()
......
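
A brief note on the integer-division test above: with the revert, `a / 7` on an int64 placeholder goes through `elementwise_div`, and the test's reference suggests this performs integer (truncating) division, hence the cast of the numpy reference back to int64. An illustrative reference computation (not part of the diff):

```python
# Reference computation mirroring the updated test expectation.
import numpy as np

a_np = np.array([3, 4, 10, 14, 9, 18]).astype('int64')
b_ref = (a_np / 7).astype('int64')  # truncated integer quotient
print(b_ref)  # [0 0 1 2 1 2]
```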
@@ -248,8 +248,7 @@ class PolicyGradient(object):
            func=reward_func, x=[action, length], out=reward)
        neg_log_prob = layers.cross_entropy(act_prob, action)
        cost = neg_log_prob * reward
-        cost = (layers.reduce_sum(cost) /
-                layers.cast(layers.reduce_sum(length), "float32")
+        cost = (layers.reduce_sum(cost) / layers.reduce_sum(length)
                ) if length is not None else layers.reduce_mean(cost)
        optimizer = fluid.optimizer.Adam(self.lr)
        optimizer.minimize(cost)
......
@@ -1009,8 +1009,7 @@ def ctc_loss(log_probs,
    loss_out = fluid.layers.squeeze(loss_out, [-1])
    assert reduction in ['mean', 'sum', 'none']
    if reduction == 'mean':
-        loss_out = paddle.mean(loss_out / paddle.cast(label_lengths,
-                                                      loss_out.dtype))
+        loss_out = paddle.mean(loss_out / label_lengths)
    elif reduction == 'sum':
        loss_out = paddle.sum(loss_out)
    return loss_out
......
@@ -64,7 +64,6 @@ from ..fluid.layers import increment  #DEFINE_ALIAS
from ..fluid.layers import multiplex  #DEFINE_ALIAS
from ..fluid.layers import sums  #DEFINE_ALIAS
from ..fluid import layers
-import paddle

__all__ = [
@@ -343,69 +342,9 @@ def divide(x, y, name=None):
    axis = -1
    act = None
    if in_dygraph_mode():
# rule 1 : avoid numpy.ndarray
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise TypeError("divide(): arguments must be Tensor or scalar, not numpy.ndarray.")
# rule 2: both the inputs are not Tensor
elif not isinstance(x, paddle.Tensor) and not isinstance(y, paddle.Tensor):
x = paddle.full(shape=[1], dtype=paddle.get_default_dtype(), fill_value=x)
y = paddle.full(shape=[1], dtype=paddle.get_default_dtype(), fill_value=y)
# rule 3: both the inputs are Tensor
elif isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
if y.dtype != x.dtype:
raise TypeError("divide(): argument position 1 and argument position 2 must have the same dtype."
"But x is {}, y is {}".format(x.dtype, y.dtype))
elif x.dtype in _supported_int_dtype_:
x = x.astype(paddle.get_default_dtype())
y = y.astype(paddle.get_default_dtype())
# rule 4: x is Tensor, y is scalar
elif isinstance(x, paddle.Tensor) and not isinstance(y, paddle.Tensor):
if x.dtype in _supported_int_dtype_:
x = x.astype(paddle.get_default_dtype())
y = paddle.full(shape=[1], dtype=x.dtype, fill_value=y)
# rule 5: x is scalar, y is Tensor
elif not isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
if y.dtype in _supported_int_dtype_:
y = y.astype(paddle.get_default_dtype())
x = paddle.full(shape=[1], dtype=y.dtype, fill_value=x)
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)
# rule 1 : avoid numpy.ndarray
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise TypeError("divide(): arguments must be Tensor or scalar, not numpy.ndarray.")
# rule 2: both the inputs are not Tensor
elif not isinstance(x, Variable) and not isinstance(y, Variable):
x = paddle.fill_constant(shape=[1], dtype=paddle.get_default_dtype(), value=x)
y = paddle.fill_constant(shape=[1], dtype=paddle.get_default_dtype(), value=y)
# rule 3: both the inputs are Tensor
elif isinstance(x, Variable) and isinstance(y, Variable):
if y.dtype != x.dtype:
raise TypeError("divide(): argument position 1 and argument position 2 must have the same dtype."
"But x is {}, y is {}".format(x.dtype, y.dtype))
elif x.dtype in _supported_int_dtype_:
x = paddle.cast(x, paddle.get_default_dtype())
y = paddle.cast(y, paddle.get_default_dtype())
# rule 4: x is Tensor, y is scalar
elif isinstance(x, Variable) and not isinstance(y, Variable):
if x.dtype in _supported_int_dtype_:
x = paddle.cast(x, paddle.get_default_dtype())
y = paddle.fill_constant(shape=[1], dtype=x.dtype, value=y)
# rule 5: x is scalar, y is Tensor
elif not isinstance(x, Variable) and isinstance(y, Variable):
if y.dtype in _supported_int_dtype_:
y = paddle.cast(y, paddle.get_default_dtype())
x = paddle.fill_constant(shape=[1], dtype=y.dtype, value=x)
    return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -444,55 +383,9 @@ def floor_divide(x, y, name=None):
    op_type = 'elementwise_floordiv'
    axis = -1
    if in_dygraph_mode():
# rule 1 : avoid numpy.ndarray
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise TypeError("floor_divide(): arguments must be Tensor or scalar, not numpy.ndarray.")
# rule 2: both the inputs are not Tensor
elif not isinstance(x, paddle.Tensor) and not isinstance(y, paddle.Tensor):
x = paddle.full(shape=[1], dtype=paddle.get_default_dtype(), fill_value=x)
y = paddle.full(shape=[1], dtype=paddle.get_default_dtype(), fill_value=y)
# rule 3: both the inputs are Tensor
elif isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
if y.dtype != x.dtype:
raise TypeError("floor_divide(): argument position 1 and argument position 2 must have the same dtype."
"But x is {}, y is {}".format(x.dtype, y.dtype))
# rule 4: x is Tensor, y is scalar
elif isinstance(x, paddle.Tensor) and not isinstance(y, paddle.Tensor):
y = paddle.full(shape=[1], dtype=x.dtype, fill_value=y)
# rule 5: x is scalar, y is Tensor
elif not isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
x = paddle.full(shape=[1], dtype=y.dtype, fill_value=x)
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, op_name=op_type)
# rule 1 : avoid numpy.ndarray
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise TypeError("divide(): arguments must be Tensor or scalar, not numpy.ndarray.")
# rule 2: both the inputs are not Tensor
elif not isinstance(x, Variable) and not isinstance(y, Variable):
x = paddle.fill_constant(shape=[1], dtype=paddle.get_default_dtype(), value=x)
y = paddle.fill_constant(shape=[1], dtype=paddle.get_default_dtype(), value=y)
# rule 3: both the inputs are Tensor
elif isinstance(x, Variable) and isinstance(y, Variable):
if y.dtype != x.dtype:
raise TypeError("divide(): argument position 1 and argument position 2 must have the same dtype."
"But x is {}, y is {}".format(x.dtype, y.dtype))
# rule 4: x is Tensor, y is scalar
elif isinstance(x, Variable) and not isinstance(y, Variable):
y = paddle.fill_constant(shape=[1], dtype=x.dtype, value=y)
# rule 5: x is scalar, y is Tensor
elif not isinstance(x, Variable) and isinstance(y, Variable):
x = paddle.fill_constant(shape=[1], dtype=y.dtype, value=x)
    return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -531,43 +424,9 @@ def remainder(x, y, name=None):
    op_type = 'elementwise_mod'
    axis = -1
    if in_dygraph_mode():
# rule 1 : avoid numpy.ndarray
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise TypeError("remainder(): arguments must be Tensor or scalar, not numpy.ndarray.")
elif not isinstance(x, paddle.Tensor):
raise TypeError("remainder(): arguments position 1 must be Tensor, not {}".format(type(x)))
# rule 3: both the inputs are Tensor
elif isinstance(y, paddle.Tensor):
if y.dtype != x.dtype:
raise TypeError("remainder(): argument position 1 and argument position 2 must have the same dtype."
"But x is {}, y is {}".format(x.dtype, y.dtype))
# rule 4: x is Tensor, y is scalar
elif not isinstance(y, paddle.Tensor):
y = paddle.full(shape=[1], dtype=x.dtype, fill_value=y)
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, op_name=op_type)
# rule 1 : avoid numpy.ndarray
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise TypeError("remainder(): arguments must be Tensor or scalar, not numpy.ndarray.")
elif not isinstance(x, Variable):
raise TypeError("remainder(): arguments position 1 must be Tensor, not {}".format(type(x)))
# rule 3: both the inputs are Tensor
elif isinstance(y, Variable):
if y.dtype != x.dtype:
raise TypeError("remainder(): argument position 1 and argument position 2 must have the same dtype."
"But x is {}, y is {}".format(x.dtype, y.dtype))
# rule 4: x is Tensor, y is scalar
elif not isinstance(y, paddle.Tensor):
y = paddle.fill_constant(shape=[1], dtype=x.dtype, value=y)
    return _elementwise_op(LayerHelper(op_type, **locals()))
......