diff --git a/paddle/fluid/operators/clip_op.h b/paddle/fluid/operators/clip_op.h
index 03abfe7eb703b021dac2261dcd9c87d440b04001..68f5d5460efd16a79d6e1553c2fb78da31fc704a 100644
--- a/paddle/fluid/operators/clip_op.h
+++ b/paddle/fluid/operators/clip_op.h
@@ -66,7 +66,7 @@ template <typename DeviceContext, typename T>
 class ClipKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto max = context.Attr<T>("max");
+    auto max = static_cast<T>(context.Attr<float>("max"));
     Tensor max_cpu;
     if (context.HasInput("Max")) {
       auto* max_t = context.Input<Tensor>("Max");
@@ -77,8 +77,9 @@ class ClipKernel : public framework::OpKernel<T> {
       }
       max = max_data[0];
     }
+    max = static_cast<T>(max);
 
-    auto min = context.Attr<T>("min");
+    auto min = context.Attr<float>("min");
     Tensor min_cpu;
     if (context.HasInput("Min")) {
       auto* min_t = context.Input<Tensor>("Min");
@@ -141,7 +142,7 @@ template <typename DeviceContext, typename T>
 class ClipGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto max = context.Attr<T>("max");
+    auto max = static_cast<T>(context.Attr<float>("max"));
     Tensor max_cpu;
     if (context.HasInput("Max")) {
       auto* max_t = context.Input<Tensor>("Max");
@@ -152,8 +153,9 @@ class ClipGradKernel : public framework::OpKernel<T> {
       }
       max = max_data[0];
     }
+    max = static_cast<T>(max);
 
-    auto min = context.Attr<T>("min");
+    auto min = context.Attr<float>("min");
     Tensor min_cpu;
     if (context.HasInput("Min")) {
       auto* min_t = context.Input<Tensor>("Min");
@@ -164,6 +166,7 @@ class ClipGradKernel : public framework::OpKernel<T> {
       }
       min = min_data[0];
     }
+    min = static_cast<T>(min);
 
     auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
 
diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index 2e1f9d41747e3a99b4b4a0650a52973459b85c7b..b56d9f6668e8bcbd37443fb88b1f5f4dd40a2511 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -138,8 +138,9 @@ class TestClipAPI(unittest.TestCase):
         out_6 = paddle.clip(images, max=max)
         out_7 = paddle.clip(images, max=-1.)
         out_8 = paddle.clip(images)
+        out_9 = paddle.clip(paddle.cast(images, 'float64'), min=0.2, max=0.9)
 
-        res1, res2, res3, res4, res5, res6, res7, res8 = exe.run(
+        res1, res2, res3, res4, res5, res6, res7, res8, res9 = exe.run(
             fluid.default_main_program(),
             feed={
                 "image": data,
@@ -147,7 +148,7 @@ class TestClipAPI(unittest.TestCase):
                 "max": np.array([0.8]).astype('float32')
             },
             fetch_list=[
-                out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8
+                out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8, out_9
             ])
 
         self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8)))
@@ -158,6 +159,8 @@ class TestClipAPI(unittest.TestCase):
         self.assertTrue(np.allclose(res6, data.clip(max=0.8)))
         self.assertTrue(np.allclose(res7, data.clip(max=-1)))
         self.assertTrue(np.allclose(res8, data))
+        self.assertTrue(
+            np.allclose(res9, data.astype(np.float64).clip(0.2, 0.9)))
 
     def test_clip_dygraph(self):
         place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index d2db2a7cb71945e137e46d6793f8cba1f7adf12f..b6314ef1ba37937a39073ec68cf1cf540b27bf64 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1611,11 +1611,8 @@ def clip(x, min=None, max=None, name=None):
             # [[4.5, 6.4]
     """
 
-    np_dtype = np.float32
-    if x.dtype == VarDesc.VarType.FP64:
-        np_dtype = np.float64
-    fmin = float(np.finfo(np_dtype).min)
-    fmax = float(np.finfo(np_dtype).max)
+    fmin = float(np.finfo(np.float32).min)
+    fmax = float(np.finfo(np.float32).max)
 
     if in_dygraph_mode():
         if isinstance(min, Variable):