diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc
index f3414c33b5ab3cc8dffee640fd85b9625b3f237b..b1f09fb0029affe671d63874cf3d3db86476c367 100644
--- a/paddle/fluid/operators/compare_op.cc
+++ b/paddle/fluid/operators/compare_op.cc
@@ -102,3 +102,5 @@ REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
 REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
 REGISTER_LOGICAL_OP(equal, "Out = X == Y");
 REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
+REGISTER_LOGICAL_OP(not_equal, "Out = X != Y");
+REGISTER_LOGICAL_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor);
diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu
index 3507af2ae3add8cf02f5b9f3b3d89b40d73bcb0d..00263a2ade4502e732d53b871665185f8d0fa9f1 100644
--- a/paddle/fluid/operators/compare_op.cu
+++ b/paddle/fluid/operators/compare_op.cu
@@ -17,3 +17,4 @@ limitations under the License. */
 REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
 REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
 REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);
+REGISTER_LOGICAL_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor);
diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h
index 4b2ee5a9d68f5f1fd3d2d374669763855659f1db..c651335268fee08c08bcac6247f5a2ff92784330 100644
--- a/paddle/fluid/operators/compare_op.h
+++ b/paddle/fluid/operators/compare_op.h
@@ -48,6 +48,14 @@ struct EqualFunctor {
   }
 };
 
+template <typename T>
+struct NotEqualFunctor {
+  using ELEM_TYPE = T;
+  HOSTDEVICE bool operator()(const T& a, const T& b) const {
+    return !EqualFunctor<T>()(a, b);
+  }
+};
+
 template <typename DeviceContext, typename Functor>
 class CompareOpKernel
     : public framework::OpKernel<typename Functor::ELEM_TYPE> {
diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py
index 00e4e6907804c7a460e60d960b4aa94ca23b4886..d829bba1b101cc802ea29f32e0b7ecdb1ac448f5 100644
--- a/python/paddle/v2/fluid/layers/math_op_patch.py
+++ b/python/paddle/v2/fluid/layers/math_op_patch.py
@@ -152,7 +152,12 @@ def monkey_patch_variable():
             ("__div__", "elementwise_div", False),
             ("__rdiv__", "elementwise_div", True),
             ("__pow__", "elementwise_pow", False),
-            ("__rpow__", "elementwise_pow", True)):
+            ("__rpow__", "elementwise_pow", True),
+            # for logical compare
+            ("__eq__", "equal", False),
+            ("__ne__", "not_equal", False),
+            ("__lt__", "less_than", False),
+            ("__le__", "less_equal", False)):
         setattr(Variable, method_name,
                 _elemwise_method_creator_(method_name, op_type, reverse))
diff --git a/python/paddle/v2/fluid/learning_rate_decay.py b/python/paddle/v2/fluid/learning_rate_decay.py
index 2a2a29fd9cbedc138dc82ca75ccd78208fd33195..0826d3da79a96590f00159a2d2e6f069792909c4 100644
--- a/python/paddle/v2/fluid/learning_rate_decay.py
+++ b/python/paddle/v2/fluid/learning_rate_decay.py
@@ -179,7 +179,7 @@ def polynomial_decay(learning_rate,
             shape=[1], dtype='float32', value=1.0)
         with layers.Switch() as switch:
-            with switch.case(layers.equal(x=global_step, y=zero_var)):
+            with switch.case(global_step == zero_var):
                 layers.assign(input=one_var, output=div_res)
         decay_steps = decay_steps * div_res
     else:
@@ -229,7 +229,7 @@ def piecewise_decay(global_step, boundaries, values):
                 shape=[1], dtype='float32', value=float(boundaries[i]))
             value_var = layers.fill_constant(
                 shape=[1], dtype='float32', value=float(values[i]))
-            with switch.case(layers.less_than(global_step, boundary_val)):
+            with switch.case(global_step < boundary_val):
                 layers.assign(value_var, lr)
         last_value_var = layers.fill_constant(
             shape=[1],
diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py
index aea43c2517a02c72c1ee3307afdd3b21910f0064..fa46f86973e82e451d7671d554627359528f5131 100644
--- a/python/paddle/v2/fluid/tests/test_layers.py
+++ b/python/paddle/v2/fluid/tests/test_layers.py
@@ -161,8 +161,8 @@ class TestBook(unittest.TestCase):
             label=label,
             chunk_scheme="IOB",
             num_chunk_types=(label_dict_len - 1) / 2)
-        self.assertNotEqual(crf, None)
-        self.assertNotEqual(crf_decode, None)
+        self.assertFalse(crf is None)
+        self.assertFalse(crf_decode is None)
 
         print(str(program))
diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5198ec17d027f007b4a831ef2e427481f8ff8c4
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.framework as framework
+import paddle.v2.fluid as fluid
+
+
+class TestPythonOperatorOverride(unittest.TestCase):
+    def check_result(self, fn, place, dtype):
+        shape = [9, 10]
+
+        x_data = np.random.random(size=shape).astype(dtype)
+        y_data = np.random.random(size=shape).astype(dtype)
+        python_out = fn(x_data, y_data)
+
+        x_var = layers.create_global_var(
+            name='x', shape=shape, value=0.0, dtype=dtype, persistable=True)
+        y_var = layers.create_global_var(
+            name='y', shape=shape, value=0.0, dtype=dtype, persistable=True)
+        out = fn(x_var, y_var)
+
+        exe = fluid.Executor(place)
+
+        exe.run(fluid.default_startup_program())
+        fluid_out = exe.run(fluid.default_main_program(),
+                            feed={'x': x_data,
+                                  'y': y_data},
+                            fetch_list=[out])
+
+        np.testing.assert_array_equal(python_out, fluid_out[0])
+
+    def test_override(self):
+        # compare func to check
+        compare_fns = [
+            lambda _a, _b: _a == _b,
+            lambda _a, _b: _a != _b,
+            lambda _a, _b: _a < _b,
+            lambda _a, _b: _a <= _b,
+            lambda _a, _b: _a > _b,
+            lambda _a, _b: _a >= _b,
+        ]
+
+        # places to check
+        places = [fluid.CPUPlace()]
+        if fluid.core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+
+        # dtypes to check
+        dtypes = ['int32', 'float32']
+
+        for place in places:
+            for dtype in dtypes:
+                for compare_fn in compare_fns:
+                    with framework.program_guard(framework.Program(),
+                                                 framework.Program()):
+                        self.check_result(compare_fn, place, dtype)
+
+
+if __name__ == '__main__':
+    unittest.main()
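
A minimal usage sketch of what this patch enables, assuming monkey_patch_variable() from the patched math_op_patch.py has been invoked (the pre-existing __add__/__div__ entries in the hunk above imply fluid already calls it during setup); the variable names below are illustrative only and not part of the patch:

    import paddle.v2.fluid.layers as layers

    x = layers.data(name='x', shape=[1], dtype='float32')
    y = layers.data(name='y', shape=[1], dtype='float32')

    # Before this patch, comparisons required explicit layer calls:
    cond = layers.less_than(x=x, y=y)

    # After it, plain Python operators append the same compare ops
    # to the current program:
    cond = x < y   # less_than
    le = x <= y    # less_equal
    eq = x == y    # equal
    ne = x != y    # not_equal, newly registered in compare_op.cc/.cu

Only __eq__, __ne__, __lt__, and __le__ are patched, yet the new test also exercises > and >=: with two Variable operands in Python 2, those comparisons resolve through the reflected __lt__/__le__ of the right-hand operand, so they build the same less_than/less_equal ops with the operands swapped.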