Commit acb13e7f authored by Luo Tao

Merge branch 'develop' into fluid_infer

...@@ -39,6 +39,11 @@ N-dim tensor. X and Y could be any type. Each element of the Out tensor is
calculated by %s
)DOC",
comment.type, comment.equation));
AddAttr<int>("axis",
"(int, default -1). The start dimension index "
"for broadcasting Y onto X.")
.SetDefault(-1)
.EqualGreaterThan(-1);
}
};
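For context, the new `axis` attribute follows the broadcasting convention of Paddle's elementwise operators: when Y's shape matches a contiguous run of X's dimensions, `axis` names the index in X where Y's dimensions start to align, and the default -1 means "align with the trailing dimensions". A minimal NumPy sketch of the intended semantics (the helper name is illustrative, not part of the operator):

import numpy as np

def broadcast_compare(x, y, op, axis=-1):
    # axis = -1: align y with the trailing dimensions of x.
    if axis == -1:
        axis = x.ndim - y.ndim
    shape = [1] * x.ndim
    shape[axis:axis + y.ndim] = y.shape   # place y's dims at `axis`
    return op(x, y.reshape(shape))        # NumPy broadcasting does the rest

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
y = np.array([1.0, 5.0, 9.0], dtype=np.float32)      # shape (3,)
out = broadcast_compare(x, y, np.less, axis=1)       # bool, shape (2, 3, 4)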
...@@ -95,11 +100,5 @@ REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
REGISTER_LOGICAL_OP(greater_than, "Out = X > Y");
REGISTER_LOGICAL_KERNEL(greater_than, CPU,
paddle::operators::GreaterThanFunctor);
REGISTER_LOGICAL_OP(greater_equal, "Out = X >= Y");
REGISTER_LOGICAL_KERNEL(greater_equal, CPU,
paddle::operators::GreaterEqualFunctor);
REGISTER_LOGICAL_OP(equal, "Out = X == Y");
REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
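The `greater_than` and `greater_equal` registrations (and their CUDA counterparts below) are dropped rather than moved: for operands of the same shape, `x > y` is exactly `y < x`, so the remaining less-than kernels can serve both directions. A hedged sketch of the equivalence in NumPy (the helper names are hypothetical):

import numpy as np

# For same-shape operands, the "greater" comparisons reduce to the
# "less" kernels with the operands swapped:
def greater_than(x, y):
    return np.less(y, x)        # x > y   <=>   y < x

def greater_equal(x, y):
    return np.less_equal(y, x)  # x >= y  <=>   y <= x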
...@@ -16,8 +16,4 @@ limitations under the License. */
REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
REGISTER_LOGICAL_KERNEL(greater_than, CUDA,
paddle::operators::GreaterThanFunctor);
REGISTER_LOGICAL_KERNEL(greater_equal, CUDA,
paddle::operators::GreaterEqualFunctor);
REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);
...@@ -16,6 +16,7 @@ limitations under the License. */
#include <math.h>
#include <type_traits>
#include "paddle/framework/op_registry.h"
#include "paddle/operators/elementwise_op_function.h"
#include "paddle/platform/transform.h" #include "paddle/platform/transform.h"
namespace paddle { namespace paddle {
...@@ -33,18 +34,6 @@ struct LessEqualFunctor { ...@@ -33,18 +34,6 @@ struct LessEqualFunctor {
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; } HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; }
}; };
template <typename T>
struct GreaterThanFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; }
};
template <typename T>
struct GreaterEqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; }
};
template <typename T>
struct EqualFunctor {
using ELEM_TYPE = T;
...@@ -65,14 +54,7 @@ class CompareOpKernel
public:
void Compute(const framework::ExecutionContext& context) const override {
using T = typename Functor::ELEM_TYPE;
auto* x = context.Input<framework::Tensor>("X");
auto* y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out");
Functor binary_func;
platform::Transform<DeviceContext> trans;
trans(context.template device_context<DeviceContext>(), x->data<T>(),
      x->data<T>() + x->numel(), y->data<T>(),
      out->mutable_data<bool>(context.GetPlace()), binary_func);
ElementwiseComputeEx<Functor, DeviceContext, T, bool>(context);
}
};
......
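The kernel body shrinks to a single call because `ElementwiseComputeEx` now owns the whole read-transform-write cycle, with `bool` passed as the output element type. A rough NumPy sketch of what that one line does, assuming the inputs arrive through the execution context:

import numpy as np

def compare_compute(ctx_inputs, functor):
    # Mirrors ElementwiseComputeEx<Functor, DeviceContext, T, bool>:
    # fetch X and Y, allocate Out with element type bool, apply the
    # functor elementwise.
    x, y = ctx_inputs["X"], ctx_inputs["Y"]
    out = np.empty(x.shape, dtype=np.bool_)   # OutType = bool
    out[...] = functor(x, y)
    return out

out = compare_compute({"X": np.array([1.0, 3.0]), "Y": np.array([2.0, 2.0])},
                      lambda a, b: a < b)     # mirrors LessThanFunctor
print(out)                                    # [ True False]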
...@@ -176,14 +176,15 @@ class MidWiseTransformIterator<T, platform::CUDADeviceContext>
};
#endif
template <typename Functor, typename T, typename DeviceContext>
template <typename Functor, typename T, typename DeviceContext,
          typename OutType = T>
class TransformFunctor {
public:
TransformFunctor(const framework::Tensor* x, const framework::Tensor* y,
                 framework::Tensor* z, const DeviceContext& ctx, Functor func)
    : x_(x->data<T>()),
      y_(y->data<T>()),
      z_(z->mutable_data<T>(ctx.GetPlace())),
      z_(z->mutable_data<OutType>(ctx.GetPlace())),
      nx_(x->numel()),
      ctx_(ctx),
      func_(func) {}
...@@ -208,7 +209,7 @@ class TransformFunctor {
private:
const T* x_;
const T* y_;
T* z_;
OutType* z_;
int64_t nx_;
const DeviceContext& ctx_;
Functor func_;
...@@ -364,15 +365,16 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) {
}
}
template <typename Functor, typename DeviceContext, typename T>
template <typename Functor, typename DeviceContext, typename T,
          typename OutType = T>
void ElementwiseComputeEx(const framework::ExecutionContext& ctx) {
using Tensor = framework::Tensor;
auto* x = ctx.Input<Tensor>("X");
auto* y = ctx.Input<Tensor>("Y");
auto* z = ctx.Output<Tensor>("Out");
z->mutable_data<T>(ctx.GetPlace());
z->mutable_data<OutType>(ctx.GetPlace());
TransformFunctor<Functor, T, DeviceContext> functor(
TransformFunctor<Functor, T, DeviceContext, OutType> functor(
    x, y, z, ctx.template device_context<DeviceContext>(), Functor());
auto x_dims = x->dims();
......
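The new `OutType` template parameter defaults to `T`, so every existing arithmetic instantiation compiles unchanged, while the comparison ops can request a `bool` output buffer for `T`-typed inputs. A small Python analogue of that default, for illustration only:

import numpy as np

def transform(x, y, func, out_dtype=None):
    # out_dtype=None plays the role of the OutType = T default.
    out = np.empty(x.shape, dtype=out_dtype or x.dtype)
    out[...] = func(x, y)
    return out

transform(np.ones(3), np.ones(3), np.add)               # output dtype = input dtype
transform(np.ones(3), np.ones(3), np.less, np.bool_)    # OutType = bool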
...@@ -11,13 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..layer_helper import LayerHelper, unique_name
from ..framework import Program, Variable, Operator
from .. import core
from tensor import assign, fill_constant
import contextlib
from ..registry import autodoc
import contextlib
from layer_function_generator import autodoc
from tensor import assign, fill_constant
from .. import core
from ..framework import Program, Variable, Operator
from ..layer_helper import LayerHelper, unique_name
__all__ = [
'split_lod_tensor',
...@@ -1477,7 +1477,7 @@ class DynamicRNN(object):
method))
@autodoc
@autodoc()
def reorder_lod_tensor_by_rank(x, rank_table):
helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
helper.is_instance('x', Variable)
......
...@@ -15,14 +15,14 @@
All util layers.
"""
from ..layer_helper import LayerHelper
from ..framework import unique_name
from ..registry import autodoc
from layer_function_generator import autodoc
from ..framework import unique_name
from ..layer_helper import LayerHelper
__all__ = ['get_places']
@autodoc
@autodoc()
def get_places(device_count=None, device_type=None):
helper = LayerHelper('get_places', **locals())
out_places = helper.create_variable(name=unique_name(helper.name + ".out"))
......
...@@ -13,17 +13,19 @@
# limitations under the License.
import re
import cStringIO
import warnings
import functools
import inspect
import proto.framework_pb2 as framework_pb2
from framework import OpProtoHolder, Variable, Program, Operator
from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
import functools
import warnings
from .. import proto
framework_pb2 = proto.framework_pb2
from ..framework import OpProtoHolder, Variable
from ..layer_helper import LayerHelper
__all__ = [
'deprecated',
'register_layer',
'generate_layer_fn',
'autodoc',
]
...@@ -96,7 +98,7 @@ def _generate_doc_string_(op_proto):
return buf.getvalue()
def register_layer(op_type):
def generate_layer_fn(op_type):
"""Register the Python layer for an Operator.
Args:
...@@ -207,7 +209,10 @@ def deprecated(func_or_class):
return func_wrapper
def autodoc(func):
    func.__doc__ = _generate_doc_string_(OpProtoHolder.instance().get_op_proto(
        func.__name__))
    return func
def autodoc(comment=""):
    def __impl__(func):
        func.__doc__ = _generate_doc_string_(OpProtoHolder.instance(
        ).get_op_proto(func.__name__)) + comment
        return func
    return __impl__
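With this change `autodoc` becomes a decorator factory: it must now be called (hence the `@autodoc` → `@autodoc()` updates in control_flow.py and the get_places layer above), and an optional `comment` string is appended to the docstring generated from the operator's proto. Usage, with an illustrative comment string (the note's wording is an assumption, not taken from the source):

@autodoc()  # docstring generated purely from the operator's proto
def get_places(device_count=None, device_type=None):
    ...

# Hypothetical extra text appended after the generated docstring:
@autodoc(comment="\nNote: requires a LoDRankTable built beforehand.")
def reorder_lod_tensor_by_rank(x, rank_table):
    ...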
...@@ -11,8 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..registry import register_layer
from layer_function_generator import generate_layer_fn
__activations__ = [
'sigmoid',
...@@ -53,4 +52,4 @@ __all__ = [
] + __activations__
for _OP in set(__all__):
globals()[_OP] = register_layer(_OP)
globals()[_OP] = generate_layer_fn(_OP)
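The renamed `generate_layer_fn` (formerly `register_layer`) states the intent more precisely: it does not register anything with a global registry, it synthesizes a Python layer function from the operator's proto, and the loop installs one such function per name into the module's namespace. Roughly, the loop is shorthand for the following (a sketch; the keyword-argument call style is an assumption based on the `fluid.layers.mean(x=x)` call in the test below):

# Equivalent to writing out, for every op name in __all__:
#   sigmoid = generate_layer_fn('sigmoid')
#   tanh    = generate_layer_fn('tanh')
#   ...
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[32], dtype='float32')
y = fluid.layers.sigmoid(x=x)   # a generated layer function in action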
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
......
...@@ -38,8 +38,6 @@ def create_test_class(op_type, typename, callback):
for _type_name in {'float32', 'float64', 'int32', 'int64'}:
create_test_class('less_than', _type_name, lambda _a, _b: _a < _b)
create_test_class('less_equal', _type_name, lambda _a, _b: _a <= _b)
create_test_class('greater_than', _type_name, lambda _a, _b: _a > _b)
create_test_class('greater_equal', _type_name, lambda _a, _b: _a >= _b)
create_test_class('equal', _type_name, lambda _a, _b: _a == _b)
if __name__ == '__main__':
......
...@@ -11,26 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import warnings
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.registry as registry
import numpy as np
import decorators
class TestRegistry(unittest.TestCase):
@decorators.prog_scope()
def test_registry_layer(self):
self.layer_type = "mean"
program = framework.Program()
x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32')
output = layers.mean(x)
output = fluid.layers.mean(x=x)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
X = np.random.random((10, 10)).astype("float32")
mean_out = exe.run(program, feed={"X": X}, fetch_list=[output])
mean_out = exe.run(feed={"X": X}, fetch_list=[output])
self.assertAlmostEqual(np.mean(X), mean_out)
self.assertAlmostEqual(np.mean(X), mean_out[0])
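Two notes on the rewritten test, offered as readings of the diff rather than documented guarantees: `@decorators.prog_scope()` appears to run the test body inside a fresh default program, which is why the explicit `framework.Program()` plumbing disappears and `exe.run(...)` can omit the program argument, falling back to the default main program; and `fetch_list` results come back as a list of NumPy arrays, hence the `mean_out[0]` in the assertion. In sketch form, continuing from the test's variables:

# Assuming decorators.prog_scope() resets fluid.default_main_program()
# around the test body:
X = np.random.random((10, 10)).astype("float32")
fetched = exe.run(feed={"X": X}, fetch_list=[output])  # runs the default program
assert np.isclose(np.mean(X), fetched[0])              # a list of fetched arrays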