From a64b312e3a922ea1e0520d59950e81189748c7f4 Mon Sep 17 00:00:00 2001
From: Krzysztof Binias
Date: Tue, 20 Mar 2018 11:22:12 +0100
Subject: [PATCH] Correcting for PR comments

---
 paddle/fluid/framework/operator.h             |  8 ---
 .../fluid/operators/activation_mkldnn_op.cc   | 11 ++--
 paddle/fluid/operators/activation_op.cc       | 28 --------
 paddle/fluid/operators/activation_op.h        | 40 ------------
 paddle/fluid/operators/mkldnn_activation_op.h | 64 +++++++++++++++++++
 paddle/fluid/platform/mkldnn_helper.h         |  1 -
 .../paddle/fluid/tests/unittests/op_test.py   | 12 +---
 .../tests/unittests/test_activation_op.py     |  8 +--
 8 files changed, 75 insertions(+), 97 deletions(-)
 create mode 100644 paddle/fluid/operators/mkldnn_activation_op.h

diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index d354714d0ea..41214b41cb6 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -84,10 +84,6 @@ class OperatorBase {
     return boost::get<T>(attrs_.at(name));
   }
 
-  inline bool HasAttr(const std::string& name) const {
-    return attrs_.count(name) != 0;
-  }
-
   /// if scope is not null, also show dimensions of arguments
   virtual std::string DebugStringEx(const Scope* scope) const;
 
@@ -199,10 +195,6 @@ class ExecutionContext {
     return op_.Attr<T>(name);
   }
 
-  inline bool HasAttr(const std::string& name) const {
-    return op_.HasAttr(name);
-  }
-
   size_t InputSize(const std::string& name) const {
     return op_.Inputs(name).size();
   }
diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc
index 65cf2fceb70..6ff363d766d 100644
--- a/paddle/fluid/operators/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/activation_mkldnn_op.cc
@@ -13,6 +13,7 @@
    limitations under the License. */
 
 #include "mkldnn.hpp"
+#include "mkldnn_activation_op.h"
 #include "paddle/fluid/operators/activation_op.h"
 
 namespace paddle {
@@ -183,10 +184,10 @@ namespace ops = paddle::operators;
       act_type##_grad, MKLDNN, ::paddle::platform::CPUPlace,               \
      ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>);
 
-#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)            \
-  __macro(relu, ReluMkldnnFunctor, ReluMkldnnGradFunctor)  \
-  __macro(tanh, TanhMkldnnFunctor, TanhMkldnnGradFunctor)  \
-  __macro(sqrt, SqrtMkldnnFunctor, SqrtMkldnnGradFunctor)  \
-  __macro(abs, AbsMkldnnFunctor, AbsMkldnnGradFunctor);
+#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)             \
+  __macro(relu, ReluMkldnnFunctor, ReluMkldnnGradFunctor);  \
+  __macro(tanh, TanhMkldnnFunctor, TanhMkldnnGradFunctor);  \
+  __macro(sqrt, SqrtMkldnnFunctor, SqrtMkldnnGradFunctor);  \
+  __macro(abs, AbsMkldnnFunctor, AbsMkldnnGradFunctor);
 
 FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL);
diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index ae9ca9d4ff5..043ffb01fc0 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -100,13 +100,6 @@ class ReluOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false);
-    AddAttr<std::string>(
-        "data_format",
-        "(string, default NCHW) Only used in "
-        "An optional string from: \"NHWC\", \"NCHW\". "
-        "Defaults to \"NHWC\". Specify the data format of the output data, "
-        "the input will be transformed automatically. ")
-        .SetDefault("AnyLayout");
     AddComment(R"DOC(
 Relu Activation Operator.
 
@@ -163,13 +156,6 @@ class TanhOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false);
-    AddAttr<std::string>(
-        "data_format",
-        "(string, default NCHW) Only used in "
-        "An optional string from: \"NHWC\", \"NCHW\". "
-        "Defaults to \"NHWC\". Specify the data format of the output data, "
-        "the input will be transformed automatically. ")
-        .SetDefault("AnyLayout");
     AddComment(R"DOC(
 Tanh Activation Operator.
 
@@ -226,13 +212,6 @@ class SqrtOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false);
-    AddAttr<std::string>(
-        "data_format",
-        "(string, default NCHW) Only used in "
-        "An optional string from: \"NHWC\", \"NCHW\". "
-        "Defaults to \"NHWC\". Specify the data format of the output data, "
-        "the input will be transformed automatically. ")
-        .SetDefault("AnyLayout");
     AddComment(R"DOC(
 Sqrt Activation Operator.
 
@@ -251,13 +230,6 @@ class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false);
-    AddAttr<std::string>(
-        "data_format",
-        "(string, default NCHW) Only used in "
-        "An optional string from: \"NHWC\", \"NCHW\". "
-        "Defaults to \"NHWC\". Specify the data format of the output data, "
-        "the input will be transformed automatically. ")
-        .SetDefault("AnyLayout");
     AddComment(R"DOC(
 Abs Activation Operator.
 
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 084b6bace75..e607a5554f4 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -37,10 +37,6 @@ class ActivationHelper {
   }
 #endif
     framework::DataLayout layout = framework::DataLayout::kAnyLayout;
-    if (ctx.HasAttr("data_format")) {
-      std::string data_format = ctx.Attr<std::string>("data_format");
-      layout = framework::StringToDataLayout(data_format);
-    }
     return framework::OpKernelType(
         framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
         ctx.GetPlace(), layout, library);
@@ -76,27 +72,6 @@ class ActivationKernel
   }
 };
 
-template <typename Functor>
-class MKLDNNActivationKernel
-    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(!context.HasAttr("X"),
-                   "Cannot find input tensor X, variable name = %s",
-                   context.op().Input("X"));
-    PADDLE_ENFORCE(!context.HasAttr("Out"),
-                   "Cannot find output tensor Out, variable name = %s",
-                   context.op().Output("Out"));
-    Functor functor;
-
-    auto attrs = functor.GetAttrs();
-    for (auto& attr : attrs) {
-      *attr.second = context.Attr<float>(attr.first);
-    }
-    functor(context);
-  }
-};
-
 template <typename DeviceContext, typename Functor>
 class ActivationGradKernel
     : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
@@ -125,21 +100,6 @@ class ActivationGradKernel
   }
 };
 
-template <typename Functor>
-class MKLDNNActivationGradKernel
-    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    Functor functor;
-
-    auto attrs = functor.GetAttrs();
-    for (auto& attr : attrs) {
-      *attr.second = context.Attr<float>(attr.first);
-    }
-    functor(context);
-  }
-};
-
 template <typename T>
 struct BaseActivationFunctor {
   using ELEMENT_TYPE = T;
diff --git a/paddle/fluid/operators/mkldnn_activation_op.h b/paddle/fluid/operators/mkldnn_activation_op.h
new file mode 100644
index 00000000000..976e362911e
--- /dev/null
+++ b/paddle/fluid/operators/mkldnn_activation_op.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/detail/safe_ref.h"
+
+#ifdef PADDLE_WITH_MKLDNN
+#include "paddle/fluid/platform/mkldnn_helper.h"
+#endif
+
+namespace paddle {
+namespace operators {
+
+template <typename Functor>
+class MKLDNNActivationKernel
+    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    PADDLE_ENFORCE(context.Input<framework::Tensor>("X") != nullptr,
+                   "Cannot get input tensor X, variable name = %s",
+                   context.op().Input("X"));
+    PADDLE_ENFORCE(context.Output<framework::Tensor>("Out") != nullptr,
+                   "Cannot find output tensor Out, variable name = %s",
+                   context.op().Output("Out"));
+    Functor functor;
+
+    auto attrs = functor.GetAttrs();
+    for (auto& attr : attrs) {
+      *attr.second = context.Attr<float>(attr.first);
+    }
+    functor(context);
+  }
+};
+
+template <typename Functor>
+class MKLDNNActivationGradKernel
+    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    Functor functor;
+
+    auto attrs = functor.GetAttrs();
+    for (auto& attr : attrs) {
+      *attr.second = context.Attr<float>(attr.first);
+    }
+    functor(context);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h
index 281d38cb8a0..90b78142b84 100644
--- a/paddle/fluid/platform/mkldnn_helper.h
+++ b/paddle/fluid/platform/mkldnn_helper.h
@@ -42,7 +42,6 @@ inline mkldnn::memory::desc MKLDNNMemDesc(const std::vector<int>& dims,
 }
 
 inline bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx) {
-  if (!ctx.HasAttr("use_mkldnn")) return false;
   bool use_mkldnn = ctx.Attr<bool>("use_mkldnn");
   return use_mkldnn && platform::is_cpu_place(ctx.GetPlace());
 }
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 2b10f16688b..8393f7827b1 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -215,8 +215,7 @@ class OpTest(unittest.TestCase):
         '''Fix random seeds to remove randomness from tests'''
         cls._np_rand_state = np.random.get_state()
         cls._py_rand_state = random.getstate()
-        cls.use_mkldnn = False
-        cls.data_format = 'AnyLayout'
+
         np.random.seed(123)
         random.seed(124)
 
@@ -341,14 +340,7 @@ class OpTest(unittest.TestCase):
                         "Output (" + out_name + ") has different lod at " +
                         str(place))
 
-    def fill_attrs(self):
-        attrs = self.attrs if hasattr(self, "attrs") else dict()
-        attrs["use_mkldnn"] = self.use_mkldnn
-        attrs["data_format"] = self.data_format
-        return attrs
-
     def check_output(self, atol=1e-5):
-        self.attrs = self.fill_attrs()
         places = [core.CPUPlace()]
         if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
             places.append(core.CUDAPlace(0))
@@ -356,7 +348,6 @@ class OpTest(unittest.TestCase):
             self.check_output_with_place(place, atol)
 
     def check_output_customized(self, checker):
-        self.attrs = self.fill_attrs()
         places = [core.CPUPlace()]
         if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
             places.append(core.CUDAPlace(0))
@@ -392,7 +383,6 @@ class OpTest(unittest.TestCase):
                           in_place=False,
                           max_relative_error=0.005,
                           user_defined_grads=None):
-        self.attrs = self.fill_attrs()
         places = [core.CPUPlace()]
         if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
             places.append(core.CUDAPlace(0))
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index c6c86a59694..1d53737ac13 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -515,7 +515,7 @@ class TestMKLDNNRelu(OpTest):
         x[np.abs(x) < 0.005] = 0.02
         self.inputs = {'X': x}
         self.outputs = {'Out': np.maximum(self.inputs['X'], 0)}
-        self.use_mkldnn = True
+        self.attrs = {"use_mkldnn": True}
 
     def test_check_output(self):
         self.check_output()
@@ -531,7 +531,7 @@ class TestMKLDNNTanh(OpTest):
             'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
         }
         self.outputs = {'Out': np.tanh(self.inputs['X'])}
-        self.use_mkldnn = True
+        self.attrs = {"use_mkldnn": True}
 
     def test_check_output(self):
         self.check_output()
@@ -547,7 +547,7 @@ class TestMKLDNNSqrt(OpTest):
             'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
         }
         self.outputs = {'Out': np.sqrt(self.inputs['X'])}
-        self.use_mkldnn = True
+        self.attrs = {"use_mkldnn": True}
 
     def test_check_output(self):
         self.check_output()
@@ -564,7 +564,7 @@ class TestMKLDNNAbs(OpTest):
         x[np.abs(x) < 0.005] = 0.02
         self.inputs = {'X': x}
         self.outputs = {'Out': np.abs(self.inputs['X'])}
-        self.use_mkldnn = True
+        self.attrs = {"use_mkldnn": True}
 
     def test_check_output(self):
         self.check_output()
--
GitLab