diff --git a/paddle/fluid/framework/ngraph_bridge.cc b/paddle/fluid/framework/ngraph_bridge.cc index 5fcb17b9f3ac390548aba33db7d0b8350cde7e00..42190b52289bfc6fc510f13cb5190a0d3e03b836 100644 --- a/paddle/fluid/framework/ngraph_bridge.cc +++ b/paddle/fluid/framework/ngraph_bridge.cc @@ -31,10 +31,12 @@ std::map>>)>> NgraphBridge::NG_NODE_MAP = { + {"fill_constant", paddle::operators::ngraphs::BuildFillConstantNode}, {"mul", paddle::operators::ngraphs::BuildMulNode}, {"mul_grad", paddle::operators::ngraphs::BuildMulGradNode}, {"relu", paddle::operators::ngraphs::BuildUnaryNode}, - {"tanh", paddle::operators::ngraphs::BuildUnaryNode}}; + {"tanh", paddle::operators::ngraphs::BuildUnaryNode}, + {"top_k", paddle::operators::ngraphs::BuildTopKNode}}; void NgraphBridge::BuildNgNode(const std::shared_ptr& op) { auto& op_type = op->Type(); diff --git a/paddle/fluid/operators/dequantize_mkldnn_op.cc b/paddle/fluid/operators/dequantize_mkldnn_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..262b7408a7f5f65c4d97120914c16f38ce5fdbe7 --- /dev/null +++ b/paddle/fluid/operators/dequantize_mkldnn_op.cc @@ -0,0 +1,88 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "mkldnn.hpp" +#include "paddle/fluid/framework/data_layout_transform.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/dequantize_op.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +namespace paddle { +namespace operators { + +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::reorder; +using platform::to_void_cast; +using Tensor = framework::Tensor; +using framework::DataLayout; +using mkldnn::stream; +using platform::GetMKLDNNFormat; + +template +class DeQuantOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto scale_data = ctx.Attr("Scale"); + auto* output = ctx.Output("Output"); + auto& dev_ctx = + ctx.template device_context(); + const auto& engine = dev_ctx.GetEngine(); + + const T* input_data = input->data(); + float* output_data = output->mutable_data(ctx.GetPlace()); + std::vector reorder_scale = {1.0f / scale_data}; + + std::vector pipeline; + std::vector src_tz = paddle::framework::vectorize2int(input->dims()); + std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); + mkldnn::memory::data_type src_dt = + paddle::framework::ToMKLDNNDataType(input->type()); + mkldnn::memory::format src_fmt = input->format(); + + mkldnn::primitive_attr attri; + int mask = 0; + attri.set_output_scales(mask, reorder_scale); + + auto src_md = platform::MKLDNNMemDesc({src_tz}, src_dt, src_fmt); + auto src_pd = mkldnn::memory::primitive_desc(src_md, engine); + auto src_memory = + std::make_shared(src_pd, to_void_cast(input_data)); + std::shared_ptr src_memory_p = + std::shared_ptr(new primitive::at(*src_memory)); + + auto dst_md = platform::MKLDNNMemDesc({dst_tz}, memory::data_type::f32, + memory::format::nchw); + auto dst_pd = mkldnn::memory::primitive_desc(dst_md, engine); + auto dst_memory = mkldnn::memory(dst_pd, to_void_cast(output_data)); + + auto reorder_pd = std::shared_ptr( + new 
reorder::primitive_desc(src_pd, dst_pd, attri)); + auto reorder_p = std::shared_ptr( + new reorder(*reorder_pd, *src_memory_p, dst_memory)); + pipeline.push_back(*reorder_p); + stream(stream::kind::eager).submit(pipeline).wait(); + + output->set_format(GetMKLDNNFormat(dst_memory)); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP_KERNEL(dequantize, MKLDNN, ::paddle::platform::CPUPlace, + ops::DeQuantOpKernel, ops::DeQuantOpKernel); diff --git a/paddle/fluid/operators/dequantize_op.cc b/paddle/fluid/operators/dequantize_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..38159f84a0d56f45cfef233a3c70c3c6cef17d9f --- /dev/null +++ b/paddle/fluid/operators/dequantize_op.cc @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/dequantize_op.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +framework::OpKernelType DeQuantOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library_ = framework::LibraryType::kMKLDNN; + framework::DataLayout layout_ = framework::DataLayout::kMKLDNN; + + return framework::OpKernelType(ctx.Input("Input")->type(), + ctx.GetPlace(), layout_, library_); +} + +void DeQuantOpMaker::Make() { + AddInput("Input", "input data"); + AddOutput("Output", "output data"); + AddAttr("Scale", "scale data").SetDefault({1.0f}); + AddComment(R"DOC(This op will dequantize data from INT8 to FP32)DOC"); +} + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(dequantize, ops::DeQuantOp, ops::DeQuantOpMaker, + paddle::framework::DefaultGradOpDescMaker); diff --git a/paddle/fluid/operators/dequantize_op.h b/paddle/fluid/operators/dequantize_op.h new file mode 100644 index 0000000000000000000000000000000000000000..75c27a06c210f2d0e4d7cf52aa16f4c123f8ad8e --- /dev/null +++ b/paddle/fluid/operators/dequantize_op.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using framework::OpKernelType; +using framework::Tensor; + +class DeQuantOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + ctx->SetOutputDim("Output", ctx->GetInputDim("Input")); + ctx->ShareLoD("Input", /*->*/ "Output"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class DeQuantOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override; +}; + +class DeQuantGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override {} +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ngraph_ops.h b/paddle/fluid/operators/ngraph/ngraph_ops.h index 0ed77ff5577cf4f45a8865db9b42e8bda9839478..8e7457dd56c2413f84008ce467537e07b3e80cc7 100644 --- a/paddle/fluid/operators/ngraph/ngraph_ops.h +++ b/paddle/fluid/operators/ngraph/ngraph_ops.h @@ -22,4 +22,6 @@ limitations under the License. 
*/ #pragma once #include "ops/binary_unnary_op.h" +#include "ops/fill_constant_op.h" #include "ops/mul_op.h" +#include "ops/top_k_op.h" diff --git a/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h b/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h index 4e2f5e231c16cd0fad6db287aa19430c56b534fd..6610380fcf432d0019f7e844fa9304e151b20efd 100644 --- a/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h +++ b/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h @@ -45,7 +45,6 @@ static void BuildUnaryNode( auto out = std::make_shared(input); paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); } - } // namespace ngraphs } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h new file mode 100644 index 0000000000000000000000000000000000000000..5eff69e7b165fa19c775926914b7b3e8fcb043e5 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h @@ -0,0 +1,61 @@ +/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#ifdef PADDLE_WITH_NGRAPH +#pragma once + +#include +#include +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { + +static void BuildFillConstantNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + auto vsp = op_attrs.Get>("shape"); + ngraph::Shape shape; + for (auto& sp : vsp) { + shape.push_back(sp); + } + float value = op_attrs.Get("value"); + ngraph::element::Type ng_dtype; + auto data_type = static_cast( + op_attrs.Get("dtype")); + if (data_type == paddle::framework::proto::VarType::FP32) { + ng_dtype = ngraph::element::f32; + } else if (data_type == paddle::framework::proto::VarType::FP64) { + ng_dtype = ngraph::element::f64; + } else if (data_type == paddle::framework::proto::VarType::INT64) { + ng_dtype = ngraph::element::i64; + } else if (data_type == paddle::framework::proto::VarType::INT32) { + ng_dtype = ngraph::element::i32; + } else if (data_type == paddle::framework::proto::VarType::BOOL) { + ng_dtype = ngraph::element::boolean; + } else { + PADDLE_THROW("unsupported data type: %s", data_type); + } + auto out = ngraph::op::Constant::create(ng_dtype, shape, {value}); + paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); +} +} // namespace ngraphs +} // namespace operators +} // namespace paddle +#endif diff --git a/paddle/fluid/operators/ngraph/ops/top_k_op.h b/paddle/fluid/operators/ngraph/ops/top_k_op.h new file mode 100644 index 0000000000000000000000000000000000000000..2b7254497c0e1aab2e653e69e6461f262b929703 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/top_k_op.h @@ -0,0 +1,51 @@ +/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifdef PADDLE_WITH_NGRAPH +#pragma once + +#include +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { + +static void BuildTopKNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + int k = op_attrs.Get("k"); + auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto top_k = std::make_shared( + input, input->get_shape().size() - 1, ngraph::element::i64, k); + std::shared_ptr indices = + std::make_shared(top_k, 0); + std::shared_ptr out = + std::make_shared(top_k, 1); + auto dummy_out = paddle::platform::GetOutputNode(op, "Out", ngb_node_map); + if (dummy_out && dummy_out->get_element_type() != out->get_element_type()) { + out = std::make_shared(out, + dummy_out->get_element_type()); + } + paddle::platform::SetOutputNode(op, "Indices", indices, ngb_node_map); + paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); +} +} // namespace ngraphs +} // namespace operators +} // namespace paddle +#endif diff --git a/paddle/fluid/operators/quantize_mkldnn_op.cc b/paddle/fluid/operators/quantize_mkldnn_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..0638e42873376bcec6e4de61494da46d1f0073d1 --- /dev/null +++ b/paddle/fluid/operators/quantize_mkldnn_op.cc @@ -0,0 +1,89 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "mkldnn.hpp" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/quantize_op.h" +#include "paddle/fluid/platform/mkldnn_helper.h" +#include "paddle/fluid/platform/mkldnn_reuse.h" + +namespace paddle { +namespace operators { + +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::reorder; +using platform::to_void_cast; +using Tensor = framework::Tensor; +using framework::DataLayout; +using mkldnn::stream; +using platform::GetMKLDNNFormat; + +template +class QuantOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto scale_data = ctx.Attr("Scale"); + auto* output = ctx.Output("Output"); + auto& dev_ctx = + ctx.template device_context(); + const auto& engine = dev_ctx.GetEngine(); + + std::vector pipeline; + std::vector src_tz = paddle::framework::vectorize2int(input->dims()); + std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); + + const T* input_data = input->data(); + + mkldnn::primitive_attr attri; + int mask = 0; + attri.set_output_scales(mask, {scale_data}); + + auto src_md = platform::MKLDNNMemDesc({src_tz}, memory::data_type::f32, + input->format()); + auto src_pd = mkldnn::memory::primitive_desc(src_md, engine); + auto src_memory = + std::make_shared(src_pd, to_void_cast(input_data)); + std::shared_ptr src_memory_p = + std::shared_ptr(new 
primitive::at(*src_memory)); + + bool is_negative = ctx.Attr("is_negative_input"); + std::shared_ptr dst_pd; + std::shared_ptr dst_memory; + if (is_negative) { + platform::ConvMKLDNNHandler::SetDstMemory( + ctx, output, dst_tz, engine, dst_pd, dst_memory); + } else { + platform::ConvMKLDNNHandler::SetDstMemory( + ctx, output, dst_tz, engine, dst_pd, dst_memory); + } + auto reorder_pd = std::shared_ptr( + new reorder::primitive_desc(src_pd, *dst_pd, attri)); + auto reorder_p = std::shared_ptr( + new reorder(*reorder_pd, *src_memory_p, *dst_memory)); + pipeline.push_back(*reorder_p); + stream(stream::kind::eager).submit(pipeline).wait(); + output->set_layout(DataLayout::kMKLDNN); + output->set_format(GetMKLDNNFormat(*dst_memory)); + } +}; +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +// TODO(Xiaoli) Support FP32->S8 quantization. + +REGISTER_OP_KERNEL(quantize, MKLDNN, ::paddle::platform::CPUPlace, + ops::QuantOpKernel); diff --git a/paddle/fluid/operators/quantize_op.cc b/paddle/fluid/operators/quantize_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..bf70c08bdb82218a2d0f63f3e70a2a1093e6a542 --- /dev/null +++ b/paddle/fluid/operators/quantize_op.cc @@ -0,0 +1,47 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +#include "paddle/fluid/operators/quantize_op.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +framework::OpKernelType QuantOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library_ = framework::LibraryType::kMKLDNN; + framework::DataLayout layout_ = framework::DataLayout::kMKLDNN; + + return framework::OpKernelType(ctx.Input("Input")->type(), + ctx.GetPlace(), layout_, library_); +} + +void QuantOpMaker::Make() { + AddInput("Input", "input data"); + AddOutput("Output", "output data"); + AddAttr("is_negative_input", + "(bool, default false) Only used in mkldnn INT8 kernel") + .SetDefault(false); + AddAttr("Scale", "scale data").SetDefault({1.0f}); + AddComment(R"DOC(This op will quantize data from FP32 to INT8)DOC"); +} + +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +REGISTER_OPERATOR(quantize, ops::QuantOp, ops::QuantOpMaker, + paddle::framework::DefaultGradOpDescMaker); diff --git a/paddle/fluid/operators/quantize_op.h b/paddle/fluid/operators/quantize_op.h new file mode 100644 index 0000000000000000000000000000000000000000..091306e4637c7e2393b6736f0e1edf9dd7fd2c8a --- /dev/null +++ b/paddle/fluid/operators/quantize_op.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using framework::OpKernelType; +using framework::Tensor; + +class QuantOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + ctx->SetOutputDim("Output", ctx->GetInputDim("Input")); + ctx->ShareLoD("Input", /*->*/ "Output"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class QuantOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override; +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h index 23f00406de6190dfef91e259d6af358b5dac1713..584df85e80203c383a89954aac73dd1dcd723f7c 100644 --- a/paddle/fluid/platform/mkldnn_reuse.h +++ b/paddle/fluid/platform/mkldnn_reuse.h @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include #include +#include "paddle/fluid/framework/data_layout_transform.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/mkldnn_helper.h" #include "paddle/fluid/platform/place.h" @@ -181,6 +182,21 @@ class MKLDNNHandler { return dims2str(operand_dims) + suffix; } + template + static void SetDstMemory( + const framework::ExecutionContext& ctx, framework::Tensor* output, + std::vector dst_tz, const mkldnn::engine& engine, + std::shared_ptr& dst_pd, // NOLINT + std::shared_ptr& dst_memory) { // NOLINT + M* output_data = output->mutable_data(ctx.GetPlace()); + auto dst_md = platform::MKLDNNMemDesc( + {dst_tz}, paddle::framework::ToMKLDNNDataType( + framework::DataTypeTrait::DataType), + mkldnn::memory::format::nhwc); + dst_pd.reset(new mkldnn::memory::primitive_desc(dst_md, engine)); + dst_memory.reset(new mkldnn::memory(*dst_pd, to_void_cast(output_data))); + } + protected: static std::string dims2str(const mkldnn::memory::dims& operand_dims) { std::string dstr = ""; diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py new file mode 100644 index 0000000000000000000000000000000000000000..835376ffe78f9119a9be6c379998e3a3b50aab43 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py @@ -0,0 +1,37 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import unittest +from paddle.fluid.tests.unittests.test_fill_constant_op import TestFillConstantOp1, TestFillConstantOp2, TestFillConstantOpWithSelectedRows + + +class TestNGRAPHFillConstantOp1(TestFillConstantOp1): + def setUp(self): + super(TestNGRAPHFillConstantOp1, self).setUp() + + +class TestNGRAPHFillConstantOp2(TestFillConstantOp2): + def setUp(self): + super(TestNGRAPHFillConstantOp2, self).setUp() + + +class TestNGRAPHFillConstantOpWithSelectedRows( + TestFillConstantOpWithSelectedRows): + def setUp(self): + super(TestNGRAPHFillConstantOpWithSelectedRows, self).setUp() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0171087dce5d4c7b72eca7f7e4fb955af94812 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py @@ -0,0 +1,41 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import print_function + +import unittest +from paddle.fluid.tests.unittests.test_top_k_op import TestTopkOp, TestTopkOp3d, TestTopkOp2, TestTopkOp3, TestTopkOp4 + + +class TestNGRAPHTopkOp(TestTopkOp): + def setUp(self): + super(TestNGRAPHTopkOp, self).setUp() + + +class TestNGRAPHTopkOp2(TestTopkOp2): + def setUp(self): + super(TestNGRAPHTopkOp2, self).setUp() + + +class TestNGRAPHTopkOp3(TestTopkOp3): + def setUp(self): + super(TestNGRAPHTopkOp3, self).setUp() + + +class TestNGRAPHTopkOp4(TestTopkOp4): + def setUp(self): + super(TestNGRAPHTopkOp4, self).setUp() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dequantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_dequantize_mkldnn_op.py new file mode 100644 index 0000000000000000000000000000000000000000..0c5e1abd7c8fb010357998c0ceaebaf21619fda9 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dequantize_mkldnn_op.py @@ -0,0 +1,73 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import numpy as np +from op_test import OpTest + + +class TestDeQuantizeOp(OpTest): + def setUp(self): + self.op_type = 'dequantize' + self.scale = 2.0 + self.input_size = [1, 1, 5, 5] #Naive nChw16c + self.data_type = 'int8' + self.set_scale() + self.set_data_type() + + if self.data_type == 'int8': + input = (np.random.randint(0, 100, self.input_size) - 50 + ).astype(self.data_type) + output = (input * (1 / self.scale)).astype('float') + else: + input = (np.random.randint(0, 100, + self.input_size)).astype(self.data_type) + output = (input * (1 / self.scale)).astype('float') + + self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(input)} + + self.outputs = {'Output': output} + + self.attrs = {'Scale': self.scale, } + + def test_check_output(self): + self.check_output() + + def set_scale(self): + pass + + def set_data_type(self): + pass + + +class TestDeQuantizeOp1(TestDeQuantizeOp): + def set_scale(self): + self.scale = 1.5 + + def set_data_type(self): + self.data_type = 'int8' + + +class TestDeQuantizeOp2(TestDeQuantizeOp): + def set_scale(self): + self.scale = 0.8 + + def set_data_type(self): + self.data_type = 'uint8' + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 7ade135ec3dbb49ba95fc66100c47f7f7c64658b..bc4005122e369dd53c8f874884b09bd86c1ce9fb 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -1018,9 +1018,8 @@ class TestBook(unittest.TestCase): def test_shuffle_channel(self): program = Program() with program_guard(program): - x = layers.data(name="x", shape=[1, 4, 2, 2], dtype="float32") - group = layers.data(name="group", shape=[1], dtype="int32") - out = layers.shuffle_channel(x, group) + x = layers.data(name="x", shape=[10, 16, 4, 4], dtype="float32") + out = layers.shuffle_channel(x, group=2) 
self.assertIsNotNone(out) print(str(program)) diff --git a/python/paddle/fluid/tests/unittests/test_quantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_quantize_mkldnn_op.py new file mode 100644 index 0000000000000000000000000000000000000000..99607928648be437b7f944f86a0c28b99d1775c4 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_quantize_mkldnn_op.py @@ -0,0 +1,76 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import numpy as np +from op_test import OpTest + + +class TestQuantizeOp(OpTest): + def setUp(self): + self.op_type = 'quantize' + self.scale = 2.0 + self.input_size = [1, 1, 5, 5] #Naive nChw16c + self.is_negative = False + self.set_scale() + self.set_is_negative() + + if self.is_negative: + input = (100 * np.random.random_sample(self.input_size) - 50 + ).astype('float32') + output = np.round(input * self.scale).astype('int8') + else: + input = (100 * + np.random.random_sample(self.input_size)).astype('float32') + output = np.round(input * self.scale).astype('uint8') + + self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(input)} + + self.outputs = {'Output': output} + + self.attrs = { + 'Scale': self.scale, + 'is_negative_input': self.is_negative + } + + def test_check_output(self): + self.check_output() + + def set_scale(self): + pass + + def set_is_negative(self): + pass + + +class TestQuantizeOp1(TestQuantizeOp): + def set_scale(self): + self.scale = 1.5 + + def set_is_negative(self): + self.is_negative = True + + +class TestQuantizeOp2(TestQuantizeOp): + def set_scale(self): + self.scale = 0.1 + + def set_is_negative(self): + self.is_negative = False + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py b/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py index 4fabe424fa77dfb9b82af488ef971ac9edaa09b1..aeaae9058187be1c9191bcbec21237c69fefe6e6 100644 --- a/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py +++ b/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py @@ -25,11 +25,11 @@ import paddle.fluid.core as core class TestShuffleChannelOp(OpTest): def setUp(self): self.op_type = "shuffle_channel" - self.batch_size = 1 - self.input_channels = 4 - self.layer_h = 2 - self.layer_w = 2 - self.group = 2 + self.batch_size = 10 + self.input_channels = 16 + self.layer_h = 4 + self.layer_w = 4 + 
self.group = 4 self.x = np.random.random( (self.batch_size, self.input_channels, self.layer_h, self.layer_w)).astype('float32')