// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <algorithm>
#include <cstdlib>
#include <memory>
#include <random>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"

USE_OP_ITSELF(conv2d);
PD_DECLARE_KERNEL(conv2d, OneDNN, ONEDNN);

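// Creates a variable named `var_name` in the scope, shaped `dims`, and fills
// it with uniformly distributed random values in [0, 100). The values are
// staged in a temporary CPU tensor and then copied into the scope variable.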
template <typename DataType>
void AddVarToScope(const std::string var_name,
                   paddle::framework::Scope* scope,
                   const paddle::framework::DDim& dims) {
  std::random_device seed;
  std::default_random_engine engine(seed());
  std::uniform_real_distribution<float> dist(0, 100);

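  // Temporary CPU tensor used to stage the random values.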
  phi::DenseTensor tmp_tensor;
  auto* tmp_data =
      tmp_tensor.mutable_data<DataType>(dims, paddle::platform::CPUPlace());
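  // Create the destination variable in the scope and allocate its buffer.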
  auto* tensor = scope->Var(var_name)->GetMutable<phi::DenseTensor>();
  tensor->mutable_data<DataType>(dims, paddle::platform::CPUPlace());
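  // Fill the staging buffer with random values, cast to the target type.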
  for (auto i = 0; i < tensor->numel(); ++i) {
    tmp_data[i] = static_cast<DataType>(dist(engine));
  }
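  // Copy the staged data into the scope variable.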
  paddle::framework::TensorCopySync(
      tmp_tensor, paddle::platform::CPUPlace(), tensor);
}
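
// Builds a conv2d OpDesc with use_mkldnn=true, feeds it random fp32 tensors,
// and checks that the oneDNN conv2d kernel runs without error.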
TEST(test_conv2d_output, fp32) {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace cpu_place;

  paddle::framework::OpDesc conv2d_op(nullptr);
  conv2d_op.SetType("conv2d");
  conv2d_op.SetInput("Input", {"conv2d-X"});
  conv2d_op.SetInput("Filter", {"conv2d-Y"});
  conv2d_op.SetOutput("Output", {"conv2d-Out"});

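  // Input, filter, and output variables filled with random fp32 data.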
  AddVarToScope<float>("conv2d-X", &scope, {1, 3, 224, 224});
  AddVarToScope<float>("conv2d-Y", &scope, {64, 3, 7, 7});
  AddVarToScope<float>("conv2d-Out", &scope, {1, 64, 218, 218});

  const std::vector<int> strides({1, 1});
  const std::vector<int> paddings({1, 1});
  const std::vector<int> dilations({1, 1});
  const int groups = 1;

  conv2d_op.SetAttr("strides", strides);
  conv2d_op.SetAttr("paddings", paddings);
  conv2d_op.SetAttr("dilations", dilations);
  conv2d_op.SetAttr("groups", groups);
  conv2d_op.SetAttr("use_mkldnn", true);

  auto op = paddle::framework::OpRegistry::CreateOp(conv2d_op);

  op->Run(scope, cpu_place);
}
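
// Same as the fp32 test, but with int8 tensors and the extra attributes used
// by the oneDNN int8 conv2d path: mkldnn_data_type="int8" and
// force_fp32_output=false.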
TEST(test_conv2d_output, int8) {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace cpu_place;

  paddle::framework::OpDesc conv2d_op(nullptr);
  conv2d_op.SetType("conv2d");
  conv2d_op.SetInput("Input", {"conv2d-X"});
  conv2d_op.SetInput("Filter", {"conv2d-Y"});
  conv2d_op.SetOutput("Output", {"conv2d-Out"});

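  // Input, filter, and output variables filled with random int8 data.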
  AddVarToScope<int8_t>("conv2d-X", &scope, {1, 3, 224, 224});
  AddVarToScope<int8_t>("conv2d-Y", &scope, {64, 3, 7, 7});
  AddVarToScope<int8_t>("conv2d-Out", &scope, {1, 64, 218, 218});

  const std::vector<int> strides({1, 1});
  const std::vector<int> paddings({1, 1});
  const std::vector<int> dilations({1, 1});
  const int groups = 1;

  conv2d_op.SetAttr("strides", strides);
  conv2d_op.SetAttr("paddings", paddings);
  conv2d_op.SetAttr("dilations", dilations);
  conv2d_op.SetAttr("groups", groups);
  conv2d_op.SetAttr("use_mkldnn", true);
  conv2d_op.SetAttr("mkldnn_data_type", std::string("int8"));
  conv2d_op.SetAttr("force_fp32_output", false);

  auto op = paddle::framework::OpRegistry::CreateOp(conv2d_op);

  op->Run(scope, cpu_place);
}