detection_output_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/op_registry.h"
#include "paddle/framework/tensor.h"
#include "paddle/operators/math/detection_util.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/softmax.h"
#include "paddle/operators/strided_memcpy.h"
namespace paddle {
namespace operators {
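// transpose_fun: for every slice of `src` along dimension 0 (layout
// (k, N, C, H, W)), transpose the slice to (1, N, H, W, C) and copy it into
// `dst` with StridedMemcpy, advancing the write offset by the slice's
// innermost (channel) dimension each time.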
template <typename Place, typename T>
inline void transpose_fun(const platform::DeviceContext& context,
                          const framework::Tensor& src,
                          framework::Tensor* dst) {
  int input_nums = src.dims()[0];
  int offset = 0;
  for (int j = 0; j < input_nums; ++j) {
    framework::Tensor in_p_tensor = src.Slice(j, j + 1);
    std::vector<int64_t> shape_vec(
        {in_p_tensor.dims()[0], in_p_tensor.dims()[1], in_p_tensor.dims()[3],
         in_p_tensor.dims()[4], in_p_tensor.dims()[2]});
    framework::DDim shape(framework::make_ddim(shape_vec));
    framework::Tensor in_p_tensor_transpose;
    in_p_tensor_transpose.mutable_data<T>(shape, context.GetPlace());
    std::vector<int> shape_axis({0, 1, 3, 4, 2});
    math::Transpose<Place, T, 5> trans5;
    trans5(context, in_p_tensor, &in_p_tensor_transpose, shape_axis);
    auto dst_stride = framework::stride(dst->dims());
    auto src_stride = framework::stride(in_p_tensor_transpose.dims());
    StridedMemcpy<T>(context, in_p_tensor_transpose.data<T>(), src_stride,
                     in_p_tensor_transpose.dims(), dst_stride,
                     dst->data<T>() + offset);
    offset += in_p_tensor_transpose.dims()[4] * src_stride[4];
  }
}
template <typename Place, typename T>
class Detection_output_Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const framework::Tensor* in_loc = context.Input<framework::Tensor>("Loc");
    const framework::Tensor* in_conf = context.Input<framework::Tensor>("Conf");
    const framework::Tensor* in_priorbox =
        context.Input<framework::Tensor>("PriorBox");
    auto* out = context.Output<framework::Tensor>("Out");
    int num_classes = context.template Attr<int>("num_classes");
    int top_k = context.template Attr<int>("top_k");
    int nms_top_k = context.template Attr<int>("nms_top_k");
    int background_label_id = context.template Attr<int>("background_label_id");
    float nms_threshold = context.template Attr<float>("nms_threshold");
    float confidence_threshold =
        context.template Attr<float>("confidence_threshold");
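    // Loc and Conf are laid out as (k, N, C, H, W), so dims()[1] is the
    // batch size.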
    int batch_size = in_conf->dims()[1];
    int conf_sum_size = in_conf->numel();
    // flatten the confidences to (conf_sum_size / num_classes, num_classes) for softmax
    std::vector<int64_t> conf_shape_softmax_vec(
        {conf_sum_size / num_classes, num_classes});
    framework::DDim conf_shape_softmax(
        framework::make_ddim(conf_shape_softmax_vec));
    // shapes for the kNCHW => kNHWC transpose of Loc and Conf
    std::vector<int64_t> loc_shape_vec({1, in_loc->dims()[1], in_loc->dims()[3],
                                        in_loc->dims()[4], in_loc->dims()[2]});
    std::vector<int64_t> conf_shape_vec({1, in_conf->dims()[1],
                                         in_conf->dims()[3], in_conf->dims()[4],
                                         in_conf->dims()[2]});
    framework::DDim loc_shape(framework::make_ddim(loc_shape_vec));
    framework::DDim conf_shape(framework::make_ddim(conf_shape_vec));
    framework::Tensor loc_tensor;
    framework::Tensor conf_tensor;
    loc_tensor.mutable_data<T>(loc_shape, context.GetPlace());
    conf_tensor.mutable_data<T>(conf_shape, context.GetPlace());
    // host-side staging tensors (used when running on GPU)
    framework::Tensor loc_cpu;
    framework::Tensor conf_cpu;
    framework::Tensor priorbox_cpu;
    const T* priorbox_data = in_priorbox->data<T>();
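    // Transpose Loc and Conf from kNCHW to kNHWC, then apply softmax over
    // the per-prior class scores.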
    transpose_fun<Place, T>(context.device_context(), *in_loc, &loc_tensor);
    transpose_fun<Place, T>(context.device_context(), *in_conf, &conf_tensor);
    conf_tensor.Resize(conf_shape_softmax);
    math::SoftmaxFunctor<Place, T>()(context.device_context(), &conf_tensor,
                                     &conf_tensor);
    T* loc_data = loc_tensor.data<T>();
    T* conf_data = conf_tensor.data<T>();
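    // The detection utilities below run on the host, so copy the transposed
    // data and the prior boxes to CPU when executing on a GPU place.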
    if (platform::is_gpu_place(context.GetPlace())) {
      loc_cpu.mutable_data<T>(loc_tensor.dims(), platform::CPUPlace());
      framework::CopyFrom(loc_tensor, platform::CPUPlace(),
                          context.device_context(), &loc_cpu);
      loc_data = loc_cpu.data<T>();
      conf_cpu.mutable_data<T>(conf_tensor.dims(), platform::CPUPlace());
      framework::CopyFrom(conf_tensor, platform::CPUPlace(),
                          context.device_context(), &conf_cpu);
      conf_data = conf_cpu.data<T>();
      priorbox_cpu.mutable_data<T>(in_priorbox->dims(), platform::CPUPlace());
      framework::CopyFrom(*in_priorbox, platform::CPUPlace(),
                          context.device_context(), &priorbox_cpu);
      priorbox_data = priorbox_cpu.data<T>();
    }
    // decode the predicted bboxes from the prior boxes and location predictions
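    // Each prior box occupies 8 values (its coordinates and their variances,
    // read by getBBoxFromPriorData / getBBoxVarFromPriorData), hence
    // numel() / 8 priors.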
    size_t num_priors = in_priorbox->numel() / 8;
    std::vector<std::vector<operators::math::BBox<T>>> all_decoded_bboxes;
    for (size_t n = 0; n < static_cast<size_t>(batch_size); ++n) {
      std::vector<operators::math::BBox<T>> decoded_bboxes;
      for (size_t i = 0; i < num_priors; ++i) {
        size_t prior_offset = i * 8;
        size_t loc_pred_offset = n * num_priors * 4 + i * 4;
        std::vector<math::BBox<T>> prior_bbox_vec;
        math::getBBoxFromPriorData<T>(priorbox_data + prior_offset, 1,
                                      prior_bbox_vec);
        std::vector<std::vector<T>> prior_bbox_var;
        math::getBBoxVarFromPriorData<T>(priorbox_data + prior_offset, 1,
                                         prior_bbox_var);
        std::vector<T> loc_pred_data;
        for (size_t j = 0; j < 4; ++j)
          loc_pred_data.push_back(*(loc_data + loc_pred_offset + j));
        math::BBox<T> bbox = math::decodeBBoxWithVar<T>(
            prior_bbox_vec[0], prior_bbox_var[0], loc_pred_data);
        decoded_bboxes.push_back(bbox);
      }
      all_decoded_bboxes.push_back(decoded_bboxes);
    }
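    // apply confidence thresholding and NMS: getDetectionIndices fills
    // `all_indices` with the kept bbox indices per image and class and
    // returns the total number kept.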
    std::vector<std::map<size_t, std::vector<size_t>>> all_indices;
    int num_kept = math::getDetectionIndices<T>(
        conf_data, num_priors, num_classes, background_label_id, batch_size,
        confidence_threshold, nms_top_k, nms_threshold, top_k,
        all_decoded_bboxes, &all_indices);

    if (num_kept <= 0) {
      std::vector<int64_t> out_shape_vec({0, 0});
      framework::DDim out_shape(framework::make_ddim(out_shape_vec));
      out->Resize(out_shape);
      return;
    }
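    // each kept detection becomes one output row of 7 values: image index,
    // label, score and the four box coordinates.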
    std::vector<int64_t> out_shape_vec({num_kept, 7});
    framework::DDim out_shape(framework::make_ddim(out_shape_vec));
    out->mutable_data<T>(out_shape, context.GetPlace());
    framework::Tensor out_cpu;
    T* out_data = out->data<T>();
    if (platform::is_gpu_place(context.GetPlace())) {
      out_cpu.mutable_data<T>(out->dims(), platform::CPUPlace());
      out_data = out_cpu.data<T>();
    }
    math::getDetectionOutput<T>(conf_data, num_kept, num_priors, num_classes,
                                batch_size, all_indices, all_decoded_bboxes,
                                out_data);
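    // copy the host-side result back to the device when running on GPU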
    if (platform::is_gpu_place(context.GetPlace())) {
      framework::CopyFrom(out_cpu, platform::GPUPlace(),
                          context.device_context(), out);
    }
  }
};
}  // namespace operators
}  // namespace paddle