//   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
#include "rbox_iou_op.h"
#include "paddle/extension.h"


// Computes the pairwise rotated-box IoU matrix on the CPU.
//
// rbox1_data_ptr / rbox2_data_ptr hold rbox1_num / rbox2_num boxes, each
// stored as 5 consecutive values (layout defined by rbox_iou_single in
// rbox_iou_op.h -- presumably [cx, cy, w, h, angle]; confirm there).
// output_data_ptr receives a row-major (rbox1_num x rbox2_num) matrix where
// entry (i, j) is the IoU of box i from the first set and box j from the second.
template <typename T>
void rbox_iou_cpu_kernel(
    const int rbox1_num,
    const int rbox2_num,
    const T* rbox1_data_ptr,
    const T* rbox2_data_ptr,
    T* output_data_ptr) {
    for (int row = 0; row < rbox1_num; ++row) {
        // Hoist the per-row pointers out of the inner loop.
        const T* box1 = rbox1_data_ptr + row * 5;
        T* out_row = output_data_ptr + row * rbox2_num;
        for (int col = 0; col < rbox2_num; ++col) {
            out_row[col] = rbox_iou_single<T>(box1, rbox2_data_ptr + col * 5);
        }
    }
}


// Fail with a clear error if tensor x does not live on the CPU.
#define CHECK_INPUT_CPU(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")

// CPU forward pass: computes the IoU matrix between two sets of rotated boxes.
//
// rbox1: tensor of shape (N, 5); rbox2: tensor of shape (M, 5).
// Returns a single CPU tensor of shape (N, M) whose (i, j) element is the
// IoU between rbox1[i] and rbox2[j], in the same floating dtype as rbox1.
//
// Fix: the scraped original had web-viewer artifacts (contributor names and
// gutter line numbers) interleaved inside the function body, which is not
// valid C++; this is the cleaned-up function.
std::vector<paddle::Tensor> RboxIouCPUForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
    CHECK_INPUT_CPU(rbox1);
    CHECK_INPUT_CPU(rbox2);

    auto rbox1_num = rbox1.shape()[0];
    auto rbox2_num = rbox2.shape()[0];

    auto output = paddle::Tensor(paddle::PlaceType::kCPU, {rbox1_num, rbox2_num});

    // Dispatch on rbox1's dtype (float/double); data_t is bound by the macro.
    PD_DISPATCH_FLOATING_TYPES(
        rbox1.type(),
        "rbox_iou_cpu_kernel",
        ([&] {
            rbox_iou_cpu_kernel<data_t>(
                rbox1_num,
                rbox2_num,
                rbox1.data<data_t>(),
                rbox2.data<data_t>(),
                output.mutable_data<data_t>());
        }));

    return {output};
}


#ifdef PADDLE_WITH_CUDA
// Implemented in the companion .cu file; only declared when built with CUDA.
std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2);
#endif
// Fail unless both tensors live on the same device.
// Fix: error message typo "input must be smae pacle." -> "same place".
#define CHECK_INPUT_SAME(x1, x2) PD_CHECK(x1.place() == x2.place(), "input must be same place.")
// Dispatch entry point: routes to the CPU or CUDA kernel based on the
// device the inputs live on. Both inputs must be on the same device.
//
// Fixes: (1) web-viewer scrape artifacts interleaved in the body removed;
// (2) the original could fall off the end of a value-returning function
// (undefined behavior) when the place was neither CPU nor, in a CUDA build,
// GPU -- now it fails loudly instead.
std::vector<paddle::Tensor> RboxIouForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
    CHECK_INPUT_SAME(rbox1, rbox2);
    if (rbox1.place() == paddle::PlaceType::kCPU) {
        return RboxIouCPUForward(rbox1, rbox2);
#ifdef PADDLE_WITH_CUDA
    } else if (rbox1.place() == paddle::PlaceType::kGPU) {
        return RboxIouCUDAForward(rbox1, rbox2);
#endif
    }
    PD_THROW("Unsupported place for rbox_iou op; only CPU and GPU are supported.");
}

// Shape inference for the rbox_iou op: the output IoU matrix is
// (num_boxes_in_rbox1, num_boxes_in_rbox2).
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> rbox1_shape, std::vector<int64_t> rbox2_shape) {
    std::vector<int64_t> out_shape{rbox1_shape[0], rbox2_shape[0]};
    return {out_shape};
}

// Dtype inference: the IoU matrix inherits the dtype of the first input.
std::vector<paddle::DataType> InferDtype(paddle::DataType t1, paddle::DataType t2) {
    (void)t2;  // unused: the output dtype follows the first input only
    return std::vector<paddle::DataType>{t1};
}

// Register the custom "rbox_iou" operator with Paddle:
// two rotated-box inputs, one IoU-matrix output, with the dispatch kernel
// and the shape/dtype inference functions defined above.
PD_BUILD_OP(rbox_iou)
    .Inputs({"RBOX1", "RBOX2"})
    .Outputs({"Output"})
    .SetKernelFn(PD_KERNEL(RboxIouForward))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDtype));