// netOpenCv.cpp
// TODO: After completely adding the OpenCV DNN module, add this flag to CMake as alternative to USE_CAFFE
// #define USE_OPEN_CV_DNN

#include <openpose/net/netOpenCv.hpp>
// Note: OpenCV only uses CPU or OpenCL (for Intel GPUs). Used CUDA for following blobs (Resize + NMS)
#ifdef USE_CAFFE
    #include <caffe/net.hpp>
#endif
#include <openpose_private/utilities/openCvMultiversionHeaders.hpp> // OPEN_CV_IS_4_OR_HIGHER
#ifdef USE_OPEN_CV_DNN
    #if defined(USE_CAFFE) && defined(USE_CUDA) && defined(OPEN_CV_IS_4_OR_HIGHER)
        #include <opencv2/opencv.hpp>
        #include <openpose/gpu/cuda.hpp>
    #else
        #error In order to enable OpenCV DNN module in OpenPose, the CMake flags of Caffe and CUDA must be \
               enabled, and OpenCV version must be at least 4.0.0.
    #endif
#endif
#include <numeric> // std::accumulate
#include <openpose/utilities/fileSystem.hpp>

namespace op
{
    // Private implementation (pimpl) holding the OpenCV DNN state for NetOpenCv.
    struct NetOpenCv::ImplNetOpenCv
    {
        #ifdef USE_OPEN_CV_DNN
            // Init with constructor
            const int mGpuId;
            const std::string mCaffeProto;
            const std::string mCaffeTrainedModel;
            // OpenCV DNN
            cv::dnn::Net mNet;
            cv::Mat mNetOutputBlob;
            boost::shared_ptr<caffe::Blob<float>> spOutputBlob;

            /**
             * Validates the model file paths and loads the Caffe model through the OpenCV DNN module.
             * @param caffeProto Path to the Caffe prototxt (network definition) file.
             * @param caffeTrainedModel Path to the trained Caffe model (caffemodel) file.
             * @param gpuId GPU index; stored for reference, while the net below is configured for the CPU target.
             */
            ImplNetOpenCv(const std::string& caffeProto, const std::string& caffeTrainedModel, const int gpuId) :
                mGpuId{gpuId},
                mCaffeProto{caffeProto},
                mCaffeTrainedModel{caffeTrainedModel},
                spOutputBlob{new caffe::Blob<float>(1,1,1,1)}
            {
                const std::string message{".\nPossible causes:\n"
                    "\t1. Not downloading the OpenPose trained models.\n"
                    "\t2. Not running OpenPose from the root directory (i.e., where the `model` folder is located, but do not move the `model` folder!). E.g.,\n"
                    "\t\tRight example for the Windows portable binary: `cd {OpenPose_root_path}; bin/openpose.exe`\n"
                    "\t\tWrong example for the Windows portable binary: `cd {OpenPose_root_path}/bin; openpose.exe`\n"
                    "\t3. Using paths with spaces."};
                // Check the model paths BEFORE loading the net: initializing mNet with
                // cv::dnn::readNetFromCaffe in the member-init list (as before) would fail
                // first with a less informative OpenCV exception when a path is wrong.
                if (!existFile(mCaffeProto))
                    error("Prototxt file not found: " + mCaffeProto + message, __LINE__, __FUNCTION__, __FILE__);
                if (!existFile(mCaffeTrainedModel))
                    error("Caffe trained model file not found: " + mCaffeTrainedModel + message,
                          __LINE__, __FUNCTION__, __FILE__);
                // Load the model only after both files are known to exist
                mNet = cv::dnn::readNetFromCaffe(mCaffeProto, mCaffeTrainedModel);

                // Set target device (CPU was the measured choice here)
                mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU); // 1.7 sec at -1x160
                // mNet.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL); // 1.2 sec at -1x160
                // mNet.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL_FP16);
                // mNet.setPreferableTarget(cv::dnn::DNN_TARGET_MYRIAD);
                // mNet.setPreferableTarget(cv::dnn::DNN_TARGET_VULKAN);
                // // Set backend
                // mNet.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
                // mNet.setPreferableBackend(cv::dnn::DNN_BACKEND_HALIDE);
                // mNet.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
                // mNet.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
                // mNet.setPreferableBackend(cv::dnn::DNN_BACKEND_VKCOM);
            }
        #endif
    };

    #ifdef USE_OPEN_CV_DNN
        // Reshapes the net's first input blob to `dimensions` and propagates the new
        // shape through the whole Caffe network. Errors are routed through op::error.
        inline void reshapeNetOpenCv(caffe::Net<float>* caffeNet, const std::vector<int>& dimensions)
        {
            try
            {
                const auto& netBlobs = caffeNet->blobs();
                netBlobs[0]->Reshape(dimensions);
                caffeNet->Reshape();
                #ifdef USE_CUDA
                    // Surface any pending CUDA error right after the reshape
                    cudaCheck(__LINE__, __FUNCTION__, __FILE__);
                #endif
            }
            catch (const std::exception& e)
            {
                error(e.what(), __LINE__, __FUNCTION__, __FILE__);
            }
        }
    #endif

    /**
     * Constructor: builds the pimpl (which validates and loads the Caffe model)
     * when the OpenCV DNN module is enabled; otherwise raises a configuration error.
     */
    NetOpenCv::NetOpenCv(const std::string& caffeProto, const std::string& caffeTrainedModel, const int gpuId)
        #ifdef USE_OPEN_CV_DNN
            : upImpl{new ImplNetOpenCv{caffeProto, caffeTrainedModel, gpuId}}
        #endif
    {
        try
        {
            #ifndef USE_OPEN_CV_DNN
                UNUSED(caffeProto);
                UNUSED(caffeTrainedModel);
                UNUSED(gpuId);
                // This code path is guarded by USE_OPEN_CV_DNN (not USE_CAFFE), so the
                // message must name the macro that actually enables it.
                error("OpenPose must be compiled with the `USE_OPEN_CV_DNN` macro definition in order to use this"
                      " functionality.", __LINE__, __FUNCTION__, __FILE__);
            #endif
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }

    // Out-of-line destructor (presumably required so the pimpl's incomplete type
    // is destroyed where ImplNetOpenCv is fully defined — confirm against header).
    NetOpenCv::~NetOpenCv() = default;

    // No per-thread initialization is needed for the OpenCV DNN backend; the
    // interface method is intentionally a no-op here.
    void NetOpenCv::initializationOnThread()
    {
    }

    void NetOpenCv::forwardPass(const Array<float>& inputData) const
    {
        try
        {
G
gineshidalgo99 已提交
122
            #ifdef USE_OPEN_CV_DNN
G
gineshidalgo99 已提交
123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142
                upImpl->mNet.setInput(inputData.getConstCvMat());
                upImpl->mNetOutputBlob = upImpl->mNet.forward(); // 99% of the runtime here
                std::vector<int> outputSize(upImpl->mNetOutputBlob.dims,0);
                for (auto i = 0u ; i < outputSize.size() ; i++)
                    outputSize[i] = upImpl->mNetOutputBlob.size[i];
                upImpl->spOutputBlob->Reshape(outputSize);
                auto* gpuImagePtr = upImpl->spOutputBlob->mutable_gpu_data();
                cudaMemcpy(gpuImagePtr, (float*)upImpl->mNetOutputBlob.data,
                           upImpl->spOutputBlob->count() * sizeof(float),
                           cudaMemcpyHostToDevice);
            #else
                UNUSED(inputData);
            #endif
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }

    std::shared_ptr<ArrayCpuGpu<float>> NetOpenCv::getOutputBlobArray() const
G
gineshidalgo99 已提交
144 145 146
    {
        try
        {
G
gineshidalgo99 已提交
147
            #ifdef USE_OPEN_CV_DNN
148
                return std::make_shared<ArrayCpuGpu<float>>(upImpl->spOutputBlob.get());
G
gineshidalgo99 已提交
149 150 151 152 153 154 155 156 157 158 159
            #else
                return nullptr;
            #endif
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
            return nullptr;
        }
    }
}