Commit d414c815 authored by gineshidalgo99

Added #185 fix

Parent 4412720b
......@@ -38,6 +38,6 @@ You might select multiple topics, delete the rest:
**cuDNN version**:
**GPU model** (`nvidia-smi` in Ubuntu):
**Caffe version**: Default from OpenPose or custom version.
**OpenCV version**: installed with `apt-get install libopencv-dev` (Ubuntu) or default from OpenPose (Windows) or OpenCV 2.X or OpenCV 3.X.
**OpenCV version**: installed with `apt-get install libopencv-dev` (Ubuntu) or default from OpenPose (Windows) or OpenCV 2.X or OpenCV 3.X. Specify the **full version** (e.g. 3.3.1).
Generation mode (only for Ubuntu): Makefile + Makefile.config (default, Ubuntu) or CMake (Ubuntu, Windows) or Visual Studio (Windows).
Compiler (`gcc --version` in Ubuntu):
......@@ -18,12 +18,12 @@ cv::Mat pointGreyToCvMat(const Spinnaker::ImagePtr &imagePtr)
const auto colsize = imagePtr->GetHeight();
// image data contains padding. When allocating cv::Mat container size, you need to account for the X,Y image data padding.
return cv::Mat{(int)(colsize + YPadding), (int)(rowsize + XPadding), CV_8UC3, imagePtr->GetData(), imagePtr->GetStride()};
return cv::Mat((int)(colsize + YPadding), (int)(rowsize + XPadding), CV_8UC3, imagePtr->GetData(), imagePtr->GetStride());
}
catch (const std::exception& e)
{
op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
......
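Note on the recurring change: the #185 fix replaces every brace-initialized `cv::Mat{...}` with the parenthesized `cv::Mat(...)` form throughout this commit. A minimal sketch (not part of the commit, helper name is illustrative) of the two spellings, which call the same constructor, with the parenthesized form being the one standardized on here:

    #include <opencv2/core/core.hpp>

    cv::Mat makeBlackFrame(const int width, const int height)
    {
        // Previously: return cv::Mat{height, width, CV_8UC3, cv::Scalar{0, 0, 0}};
        return cv::Mat(height, width, CV_8UC3, cv::Scalar(0, 0, 0));
    }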
......@@ -108,7 +108,7 @@ void reconstructArray(op::Array<float>& keypoints3D, const std::vector<op::Array
// 3D points to pose
// OpenCV alternative:
// // http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#triangulatepoints
// cv::Mat reconstructedcv::Points{4, firstcv::Points.size(), CV_64F};
// cv::Mat reconstructedPoints{4, firstcv::Points.size(), CV_64F};
// cv::triangulatePoints(cv::Mat::eye(3,4, CV_64F), M_3_1, firstPoints, secondPoints, reconstructedPoints);
keypoints3D = op::Array<float>{ { 1, numberBodyParts, 4 }, 0 };
for (auto index = 0u; index < indexesUsed.size(); index++)
......
......@@ -341,7 +341,7 @@ WRender3D::WRender3D(const op::PoseModel poseModel)
// Update sPoseModel
sPoseModel = poseModel;
// Init display
cv::imshow(GUI_NAME, cv::Mat{ 500, 500, CV_8UC3, cv::Scalar{ 0,0,0 } });
cv::imshow(GUI_NAME, cv::Mat( 500, 500, CV_8UC3, cv::Scalar{ 0,0,0 } ));
//Run OpenGL
mRenderThread = std::thread{ &WRender3D::visualizationThread, this };
}
......
......@@ -14,7 +14,7 @@ namespace op
cv::Mat formatToCvMat(const Array<float>& outputData) const;
private:
const Point<int> mOutputResolution;
const std::array<int, 3> mOutputResolution;
};
}
......
......@@ -20,7 +20,7 @@ namespace op
void initializationOnThread();
void update(const cv::Mat& cvOutputData = cv::Mat{});
void update(const cv::Mat& cvOutputData = cv::Mat());
private:
// Frames display
......
......@@ -69,7 +69,7 @@ namespace op
// T* to T
auto& tDatumsNoPtr = *tDatums;
// Refresh GUI
const auto cvOutputData = (!tDatumsNoPtr.empty() ? tDatumsNoPtr[0].cvOutputData : cv::Mat{});
const auto cvOutputData = (!tDatumsNoPtr.empty() ? tDatumsNoPtr[0].cvOutputData : cv::Mat());
spGui->update(cvOutputData);
// Profiling speed
if (!tDatumsNoPtr.empty())
......
......@@ -11,7 +11,8 @@ namespace op
COCO_18 = 0, /**< COCO model, with 18+1 components (see poseParameters.hpp for details). */
MPI_15 = 1, /**< MPI model, with 15+1 components (see poseParameters.hpp for details). */
MPI_15_4 = 2, /**< Variation of the MPI model, reduced number of CNN stages to 4: faster but less accurate.*/
BODY_22 = 3, /**< Experimental. Do not use. */
BODY_18 = 3, /**< Experimental. Do not use. */
BODY_22 = 4, /**< Experimental. Do not use. */
Size,
};
......
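Since BODY_18 is inserted before BODY_22, BODY_22 shifts from index 3 to 4 and PoseModel::Size grows to 5, so every per-model lookup table below gains one entry. A hypothetical compile-time check, not part of this commit, that would document that expectation:

    #include <openpose/pose/enumClasses.hpp>   // assumed location of op::PoseModel

    static_assert(static_cast<int>(op::PoseModel::BODY_22) == 4
                      && static_cast<int>(op::PoseModel::Size) == 5,
                  "Per-model arrays in poseParameters.hpp must list 5 entries");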
......@@ -101,6 +101,15 @@ namespace op
0.f, 85.f, 255.f, \
0.f, 0.f, 255.f
const std::vector<float> POSE_MPI_COLORS_RENDER{POSE_MPI_COLORS_RENDER_GPU};
// BODY_18
const std::map<unsigned int, std::string> POSE_BODY_18_BODY_PARTS {POSE_COCO_BODY_PARTS};
const unsigned int POSE_BODY_18_NUMBER_PARTS {POSE_COCO_NUMBER_PARTS};
const std::vector<unsigned int> POSE_BODY_18_MAP_IDX {POSE_COCO_MAP_IDX};
#define POSE_BODY_18_PAIRS_RENDER_GPU POSE_COCO_PAIRS_RENDER_GPU
const std::vector<unsigned int> POSE_BODY_18_PAIRS_RENDER {POSE_BODY_18_PAIRS_RENDER_GPU};
const std::vector<unsigned int> POSE_BODY_18_PAIRS {POSE_COCO_PAIRS};
#define POSE_BODY_18_COLORS_RENDER_GPU POSE_COCO_COLORS_RENDER_GPU
const std::vector<float> POSE_BODY_18_COLORS_RENDER {POSE_BODY_18_COLORS_RENDER_GPU};
// BODY_22 (experimental, do not use)
const std::map<unsigned int, std::string> POSE_BODY_22_BODY_PARTS {
{0, "Nose"},
......@@ -161,30 +170,32 @@ namespace op
POSE_MAX_PEOPLE, POSE_MAX_PEOPLE, POSE_MAX_PEOPLE, POSE_MAX_PEOPLE
};
const std::array<unsigned int, (int)PoseModel::Size> POSE_NUMBER_BODY_PARTS{
POSE_COCO_NUMBER_PARTS, POSE_MPI_NUMBER_PARTS, POSE_MPI_NUMBER_PARTS, POSE_BODY_22_NUMBER_PARTS
POSE_COCO_NUMBER_PARTS, POSE_MPI_NUMBER_PARTS, POSE_MPI_NUMBER_PARTS, POSE_BODY_18_NUMBER_PARTS, POSE_BODY_22_NUMBER_PARTS
};
const std::array<std::vector<unsigned int>, (int)PoseModel::Size> POSE_BODY_PART_PAIRS{
POSE_COCO_PAIRS, POSE_MPI_PAIRS, POSE_MPI_PAIRS, POSE_BODY_22_PAIRS
POSE_COCO_PAIRS, POSE_MPI_PAIRS, POSE_MPI_PAIRS, POSE_BODY_18_PAIRS, POSE_BODY_22_PAIRS
};
const std::array<std::vector<unsigned int>, (int)PoseModel::Size> POSE_BODY_PART_PAIRS_RENDER{
POSE_COCO_PAIRS_RENDER, POSE_MPI_PAIRS, POSE_MPI_PAIRS, POSE_BODY_22_PAIRS_RENDER
POSE_COCO_PAIRS_RENDER, POSE_MPI_PAIRS, POSE_MPI_PAIRS, POSE_BODY_18_PAIRS_RENDER, POSE_BODY_22_PAIRS_RENDER
};
const std::array<std::vector<unsigned int>, (int)PoseModel::Size> POSE_MAP_IDX{
POSE_COCO_MAP_IDX, POSE_MPI_MAP_IDX, POSE_MPI_MAP_IDX, POSE_BODY_22_MAP_IDX
POSE_COCO_MAP_IDX, POSE_MPI_MAP_IDX, POSE_MPI_MAP_IDX, POSE_BODY_18_MAP_IDX, POSE_BODY_22_MAP_IDX
};
const std::array<std::vector<float>, (int)PoseModel::Size> POSE_COLORS{
POSE_COCO_COLORS_RENDER,POSE_MPI_COLORS_RENDER, POSE_MPI_COLORS_RENDER, POSE_BODY_22_COLORS_RENDER
POSE_COCO_COLORS_RENDER,POSE_MPI_COLORS_RENDER, POSE_MPI_COLORS_RENDER, POSE_BODY_18_COLORS_RENDER, POSE_BODY_22_COLORS_RENDER
};
const std::array<std::string, (int)PoseModel::Size> POSE_PROTOTXT{
"pose/coco/pose_deploy_linevec.prototxt",
"pose/mpi/pose_deploy_linevec.prototxt",
"pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt",
"pose/body_18/pose_deploy.prototxt",
"pose/body_22/pose_deploy.prototxt"
};
const std::array<std::string, (int)PoseModel::Size> POSE_TRAINED_MODEL{
"pose/coco/pose_iter_440000.caffemodel",
"pose/mpi/pose_iter_160000.caffemodel",
"pose/mpi/pose_iter_160000.caffemodel",
"pose/body_18/pose_iter_XXXXXX.caffemodel",
"pose/body_22/pose_iter_40000.caffemodel"
};
// POSE_BODY_PART_MAPPING crashes on Windows at dynamic initialization, to avoid this crash:
......@@ -197,19 +208,19 @@ namespace op
// Default Model Parameters
// They might be modified on running time
const std::array<float, (int)PoseModel::Size> POSE_DEFAULT_NMS_THRESHOLD{
0.05f, 0.6f, 0.3f, 0.05f
0.05f, 0.6f, 0.3f, 0.05f, 0.05f
};
const std::array<unsigned int, (int)PoseModel::Size> POSE_DEFAULT_CONNECT_INTER_MIN_ABOVE_THRESHOLD{
9, 8, 8, 9
9, 8, 8, 9, 9
};
const std::array<float, (int)PoseModel::Size> POSE_DEFAULT_CONNECT_INTER_THRESHOLD{
0.05f, 0.01f, 0.01f, 0.05f
0.05f, 0.01f, 0.01f, 0.05f, 0.05f
};
const std::array<unsigned int, (int)PoseModel::Size> POSE_DEFAULT_CONNECT_MIN_SUBSET_CNT{
3, 3, 3, 3
3, 3, 3, 3, 3
};
const std::array<float, (int)PoseModel::Size> POSE_DEFAULT_CONNECT_MIN_SUBSET_SCORE{
0.4f, 0.4f, 0.4f, 0.4f
0.4f, 0.4f, 0.4f, 0.4f, 0.4f
};
// Rendering parameters
......
......@@ -9,11 +9,11 @@ namespace op
{
OP_API void putTextOnCvMat(cv::Mat& cvMat, const std::string& textToDisplay, const Point<int>& position, const cv::Scalar& color, const bool normalizeWidth);
OP_API void floatPtrToUCharCvMat(cv::Mat& cvMat, const float* const floatImage, const Point<int>& resolutionSize, const int resolutionChannels);
OP_API void floatPtrToUCharCvMat(cv::Mat& uCharCvMat, const float* const floatPtrImage, const std::array<int, 3> resolutionSize);
OP_API void unrollArrayToUCharCvMat(cv::Mat& cvMatResult, const Array<float>& array);
OP_API void uCharCvMatToFloatPtr(float* floatImage, const cv::Mat& cvImage, const bool normalize);
OP_API void uCharCvMatToFloatPtr(float* floatPtrImage, const cv::Mat& cvImage, const bool normalize);
OP_API double resizeGetScaleFactor(const Point<int>& initialSize, const Point<int>& targetSize);
......
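The new `floatPtrToUCharCvMat` signature packs the resolution into a single `std::array<int, 3>`; judging from the call sites later in this commit, the order is {width, height, channels}. A usage sketch under that assumption (values and helper name are made up):

    #include <array>
    #include <vector>
    #include <opencv2/core/core.hpp>
    #include <openpose/utilities/openCv.hpp>   // assumed header declaring floatPtrToUCharCvMat

    cv::Mat netOutputToImage(const std::vector<float>& netOutput)    // C x H x W float data
    {
        const std::array<int, 3> resolutionSize{656, 368, 3};        // {width, height, channels}
        cv::Mat uCharCvMat;
        op::floatPtrToUCharCvMat(uCharCvMat, netOutput.data(), resolutionSize);
        return uCharCvMat;                                           // 368 x 656, CV_8UC3
    }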
......@@ -176,7 +176,7 @@ namespace op
mVolume = 0;
spData.reset();
// cv::Mat available but empty
mCvMatData = std::make_pair(true, cv::Mat{});
mCvMatData = std::make_pair(true, cv::Mat());
}
}
catch (const std::exception& e)
......@@ -460,9 +460,9 @@ namespace op
mCvMatData.first = false;
if (mCvMatData.first)
mCvMatData.second = cv::Mat{(int)mSize.size(), mSize.data(), cvFormat, spData.get()};
mCvMatData.second = cv::Mat((int)mSize.size(), mSize.data(), cvFormat, spData.get());
else
mCvMatData.second = cv::Mat{};
mCvMatData.second = cv::Mat();
}
catch (const std::exception& e)
{
......
......@@ -4,7 +4,7 @@
namespace op
{
OpOutputToCvMat::OpOutputToCvMat(const Point<int>& outputResolution) :
mOutputResolution{outputResolution}
mOutputResolution{outputResolution.x, outputResolution.y, 3}
{
}
......@@ -17,14 +17,14 @@ namespace op
error("Wrong input element (empty outputData).", __LINE__, __FUNCTION__, __FILE__);
cv::Mat cvMat;
floatPtrToUCharCvMat(cvMat, outputData.getConstPtr(), mOutputResolution, 3);
floatPtrToUCharCvMat(cvMat, outputData.getConstPtr(), mOutputResolution);
return cvMat;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
}
......@@ -83,7 +83,7 @@ namespace op
// Save cv::Mat data
cv::FileStorage fileStorage{getFullName(fileNameNoExtension, format), cv::FileStorage::WRITE};
for (auto i = 0 ; i < cvMats.size() ; i++)
fileStorage << cvMatNames[i] << (cvMats[i].empty() ? cv::Mat{} : cvMats[i]);
fileStorage << cvMatNames[i] << (cvMats[i].empty() ? cv::Mat() : cvMats[i]);
// Release file
fileStorage.release();
}
......@@ -242,7 +242,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
......
......@@ -16,7 +16,7 @@ namespace op
{
setGuiDisplayMode(mGuiDisplayMode);
const cv::Mat blackFrame{mWindowedSize.y, mWindowedSize.x, CV_32FC3, {0,0,0}};
const cv::Mat blackFrame(mWindowedSize.y, mWindowedSize.x, CV_32FC3, {0,0,0});
FrameDisplayer::displayFrame(blackFrame);
cv::waitKey(1); // This one will show most probably a white image (I guess the program does not have time to render in 1 msec)
// cv::waitKey(1000); // This one will show the desired black image
......
......@@ -7,14 +7,16 @@
namespace op
{
void updateFps(unsigned long long& lastId, double& fps, unsigned int& fpsCounter, std::queue<std::chrono::high_resolution_clock::time_point>& fpsQueue,
void updateFps(unsigned long long& lastId, double& fps, unsigned int& fpsCounter,
std::queue<std::chrono::high_resolution_clock::time_point>& fpsQueue,
const unsigned long long id, const int numberGpus)
{
try
{
// If only 1 GPU -> update fps every frame.
// If > 1 GPU:
// We updated fps every (3*numberGpus) frames. This is due to the variability introduced by using > 1 GPU at the same time.
// We update the fps every (3*numberGpus) frames, due to the variability introduced by
// using > 1 GPU at the same time.
// However, we update every frame during the first few frames to have an initial estimator.
// In any of the previous cases, the fps value is estimated during the last several frames.
// In this way, a sudden fps drop will be quickly visually identified.
......@@ -34,7 +36,9 @@ namespace op
}
if (updatePrintedFps)
{
const auto timeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(fpsQueue.back()-fpsQueue.front()).count() * 1e-9;
const auto timeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(
fpsQueue.back()-fpsQueue.front()
).count() * 1e-9;
fps = (fpsQueue.size()-1) / (timeSec != 0. ? timeSec : 1.);
}
}
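With the timestamps stored in `fpsQueue`, the estimate is (number of frame intervals) / (elapsed seconds); for example (hypothetical numbers), 16 queued timestamps spanning 0.5 s yield fps = (16 - 1) / 0.5 = 30.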
......@@ -56,17 +60,16 @@ namespace op
{
}
void GuiInfoAdder::addInfo(cv::Mat& cvOutputData, const Array<float>& poseKeypoints, const unsigned long long id, const std::string& elementRenderedName)
void GuiInfoAdder::addInfo(cv::Mat& cvOutputData, const Array<float>& poseKeypoints, const unsigned long long id,
const std::string& elementRenderedName)
{
try
{
// Security checks
if (cvOutputData.empty())
error("Wrong input element (empty cvOutputData).", __LINE__, __FUNCTION__, __FILE__);
// Update fps
updateFps(mLastId, mFps, mFpsCounter, mFpsQueue, id, mNumberGpus);
// Used colors
const cv::Scalar white{255, 255, 255};
// Fps or s/gpu
......@@ -74,27 +77,32 @@ namespace op
std::snprintf(charArrayAux, 15, "%4.1f fps", mFps);
// Recording inverse: sec/gpu
// std::snprintf(charArrayAux, 15, "%4.2f s/gpu", (mFps != 0. ? mNumberGpus/mFps : 0.));
putTextOnCvMat(cvOutputData, charArrayAux, {mBorderMargin,mBorderMargin}, white, false);
putTextOnCvMat(cvOutputData, charArrayAux, {intRound(mOutputSize.x - mBorderMargin), mBorderMargin},
white, true);
// Part to show
// Allowing some buffer when changing the part to show (if >= 2 GPUs)
// I.e. one GPU might return a previous part after the other GPU returns the new desired part, it looks like a mini-bug on screen
// I.e. one GPU might return a previous part after the other GPU returns the new desired part, it looks
// like a mini-bug on screen
// Difference between Titan X (~110 ms) & 1050 Ti (~290ms)
if (mNumberGpus == 1 || (elementRenderedName != mLastElementRenderedName && mLastElementRenderedCounter > 4))
if (mNumberGpus == 1 || (elementRenderedName != mLastElementRenderedName
&& mLastElementRenderedCounter > 4))
{
mLastElementRenderedName = elementRenderedName;
mLastElementRenderedCounter = 0;
}
mLastElementRenderedCounter = fastMin(mLastElementRenderedCounter, std::numeric_limits<int>::max() - 5);
mLastElementRenderedCounter++;
// Display element to display or help
std::string message = (!mLastElementRenderedName.empty() ? mLastElementRenderedName : (mGuiEnabled ? "'h' for help" : ""));
if (!message.empty())
putTextOnCvMat(cvOutputData, message, {intRound(mOutputSize.x - mBorderMargin), mBorderMargin}, white, true);
// OpenPose name as well as help or part to show
putTextOnCvMat(cvOutputData, "OpenPose - " +
(!mLastElementRenderedName.empty() ?
mLastElementRenderedName : (mGuiEnabled ? "'h' for help" : "")),
{mBorderMargin, mBorderMargin}, white, false);
// Frame number
putTextOnCvMat(cvOutputData, "Frame " + std::to_string(id), {mBorderMargin, (int)(mOutputSize.y - mBorderMargin)}, white, false);
putTextOnCvMat(cvOutputData, "Frame: " + std::to_string(id),
{mBorderMargin, (int)(mOutputSize.y - mBorderMargin)}, white, false);
// Number people
const auto textToDisplay = std::to_string(poseKeypoints.getSize(0)) + " people";
putTextOnCvMat(cvOutputData, textToDisplay, {(int)(mOutputSize.x - mBorderMargin), (int)(mOutputSize.y - mBorderMargin)}, white, true);
putTextOnCvMat(cvOutputData, "People: " + std::to_string(poseKeypoints.getSize(0)),
{(int)(mOutputSize.x - mBorderMargin), (int)(mOutputSize.y - mBorderMargin)}, white, true);
}
catch (const std::exception& e)
{
......
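Net effect of the GUI changes above: the fps counter moves from the top-left to the top-right corner, the top-left now shows "OpenPose - " followed by the rendered element name (or "'h' for help"), and the bottom corners switch to the labeled formats "Frame: <id>" and "People: <count>".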
......@@ -40,7 +40,7 @@ namespace op
// Change w.r.t. other
if (nB != 0)
{
if (poseModel == PoseModel::COCO_18 || poseModel == PoseModel::BODY_22)
if (poseModel == PoseModel::COCO_18 || poseModel == PoseModel::BODY_18 || poseModel == PoseModel::BODY_22)
{
for (auto i = 1; i <= nB; i++)
{
......@@ -81,7 +81,7 @@ namespace op
}
else if (nA != 0)
{
if (poseModel == PoseModel::COCO_18 || poseModel == PoseModel::BODY_22)
if (poseModel == PoseModel::COCO_18 || poseModel == PoseModel::BODY_18 || poseModel == PoseModel::BODY_22)
{
for (auto i = 1; i <= nA; i++)
{
......
......@@ -3,7 +3,7 @@
namespace op
{
const std::array<std::map<unsigned int, std::string>, (int)PoseModel::Size> POSE_BODY_PART_MAPPING{
POSE_COCO_BODY_PARTS, POSE_MPI_BODY_PARTS, POSE_MPI_BODY_PARTS, POSE_BODY_22_BODY_PARTS
POSE_COCO_BODY_PARTS, POSE_MPI_BODY_PARTS, POSE_MPI_BODY_PARTS, POSE_BODY_18_BODY_PARTS, POSE_BODY_22_BODY_PARTS
};
unsigned int poseBodyPartMapStringToKey(const PoseModel poseModel, const std::vector<std::string>& strings)
......
......@@ -9,9 +9,11 @@ namespace op
// PI digits: http://www.piday.org/million/
__constant__ const float PI = 3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745f;
__constant__ const unsigned int COCO_PAIRS_GPU[] = POSE_COCO_PAIRS_RENDER_GPU;
__constant__ const unsigned int BODY_18_PAIRS_GPU[] = POSE_BODY_18_PAIRS_RENDER_GPU;
__constant__ const unsigned int BODY_22_PAIRS_GPU[] = POSE_BODY_22_PAIRS_RENDER_GPU;
__constant__ const unsigned int MPI_PAIRS_GPU[] = POSE_MPI_PAIRS_RENDER_GPU;
__constant__ const float COCO_COLORS[] = {POSE_COCO_COLORS_RENDER_GPU};
__constant__ const float BODY_18_COLORS[] = {POSE_BODY_18_COLORS_RENDER_GPU};
__constant__ const float BODY_22_COLORS[] = {POSE_BODY_22_COLORS_RENDER_GPU};
__constant__ const float MPI_COLORS[] = {POSE_MPI_COLORS_RENDER_GPU};
......@@ -120,6 +122,32 @@ namespace op
radius, stickwidth, threshold, alphaColorToAdd, blendOriginalFrame, (googlyEyes ? 14 : -1), (googlyEyes ? 15 : -1));
}
__global__ void renderPoseBody18(float* targetPtr, const int targetWidth, const int targetHeight, const float* const posePtr,
const int numberPeople, const float threshold, const bool googlyEyes, const bool blendOriginalFrame,
const float alphaColorToAdd)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto globalIdx = threadIdx.y * blockDim.x + threadIdx.x;
// Shared parameters
__shared__ float2 sharedMins[POSE_MAX_PEOPLE];
__shared__ float2 sharedMaxs[POSE_MAX_PEOPLE];
__shared__ float sharedScaleF[POSE_MAX_PEOPLE];
// Other parameters
const auto numberPartPairs = sizeof(BODY_18_PAIRS_GPU) / (2*sizeof(BODY_18_PAIRS_GPU[0]));
const auto numberColors = sizeof(BODY_18_COLORS) / (3*sizeof(BODY_18_COLORS[0]));
const auto radius = fastMin(targetWidth, targetHeight) / 100.f;
const auto stickwidth = fastMin(targetWidth, targetHeight) / 120.f;
// Render key points
renderKeypoints(targetPtr, sharedMaxs, sharedMins, sharedScaleF,
globalIdx, x, y, targetWidth, targetHeight, posePtr, BODY_18_PAIRS_GPU, numberPeople,
POSE_BODY_18_NUMBER_PARTS, numberPartPairs, BODY_18_COLORS, numberColors,
radius, stickwidth, threshold, alphaColorToAdd, blendOriginalFrame, (googlyEyes ? 14 : -1), (googlyEyes ? 15 : -1));
}
__global__ void renderPoseBody22(float* targetPtr, const int targetWidth, const int targetHeight, const float* const posePtr,
const int numberPeople, const float threshold, const bool googlyEyes, const bool blendOriginalFrame,
const float alphaColorToAdd)
......@@ -325,7 +353,7 @@ namespace op
//framePtr = width * height * 3
//heatMapPtr = heatMapSize.x * heatMapSize.y * #body parts
//posePtr = 3 (x,y,score) * #Body parts * numberPeople
if (googlyEyes && poseModel != PoseModel::COCO_18)
if (googlyEyes && poseModel != PoseModel::COCO_18 && poseModel != PoseModel::BODY_18)
error("Bool googlyEyes only compatible with PoseModel::COCO_18.", __LINE__, __FUNCTION__, __FILE__);
dim3 threadsPerBlock;
......@@ -335,9 +363,12 @@ namespace op
if (poseModel == PoseModel::COCO_18)
renderPoseCoco<<<threadsPerBlock, numBlocks>>>(framePtr, frameSize.x, frameSize.y, posePtr, numberPeople,
renderThreshold, googlyEyes, blendOriginalFrame, alphaBlending);
else if (poseModel == PoseModel::BODY_18)
renderPoseBody18<<<threadsPerBlock, numBlocks>>>(framePtr, frameSize.x, frameSize.y, posePtr, numberPeople,
renderThreshold, googlyEyes, blendOriginalFrame, alphaBlending);
else if (poseModel == PoseModel::BODY_22)
renderPoseBody22<<<threadsPerBlock, numBlocks>>>(framePtr, frameSize.x, frameSize.y, posePtr, numberPeople,
renderThreshold, googlyEyes, blendOriginalFrame, alphaBlending);
renderThreshold, googlyEyes, blendOriginalFrame, alphaBlending);
else if (poseModel == PoseModel::MPI_15 || poseModel == PoseModel::MPI_15_4)
renderPoseMpi29Parts<<<threadsPerBlock, numBlocks>>>(framePtr, frameSize.x, frameSize.y, posePtr,
numberPeople, renderThreshold, blendOriginalFrame, alphaBlending);
......
......@@ -54,7 +54,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
......
......@@ -57,7 +57,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
......@@ -155,7 +155,7 @@ namespace op
if (mType != ProducerType::ImageDirectory && (frame.cols != get(CV_CAP_PROP_FRAME_WIDTH) || frame.rows != get(CV_CAP_PROP_FRAME_HEIGHT)))
{
log("Frame size changed. Returning empty frame.", Priority::Max, __LINE__, __FUNCTION__, __FILE__);
frame = cv::Mat{};
frame = cv::Mat();
}
}
}
......
......@@ -74,7 +74,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
......
......@@ -31,7 +31,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
}
......@@ -133,7 +133,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
......
......@@ -18,6 +18,8 @@ namespace op
return PoseModel::MPI_15;
else if (poseModeString == "MPI_4_layers")
return PoseModel::MPI_15_4;
else if (poseModeString == "BODY_18")
return PoseModel::BODY_18;
else if (poseModeString == "BODY_22")
return PoseModel::BODY_22;
// else
......
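Given this string-to-enum mapping, the new model should be selectable through the demo's existing `--model_pose` flag as `BODY_18`; note that the BODY_18 caffemodel path registered above still contains a placeholder iteration number.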
......@@ -146,9 +146,9 @@ namespace op
const auto width = frame.size[2];
const auto height = frame.size[1];
const auto area = width * height;
cv::Mat frameB{height, width, CV_32FC1, &frame.data[0]};
cv::Mat frameG{height, width, CV_32FC1, &frame.data[area * sizeof(float) / sizeof(uchar)]};
cv::Mat frameR{height, width, CV_32FC1, &frame.data[2 * area * sizeof(float) / sizeof(uchar)]};
cv::Mat frameB(height, width, CV_32FC1, &frame.data[0]);
cv::Mat frameG(height, width, CV_32FC1, &frame.data[area * sizeof(float) / sizeof(uchar)]);
cv::Mat frameR(height, width, CV_32FC1, &frame.data[2 * area * sizeof(float) / sizeof(uchar)]);
// Parameters
const auto lineType = 8;
......
......@@ -24,25 +24,32 @@ namespace op
}
}
void floatPtrToUCharCvMat(cv::Mat& cvMat, const float* const floatImage, const Point<int>& resolutionSize, const int resolutionChannels)
void floatPtrToUCharCvMat(cv::Mat& uCharCvMat, const float* const floatPtrImage, const std::array<int, 3> resolutionSize)
{
try
{
// float* (deep net format): C x H x W
// cv::Mat (OpenCV format): H x W x C
if (cvMat.rows != resolutionSize.y || cvMat.cols != resolutionSize.x || cvMat.type() != CV_8UC3)
cvMat = cv::Mat(resolutionSize.y, resolutionSize.x, CV_8UC3);
const auto offsetBetweenChannels = resolutionSize.x * resolutionSize.y;
for (auto c = 0; c < resolutionChannels; c++)
// Info:
// float* (deep net format): C x H x W
// cv::Mat (OpenCV format): H x W x C
// Allocate cv::Mat if it was not initialized yet
if (uCharCvMat.empty() || uCharCvMat.rows != resolutionSize[1] || uCharCvMat.cols != resolutionSize[0] || uCharCvMat.type() != CV_8UC3)
uCharCvMat = cv::Mat(resolutionSize[1], resolutionSize[0], CV_8UC3);
// Fill uCharCvMat from floatPtrImage
auto* uCharPtrCvMat = (unsigned char*)(uCharCvMat.data);
const auto offsetBetweenChannels = resolutionSize[0] * resolutionSize[1];
const auto stepSize = uCharCvMat.step; // step = cols * channels
for (auto c = 0; c < resolutionSize[2]; c++)
{
const auto offsetChannelC = c*offsetBetweenChannels;
for (auto y = 0; y < resolutionSize.y; y++)
for (auto y = 0; y < resolutionSize[1]; y++)
{
const auto floatImageOffsetY = offsetChannelC + y*resolutionSize.x;
for (auto x = 0; x < resolutionSize.x; x++)
const auto yOffset = y * stepSize;
const auto floatPtrImageOffsetY = offsetChannelC + y*resolutionSize[0];
for (auto x = 0; x < resolutionSize[0]; x++)
{
const auto value = uchar( fastTruncate(intRound(floatImage[floatImageOffsetY + x]), 0, 255) );
*(cvMat.ptr<uchar>(y) + x*resolutionChannels + c) = value;
const auto value = uchar( fastTruncate(intRound(floatPtrImage[floatPtrImageOffsetY + x]), 0, 255) );
uCharPtrCvMat[yOffset + x * resolutionSize[2] + c] = value;
// *(uCharCvMat.ptr<uchar>(y, x) + c) = value; // Slower but safer and cleaner equivalent
}
}
}
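The loop above converts the deep-net C x H x W float layout into OpenCV's interleaved H x W x C uchar layout. A worked index example mirroring that mapping (arbitrary 4x3 image, 3 channels, continuous cv::Mat so step = W*C):

    const int W = 4, H = 3, C = 3;
    const int x = 2, y = 1, c = 0;            // channel 0 of pixel (x=2, y=1)
    const int srcIndex = c*W*H + y*W + x;     // = 6,  position in the C x H x W float buffer
    const int dstIndex = y*(W*C) + x*C + c;   // = 18, position in the H x W x C uchar buffer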
......@@ -67,17 +74,14 @@ namespace op
const auto width = array.getSize(2);
const auto areaInput = height * width;
const auto areaOutput = channels * width;
// Allocate cv::Mat
if (cvMatResult.cols != channels * width || cvMatResult.rows != height)
// Allocate cv::Mat if it was not initialized yet
if (cvMatResult.empty() || cvMatResult.cols != channels * width || cvMatResult.rows != height)
cvMatResult = cv::Mat(height, areaOutput, CV_8UC1);
// Fill cv::Mat
// Fill cvMatResult from array
for (auto channel = 0 ; channel < channels ; channel++)
{
// Get memory to be modified
cv::Mat cvMatROI{cvMatResult, cv::Rect{channel * width, 0, width, height}};
cv::Mat cvMatROI(cvMatResult, cv::Rect{channel * width, 0, width, height});
// Modify memory
const auto* arrayPtr = array.getConstPtr() + channel * areaInput;
for (auto y = 0 ; y < height ; y++)
......@@ -101,7 +105,7 @@ namespace op
}
}
void uCharCvMatToFloatPtr(float* floatImage, const cv::Mat& cvImage, const bool normalize)
void uCharCvMatToFloatPtr(float* floatPtrImage, const cv::Mat& cvImage, const bool normalize)
{
try
{
......@@ -114,22 +118,23 @@ namespace op
const auto* const originFramePtr = cvImage.data; // cv::Mat.data is always uchar
for (auto c = 0; c < channels; c++)
{
const auto floatImageOffsetC = c * height;
const auto floatPtrImageOffsetC = c * height;
for (auto y = 0; y < height; y++)
{
const auto floatImageOffsetY = (floatImageOffsetC + y) * width;
const auto floatPtrImageOffsetY = (floatPtrImageOffsetC + y) * width;
const auto originFramePtrOffsetY = y * width;
for (auto x = 0; x < width; x++)
floatImage[floatImageOffsetY + x] = float(originFramePtr[(originFramePtrOffsetY + x) * channels + c]);
floatPtrImage[floatPtrImageOffsetY + x] = float(originFramePtr[(originFramePtrOffsetY + x) * channels + c]);
}
}
// Normalizing if desired
// floatPtrImage wrapped as cv::Mat
// Empirically tested - OpenCV is more efficient normalizing a whole matrix/image (it uses AVX and other optimized instruction sets)
// In addition, the following if statement does not copy the data into a new cv::Mat; it just wraps the existing pointer.
if (normalize)
{
cv::Mat floatImageCvWrapper{height, width, CV_32FC3, floatImage};
floatImageCvWrapper = floatImageCvWrapper/256.f - 0.5f;
cv::Mat floatPtrImageCvWrapper(height, width, CV_32FC3, floatPtrImage);
floatPtrImageCvWrapper = floatPtrImageCvWrapper/256.f - 0.5f;
}
}
catch (const std::exception& e)
......@@ -171,7 +176,7 @@ namespace op
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat{};
return cv::Mat();
}
}
}