提交 4af2328d 编写于 作者: G gineshidalgo99

Cleaner examples: flagsToOpenPose class created

上级 54443d63
OpenPose - Authors and Contributors
====================================
### Authors
OpenPose is authored by [Gines Hidalgo](https://www.linkedin.com/in/gineshidalgo/), [Zhe Cao](http://www.andrew.cmu.edu/user/zhecao), [Tomas Simon](http://www.cs.cmu.edu/~tsimon/), [Shih-En Wei](https://scholar.google.com/citations?user=sFQD3k4AAAAJ&hl=en), [Hanbyul Joo](http://www.cs.cmu.edu/~hanbyulj/) and [Yaser Sheikh](http://www.cs.cmu.edu/~yaser/).
### Contributors
We would like to thank the following people, who have contributed significantly to OpenPose:
1. [Helen Medina](https://github.com/helen-medina): Creator of the Windows branch, responsible for making OpenPose stable on Windows.
因为 它太大了无法显示 image diff 。你可以改为 查看blob
......@@ -18,7 +18,6 @@
// C++ std library dependencies
#include <atomic>
#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
#include <cstdio> // sscanf
#include <string>
#include <thread> // std::this_thread
#include <vector>
......@@ -155,220 +154,49 @@ DEFINE_string(write_heatmaps, "", "Directory to write heat
DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
" Recommended `png` or any compressed and lossless format.");
// Translate the --model_pose flag string into the matching op::PoseModel enumerator.
// Accepted values: "COCO", "MPI", "MPI_4_layers", "BODY_22".
// On an unrecognized string it reports an error and falls back to COCO_18.
op::PoseModel gflagToPoseModel(const std::string& poseModeString)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    if (poseModeString == "COCO")
        return op::PoseModel::COCO_18;
    else if (poseModeString == "MPI")
        return op::PoseModel::MPI_15;
    else if (poseModeString == "MPI_4_layers")
        return op::PoseModel::MPI_15_4;
    else if (poseModeString == "BODY_22")
        return op::PoseModel::BODY_22;
    else
    {
        // Fixed: the previous message omitted BODY_22 even though it is accepted above.
        op::error("String does not correspond to any model (COCO, MPI, MPI_4_layers, BODY_22)", __LINE__, __FUNCTION__, __FILE__);
        return op::PoseModel::COCO_18;
    }
}
// Map the integer --keypoint_scale flag onto the corresponding op::ScaleMode value.
// Unknown values trigger an error and fall back to InputResolution.
op::ScaleMode gflagToScaleMode(const int keypointScale)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    switch (keypointScale)
    {
        case 0: return op::ScaleMode::InputResolution;
        case 1: return op::ScaleMode::NetOutputResolution;
        case 2: return op::ScaleMode::OutputResolution;
        case 3: return op::ScaleMode::ZeroToOne;
        case 4: return op::ScaleMode::PlusMinusOne;
        default:
        {
            const std::string message = "String does not correspond to any scale mode: (0, 1, 2, 3, 4) for (InputResolution,"
                                        " NetOutputResolution, OutputResolution, ZeroToOne, PlusMinusOne).";
            op::error(message, __LINE__, __FUNCTION__, __FILE__);
            return op::ScaleMode::InputResolution;
        }
    }
}
// Determine type of frame source from the user-provided flags.
// Exactly one of image directory, video path, or webcam index may be selected;
// when no explicit source is given, the webcam is the default.
op::ProducerType gflagsToProducerType(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    const auto imageDirSelected = !imageDirectory.empty();
    const auto videoSelected = !videoPath.empty();
    const auto webcamSelected = (webcamIndex != 0);
    // Reject contradictory selections (e.g. camera and video at the same time)
    if (imageDirSelected && videoSelected)
        op::error("Selected simultaneously image directory and video. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    else if (imageDirSelected && webcamSelected)
        op::error("Selected simultaneously image directory and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    else if (videoSelected && webcamSelected)
        op::error("Selected simultaneously video and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    // Pick the op::ProducerType matching the single selected source
    if (imageDirSelected)
        return op::ProducerType::ImageDirectory;
    if (videoSelected)
        return op::ProducerType::Video;
    return op::ProducerType::Webcam;
}
// Build the concrete op::Producer for the selected frame source
// (image directory, video file, or webcam).
std::shared_ptr<op::Producer> gflagsToProducer(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex,
                                               const op::Point<int> webcamResolution, const double webcamFps)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    switch (gflagsToProducerType(imageDirectory, videoPath, webcamIndex))
    {
        case op::ProducerType::ImageDirectory:
            return std::make_shared<op::ImageDirectoryReader>(imageDirectory);
        case op::ProducerType::Video:
            return std::make_shared<op::VideoReader>(videoPath);
        case op::ProducerType::Webcam:
            return std::make_shared<op::WebcamReader>(webcamIndex, webcamResolution, webcamFps);
        default:
            op::error("Undefined Producer selected.", __LINE__, __FUNCTION__, __FILE__);
            return std::shared_ptr<op::Producer>{};
    }
}
std::vector<op::HeatMapType> gflagToHeatMaps(const bool heatMapsAddParts, const bool heatMapsAddBkg, const bool heatMapsAddPAFs)
{
std::vector<op::HeatMapType> heatMapTypes;
if (heatMapsAddParts)
heatMapTypes.emplace_back(op::HeatMapType::Parts);
if (heatMapsAddBkg)
heatMapTypes.emplace_back(op::HeatMapType::Background);
if (heatMapsAddPAFs)
heatMapTypes.emplace_back(op::HeatMapType::PAFs);
return heatMapTypes;
}
op::DetectionMode gflagToDetectionMode(const int handDetectionModeFlag, const std::shared_ptr<op::Producer>& producer = nullptr)
int opRealTimePoseDemo()
{
if (handDetectionModeFlag == -1)
{
if (producer == nullptr)
op::error("Since there is no default producer, `hand_detection_mode` must be set.", __LINE__, __FUNCTION__, __FILE__);
const auto producerType = producer->getType();
if (producerType == op::ProducerType::Webcam)
return op::DetectionMode::Fast;
else if (producerType == op::ProducerType::ImageDirectory)
return op::DetectionMode::Iterative;
else if (producerType == op::ProducerType::Video)
return op::DetectionMode::Tracking;
}
else if (handDetectionModeFlag == 0)
return op::DetectionMode::Fast;
else if (handDetectionModeFlag == 1)
return op::DetectionMode::Iterative;
else if (handDetectionModeFlag == 2)
return op::DetectionMode::Tracking;
else if (handDetectionModeFlag == 3)
return op::DetectionMode::IterativeAndTracking;
// else
op::error("Undefined DetectionMode selected.", __LINE__, __FUNCTION__, __FILE__);
return op::DetectionMode::Fast;
}
// logging_level
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// op::ConfigureLog::setPriorityThreshold(op::Priority::None); // To print all logging messages
// Map a render flag (0/1/2 = None/Cpu/Gpu) onto op::RenderMode.
// A renderFlag of -1 means "inherit from render_pose", which is resolved by a
// single recursive call; -2 as renderPoseFlag marks "no pose flag provided".
op::RenderMode gflagToRenderMode(const int renderFlag, const int renderPoseFlag = -2)
{
    if (renderFlag == -1 && renderPoseFlag != -2)
        return gflagToRenderMode(renderPoseFlag, -2);
    switch (renderFlag)
    {
        case 0: return op::RenderMode::None;
        case 1: return op::RenderMode::Cpu;
        case 2: return op::RenderMode::Gpu;
        default:
            op::error("Undefined RenderMode selected.", __LINE__, __FUNCTION__, __FILE__);
            return op::RenderMode::None;
    }
}
op::log("Starting pose estimation demo.", op::Priority::High);
const auto timerBegin = std::chrono::high_resolution_clock::now();
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::Point<int>, std::shared_ptr<op::Producer>, op::PoseModel, op::ScaleMode,
std::vector<op::HeatMapType>> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// cameraFrameSize
op::Point<int> cameraFrameSize;
auto nRead = sscanf(FLAGS_camera_resolution.c_str(), "%dx%d", &cameraFrameSize.x, &cameraFrameSize.y);
op::checkE(nRead, 2, "Error, camera resolution format (" + FLAGS_camera_resolution + ") invalid, should be e.g., 1280x720",
__LINE__, __FUNCTION__, __FILE__);
// Applying user defined configuration - Google flags to program variables
// outputSize
op::Point<int> outputSize;
nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, resolution format (" + FLAGS_resolution + ") invalid, should be e.g., 960x540 ",
__LINE__, __FUNCTION__, __FILE__);
const auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// netInputSize
op::Point<int> netInputSize;
nRead = sscanf(FLAGS_net_resolution.c_str(), "%dx%d", &netInputSize.x, &netInputSize.y);
op::checkE(nRead, 2, "Error, net resolution format (" + FLAGS_net_resolution + ") invalid, should be e.g., 656x368 (multiples of 16)",
__LINE__, __FUNCTION__, __FILE__);
const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "656x368");
// faceNetInputSize
op::Point<int> faceNetInputSize;
nRead = sscanf(FLAGS_face_net_resolution.c_str(), "%dx%d", &faceNetInputSize.x, &faceNetInputSize.y);
op::checkE(nRead, 2, "Error, face net resolution format (" + FLAGS_face_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
// handNetInputSize
op::Point<int> handNetInputSize;
nRead = sscanf(FLAGS_hand_net_resolution.c_str(), "%dx%d", &handNetInputSize.x, &handNetInputSize.y);
op::checkE(nRead, 2, "Error, hand net resolution format (" + FLAGS_hand_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// producerType
const auto producerSharedPtr = gflagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, cameraFrameSize, FLAGS_camera_fps);
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// keypointScale
const auto keypointScale = gflagToScaleMode(FLAGS_keypoint_scale);
const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
// heatmaps to add
const auto heatMapTypes = gflagToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
// Return
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, handNetInputSize, producerSharedPtr, poseModel, keypointScale, heatMapTypes);
}
int opRealTimePoseDemo()
{
// logging_level
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// op::ConfigureLog::setPriorityThreshold(op::Priority::None); // To print all logging messages
op::log("Starting pose estimation demo.", op::Priority::High);
const auto timerBegin = std::chrono::high_resolution_clock::now();
// Applying user defined configuration
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> faceNetInputSize;
op::Point<int> handNetInputSize;
std::shared_ptr<op::Producer> producerSharedPtr;
op::PoseModel poseModel;
op::ScaleMode keypointScale;
std::vector<op::HeatMapType> heatMapTypes;
std::tie(outputSize, netInputSize, faceNetInputSize, handNetInputSize, producerSharedPtr, poseModel, keypointScale,
heatMapTypes) = gflagsToOpParameters();
const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// OpenPose wrapper
op::log("Configuring OpenPose wrapper.", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
op::Wrapper<std::vector<op::Datum>> opWrapper;
// Pose configuration (use WrapperStructPose{} for default and recommended configuration)
const op::WrapperStructPose wrapperStructPose{netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
FLAGS_num_scales, (float)FLAGS_scale_gap, gflagToRenderMode(FLAGS_render_pose), poseModel,
FLAGS_num_scales, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose), poseModel,
!FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, op::ScaleMode::UnsignedChar};
// Face configuration (use op::WrapperStructFace{} to disable it)
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, gflagToRenderMode(FLAGS_render_face, FLAGS_render_pose),
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_render_face, FLAGS_render_pose),
(float)FLAGS_alpha_face, (float)FLAGS_alpha_heatmap_face};
// Hand configuration (use op::WrapperStructHand{} to disable it)
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, gflagToDetectionMode(FLAGS_hand_detection_mode, producerSharedPtr),
gflagToRenderMode(FLAGS_render_hand, FLAGS_render_pose), (float)FLAGS_alpha_hand,
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, op::flagsToDetectionMode(FLAGS_hand_detection_mode, producerSharedPtr),
op::flagsToRenderMode(FLAGS_render_hand, FLAGS_render_pose), (float)FLAGS_alpha_hand,
(float)FLAGS_alpha_heatmap_hand};
// Producer (use default to disable any input)
const op::WrapperStructInput wrapperStructInput{producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time,
......
......@@ -45,65 +45,32 @@ DEFINE_bool(disable_blending, false, "If blending is enabled,
DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
" hide it. Only valid for GPU rendering.");
// Translate the --model_pose flag string ("COCO", "MPI", "MPI_4_layers") into
// its op::PoseModel counterpart; unknown strings report an error and fall back
// to COCO_18. The comparisons are mutually exclusive, so their order is free.
op::PoseModel gflagToPoseModel(const std::string& poseModeString)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    if (poseModeString == "MPI")
        return op::PoseModel::MPI_15;
    if (poseModeString == "MPI_4_layers")
        return op::PoseModel::MPI_15_4;
    if (poseModeString == "COCO")
        return op::PoseModel::COCO_18;
    op::error("String does not correspond to any model (COCO, MPI, MPI_4_layers)", __LINE__, __FUNCTION__, __FILE__);
    return op::PoseModel::COCO_18;
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel> gflagsToOpParameters()
int openPoseTutorialPose1()
{
op::log("OpenPose Library Tutorial - Example 1.", op::Priority::High);
// ------------------------- INITIALIZATION -------------------------
// Step 1 - Set logging level
// - 0 will output all the logging messages
// - 255 will output nothing
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Step 2 - Read Google flags (user defined configuration)
// outputSize
op::Point<int> outputSize;
auto nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, resolution format (" + FLAGS_resolution + ") invalid, should be e.g., 960x540 ",
__LINE__, __FUNCTION__, __FILE__);
const auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// netInputSize
op::Point<int> netInputSize;
nRead = sscanf(FLAGS_net_resolution.c_str(), "%dx%d", &netInputSize.x, &netInputSize.y);
op::checkE(nRead, 2, "Error, net resolution format (" + FLAGS_net_resolution + ") invalid, should be e.g., 656x368 (multiples of 16)",
__LINE__, __FUNCTION__, __FILE__);
const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "656x368");
// netOutputSize
const auto netOutputSize = netInputSize;
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// Check no contradictory flags enabled
if (FLAGS_alpha_pose < 0. || FLAGS_alpha_pose > 1.)
op::error("Alpha value for blending must be in the range [0,1].", __LINE__, __FUNCTION__, __FILE__);
if (FLAGS_scale_gap <= 0. && FLAGS_num_scales > 1)
op::error("Incompatible flag configuration: scale_gap must be greater than 0 or num_scales = 1.", __LINE__, __FUNCTION__, __FILE__);
// Logging and return result
// Logging
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
return std::make_tuple(outputSize, netInputSize, netOutputSize, poseModel);
}
int openPoseTutorialPose1()
{
op::log("OpenPose Library Tutorial - Example 1.", op::Priority::High);
// ------------------------- INITIALIZATION -------------------------
// Step 1 - Set logging level
// - 0 will output all the logging messages
// - 255 will output nothing
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// Step 2 - Read Google flags (user defined configuration)
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> netOutputSize;
op::PoseModel poseModel;
std::tie(outputSize, netInputSize, netOutputSize, poseModel) = gflagsToOpParameters();
// Step 3 - Initialize all required classes
op::CvMatToOpInput cvMatToOpInput{netInputSize, FLAGS_num_scales, (float)FLAGS_scale_gap};
op::CvMatToOpOutput cvMatToOpOutput{outputSize};
......
......@@ -48,65 +48,32 @@ DEFINE_double(alpha_pose, 0.6, "Blending factor (range
DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
" heatmap, 0 will only show the frame. Only valid for GPU rendering.");
// Convert --model_pose into an op::PoseModel value. Valid inputs are "COCO",
// "MPI" and "MPI_4_layers"; anything else reports an error and defaults to COCO_18.
op::PoseModel gflagToPoseModel(const std::string& poseModeString)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    if (poseModeString == "MPI_4_layers")
        return op::PoseModel::MPI_15_4;
    if (poseModeString == "MPI")
        return op::PoseModel::MPI_15;
    if (poseModeString == "COCO")
        return op::PoseModel::COCO_18;
    op::error("String does not correspond to any model (COCO, MPI, MPI_4_layers)", __LINE__, __FUNCTION__, __FILE__);
    return op::PoseModel::COCO_18;
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel> gflagsToOpParameters()
int openPoseTutorialPose2()
{
op::log("OpenPose Library Tutorial - Example 2.", op::Priority::High);
// ------------------------- INITIALIZATION -------------------------
// Step 1 - Set logging level
// - 0 will output all the logging messages
// - 255 will output nothing
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Step 2 - Read Google flags (user defined configuration)
// outputSize
op::Point<int> outputSize;
auto nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, resolution format (" + FLAGS_resolution + ") invalid, should be e.g., 960x540 ",
__LINE__, __FUNCTION__, __FILE__);
const auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// netInputSize
op::Point<int> netInputSize;
nRead = sscanf(FLAGS_net_resolution.c_str(), "%dx%d", &netInputSize.x, &netInputSize.y);
op::checkE(nRead, 2, "Error, net resolution format (" + FLAGS_net_resolution + ") invalid, should be e.g., 656x368 (multiples of 16)",
__LINE__, __FUNCTION__, __FILE__);
const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "656x368");
// netOutputSize
const auto netOutputSize = netInputSize;
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// Check no contradictory flags enabled
if (FLAGS_alpha_pose < 0. || FLAGS_alpha_pose > 1.)
op::error("Alpha value for blending must be in the range [0,1].", __LINE__, __FUNCTION__, __FILE__);
if (FLAGS_scale_gap <= 0. && FLAGS_num_scales > 1)
op::error("Incompatible flag configuration: scale_gap must be greater than 0 or num_scales = 1.", __LINE__, __FUNCTION__, __FILE__);
// Logging and return result
// Logging
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
return std::make_tuple(outputSize, netInputSize, netOutputSize, poseModel);
}
int openPoseTutorialPose2()
{
op::log("OpenPose Library Tutorial - Example 2.", op::Priority::High);
// ------------------------- INITIALIZATION -------------------------
// Step 1 - Set logging level
// - 0 will output all the logging messages
// - 255 will output nothing
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// Step 2 - Read Google flags (user defined configuration)
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> netOutputSize;
op::PoseModel poseModel;
std::tie(outputSize, netInputSize, netOutputSize, poseModel) = gflagsToOpParameters();
// Step 3 - Initialize all required classes
op::CvMatToOpInput cvMatToOpInput{netInputSize, FLAGS_num_scales, (float)FLAGS_scale_gap};
op::CvMatToOpOutput cvMatToOpOutput{outputSize};
......
......@@ -41,70 +41,6 @@ DEFINE_bool(fullscreen, false, "Run in full-screen mode
DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
" too long, it will skip frames. If it is too fast, it will slow it down.");
// Determine type of frame source from the user flags. Only one of image
// directory, video, or webcam may be selected; webcam is the default source.
op::ProducerType gflagsToProducerType(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    const auto usesImageDir = !imageDirectory.empty();
    const auto usesVideo = !videoPath.empty();
    const auto usesWebcam = (webcamIndex != 0);
    // Guard against selecting more than one source at once
    if (usesImageDir && usesVideo)
        op::error("Selected simultaneously image directory and video. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    else if (usesImageDir && usesWebcam)
        op::error("Selected simultaneously image directory and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    else if (usesVideo && usesWebcam)
        op::error("Selected simultaneously video and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    // Resolve the producer type for the single selected source
    if (usesImageDir)
        return op::ProducerType::ImageDirectory;
    if (usesVideo)
        return op::ProducerType::Video;
    return op::ProducerType::Webcam;
}
// Instantiate the op::Producer implementation matching the selected source.
std::shared_ptr<op::Producer> gflagsToProducer(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex,
                                               const op::Point<int> webcamResolution, const int webcamFps)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    switch (gflagsToProducerType(imageDirectory, videoPath, webcamIndex))
    {
        case op::ProducerType::ImageDirectory:
            return std::make_shared<op::ImageDirectoryReader>(imageDirectory);
        case op::ProducerType::Video:
            return std::make_shared<op::VideoReader>(videoPath);
        case op::ProducerType::Webcam:
            return std::make_shared<op::WebcamReader>(webcamIndex, webcamResolution, webcamFps);
        default:
            op::error("Undefined Producer selected.", __LINE__, __FUNCTION__, __FILE__);
            return std::shared_ptr<op::Producer>{};
    }
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, std::shared_ptr<op::Producer>> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// cameraFrameSize
op::Point<int> cameraFrameSize;
auto nRead = sscanf(FLAGS_camera_resolution.c_str(), "%dx%d", &cameraFrameSize.x, &cameraFrameSize.y);
op::checkE(nRead, 2, "Error, camera resolution format (" + FLAGS_camera_resolution + ") invalid, should be e.g., 1280x720",
__LINE__, __FUNCTION__, __FILE__);
// outputSize
op::Point<int> outputSize;
nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, camera resolution format (" + FLAGS_camera_resolution + ") invalid, should be e.g., 1280x720",
__LINE__, __FUNCTION__, __FILE__);
// producerType
const auto producerSharedPtr = gflagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, cameraFrameSize, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time ? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
return std::make_tuple(cameraFrameSize, outputSize, producerSharedPtr);
}
int openPoseTutorialThread1()
{
op::log("OpenPose Library Tutorial - Example 3.", op::Priority::High);
......@@ -115,10 +51,13 @@ int openPoseTutorialThread1()
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// Step 2 - Read Google flags (user defined configuration)
op::Point<int> cameraFrameSize;
op::Point<int> outputSize;
std::shared_ptr<op::Producer> producerSharedPtr;
std::tie(cameraFrameSize, outputSize, producerSharedPtr) = gflagsToOpParameters();
// outputSize
auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time ? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Step 3 - Setting producer
auto videoSeekSharedPtr = std::make_shared<std::pair<std::atomic<bool>, std::atomic<int>>>();
videoSeekSharedPtr->first = false;
......
......@@ -75,70 +75,6 @@ public:
}
};
// Determine type of frame source from the user flags; image directory, video
// and webcam are mutually exclusive, with webcam as the fallback default.
op::ProducerType gflagsToProducerType(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    const auto hasImageDir = !imageDirectory.empty();
    const auto hasVideo = !videoPath.empty();
    const auto hasWebcam = (webcamIndex != 0);
    // Reject multiple simultaneous sources
    if (hasImageDir && hasVideo)
        op::error("Selected simultaneously image directory and video. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    else if (hasImageDir && hasWebcam)
        op::error("Selected simultaneously image directory and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    else if (hasVideo && hasWebcam)
        op::error("Selected simultaneously video and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
    // Pick the producer type for the chosen source
    if (hasImageDir)
        return op::ProducerType::ImageDirectory;
    if (hasVideo)
        return op::ProducerType::Video;
    return op::ProducerType::Webcam;
}
}
// Create the op::Producer subclass matching the selected frame source.
std::shared_ptr<op::Producer> gflagsToProducer(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex,
                                               const op::Point<int> webcamResolution, const int webcamFps)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    const auto producerType = gflagsToProducerType(imageDirectory, videoPath, webcamIndex);
    switch (producerType)
    {
        case op::ProducerType::ImageDirectory:
            return std::make_shared<op::ImageDirectoryReader>(imageDirectory);
        case op::ProducerType::Video:
            return std::make_shared<op::VideoReader>(videoPath);
        case op::ProducerType::Webcam:
            return std::make_shared<op::WebcamReader>(webcamIndex, webcamResolution, webcamFps);
        default:
            op::error("Undefined Producer selected.", __LINE__, __FUNCTION__, __FILE__);
            return std::shared_ptr<op::Producer>{};
    }
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, std::shared_ptr<op::Producer>> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// cameraFrameSize
op::Point<int> cameraFrameSize;
auto nRead = sscanf(FLAGS_camera_resolution.c_str(), "%dx%d", &cameraFrameSize.x, &cameraFrameSize.y);
op::checkE(nRead, 2, "Error, camera resolution format (" + FLAGS_camera_resolution + ") invalid, should be e.g., 1280x720",
__LINE__, __FUNCTION__, __FILE__);
// outputSize
op::Point<int> outputSize;
nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, camera resolution format (" + FLAGS_camera_resolution + ") invalid, should be e.g., 1280x720",
__LINE__, __FUNCTION__, __FILE__);
// producerType
const auto producerSharedPtr = gflagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, cameraFrameSize, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time ? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
return std::make_tuple(cameraFrameSize, outputSize, producerSharedPtr);
}
int openPoseTutorialThread2()
{
op::log("OpenPose Library Tutorial - Example 3.", op::Priority::High);
......@@ -149,10 +85,13 @@ int openPoseTutorialThread2()
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// Step 2 - Read Google flags (user defined configuration)
op::Point<int> cameraFrameSize;
op::Point<int> outputSize;
std::shared_ptr<op::Producer> producerSharedPtr;
std::tie(cameraFrameSize, outputSize, producerSharedPtr) = gflagsToOpParameters();
// outputSize
auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time ? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Step 3 - Setting producer
auto videoSeekSharedPtr = std::make_shared<std::pair<std::atomic<bool>, std::atomic<int>>>();
videoSeekSharedPtr->first = false;
......
......@@ -14,9 +14,7 @@
// This file should only be used for the user to take specific examples.
// C++ std library dependencies
#include <atomic>
#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
#include <cstdio> // sscanf
#include <string>
#include <thread> // std::this_thread
#include <vector>
......@@ -228,174 +226,49 @@ public:
}
};
// Resolve the --model_pose flag ("COCO", "MPI", "MPI_4_layers") to its
// op::PoseModel value; unknown inputs report an error and default to COCO_18.
op::PoseModel gflagToPoseModel(const std::string& poseModeString)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    if (poseModeString == "MPI")
        return op::PoseModel::MPI_15;
    if (poseModeString == "MPI_4_layers")
        return op::PoseModel::MPI_15_4;
    if (poseModeString == "COCO")
        return op::PoseModel::COCO_18;
    op::error("String does not correspond to any model (COCO, MPI, MPI_4_layers)", __LINE__, __FUNCTION__, __FILE__);
    return op::PoseModel::COCO_18;
}
// Translate the integer --keypoint_scale flag into an op::ScaleMode.
// Values outside 0..4 trigger an error and fall back to InputResolution.
op::ScaleMode gflagToScaleMode(const int keypointScale)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    switch (keypointScale)
    {
        case 0: return op::ScaleMode::InputResolution;
        case 1: return op::ScaleMode::NetOutputResolution;
        case 2: return op::ScaleMode::OutputResolution;
        case 3: return op::ScaleMode::ZeroToOne;
        case 4: return op::ScaleMode::PlusMinusOne;
        default:
        {
            const std::string message = "String does not correspond to any scale mode: (0, 1, 2, 3, 4) for (InputResolution,"
                                        " NetOutputResolution, OutputResolution, ZeroToOne, PlusMinusOne).";
            op::error(message, __LINE__, __FUNCTION__, __FILE__);
            return op::ScaleMode::InputResolution;
        }
    }
}
std::vector<op::HeatMapType> gflagToHeatMaps(const bool heatMapsAddParts, const bool heatMapsAddBkg, const bool heatMapsAddPAFs)
{
std::vector<op::HeatMapType> heatMapTypes;
if (heatMapsAddParts)
heatMapTypes.emplace_back(op::HeatMapType::Parts);
if (heatMapsAddBkg)
heatMapTypes.emplace_back(op::HeatMapType::Background);
if (heatMapsAddPAFs)
heatMapTypes.emplace_back(op::HeatMapType::PAFs);
return heatMapTypes;
}
op::DetectionMode gflagToDetectionMode(const int handDetectionModeFlag, const std::shared_ptr<op::Producer>& producer = nullptr)
int openPoseTutorialWrapper1()
{
if (handDetectionModeFlag == -1)
{
if (producer == nullptr)
op::error("Since there is no default producer, `hand_detection_mode` must be set.", __LINE__, __FUNCTION__, __FILE__);
const auto producerType = producer->getType();
if (producerType == op::ProducerType::Webcam)
return op::DetectionMode::Fast;
else if (producerType == op::ProducerType::ImageDirectory)
return op::DetectionMode::Iterative;
else if (producerType == op::ProducerType::Video)
return op::DetectionMode::Tracking;
}
else if (handDetectionModeFlag == 0)
return op::DetectionMode::Fast;
else if (handDetectionModeFlag == 1)
return op::DetectionMode::Iterative;
else if (handDetectionModeFlag == 2)
return op::DetectionMode::Tracking;
else if (handDetectionModeFlag == 3)
return op::DetectionMode::IterativeAndTracking;
// else
op::error("Undefined DetectionMode selected.", __LINE__, __FUNCTION__, __FILE__);
return op::DetectionMode::Fast;
}
// logging_level
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// op::ConfigureLog::setPriorityThreshold(op::Priority::None); // To print all logging messages
// Translates a render flag into an op::RenderMode.
// A flag of -1 inherits from `renderPoseFlag` (the sentinel -2 means "no fallback available").
op::RenderMode gflagToRenderMode(const int renderFlag, const int renderPoseFlag = -2)
{
    if (renderFlag == -1 && renderPoseFlag != -2)
        return gflagToRenderMode(renderPoseFlag, -2); // Recurse once with the pose flag
    switch (renderFlag)
    {
        case 0: return op::RenderMode::None;
        case 1: return op::RenderMode::Cpu;
        case 2: return op::RenderMode::Gpu;
        default:
            op::error("Undefined RenderMode selected.", __LINE__, __FUNCTION__, __FILE__);
            return op::RenderMode::None;
    }
}
op::log("Starting pose estimation demo.", op::Priority::High);
const auto timerBegin = std::chrono::high_resolution_clock::now();
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::ScaleMode, std::vector<op::HeatMapType>,
op::ScaleMode> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Applying user defined configuration - Google flags to program variables
// outputSize
op::Point<int> outputSize;
auto nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, resolution format (" + FLAGS_resolution + ") invalid, should be e.g., 960x540 ",
__LINE__, __FUNCTION__, __FILE__);
const auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// netInputSize
op::Point<int> netInputSize;
nRead = sscanf(FLAGS_net_resolution.c_str(), "%dx%d", &netInputSize.x, &netInputSize.y);
op::checkE(nRead, 2, "Error, net resolution format (" + FLAGS_net_resolution + ") invalid, should be e.g., 656x368 (multiples of 16)",
__LINE__, __FUNCTION__, __FILE__);
const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "656x368");
// faceNetInputSize
op::Point<int> faceNetInputSize;
nRead = sscanf(FLAGS_face_net_resolution.c_str(), "%dx%d", &faceNetInputSize.x, &faceNetInputSize.y);
op::checkE(nRead, 2, "Error, face net resolution format (" + FLAGS_face_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
// handNetInputSize
op::Point<int> handNetInputSize;
nRead = sscanf(FLAGS_hand_net_resolution.c_str(), "%dx%d", &handNetInputSize.x, &handNetInputSize.y);
op::checkE(nRead, 2, "Error, hand net resolution format (" + FLAGS_hand_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// keypointScale
const auto keypointScale = gflagToScaleMode(FLAGS_keypoint_scale);
const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
// heatmaps to add
const auto heatMapTypes = gflagToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
op::check(FLAGS_heatmaps_scale >= 0 && FLAGS_heatmaps_scale <= 2, "Non valid `heatmaps_scale`.", __LINE__, __FUNCTION__, __FILE__);
const auto heatMapScale = (FLAGS_heatmaps_scale == 0 ? op::ScaleMode::PlusMinusOne
: (FLAGS_heatmaps_scale == 1 ? op::ScaleMode::ZeroToOne : op::ScaleMode::UnsignedChar ));
// return
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale);
}
int openPoseTutorialWrapper1()
{
// logging_level
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// op::ConfigureLog::setPriorityThreshold(op::Priority::None); // To print all logging messages
op::log("Starting pose estimation demo.", op::Priority::High);
const auto timerBegin = std::chrono::high_resolution_clock::now();
// Applying user defined configuration
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> faceNetInputSize;
op::Point<int> handNetInputSize;
op::PoseModel poseModel;
op::ScaleMode keypointScale;
std::vector<op::HeatMapType> heatMapTypes;
op::ScaleMode heatMapScale;
std::tie(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale) = gflagsToOpParameters();
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Configure OpenPose
op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::Asynchronous};
// Pose configuration (use WrapperStructPose{} for default and recommended configuration)
const op::WrapperStructPose wrapperStructPose{netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
FLAGS_num_scales, (float)FLAGS_scale_gap, gflagToRenderMode(FLAGS_render_pose), poseModel,
FLAGS_num_scales, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose), poseModel,
!FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale};
// Face configuration (use op::WrapperStructFace{} to disable it)
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, gflagToRenderMode(FLAGS_render_face, FLAGS_render_pose),
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_render_face, FLAGS_render_pose),
(float)FLAGS_alpha_face, (float)FLAGS_alpha_heatmap_face};
// Hand configuration (use op::WrapperStructHand{} to disable it)
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, gflagToDetectionMode(FLAGS_hand_detection_mode),
gflagToRenderMode(FLAGS_render_hand, FLAGS_render_pose), (float)FLAGS_alpha_hand,
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, op::flagsToDetectionMode(FLAGS_hand_detection_mode),
op::flagsToRenderMode(FLAGS_render_hand, FLAGS_render_pose), (float)FLAGS_alpha_hand,
(float)FLAGS_alpha_heatmap_hand};
// Consumer (comment or use default argument to disable any output)
const bool displayGui = false;
......
......@@ -14,9 +14,7 @@
// This file should only be used for the user to take specific examples.
// C++ std library dependencies
#include <atomic>
#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
#include <cstdio> // sscanf
#include <string>
#include <thread> // std::this_thread
#include <vector>
......@@ -273,159 +271,34 @@ public:
}
};
// Converts the `model_pose` string flag into its op::PoseModel counterpart.
// Falls back to COCO_18 (after raising an op::error) for unrecognized strings.
op::PoseModel gflagToPoseModel(const std::string& poseModeString)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    if (poseModeString == "COCO")
        return op::PoseModel::COCO_18;
    else if (poseModeString == "MPI")
        return op::PoseModel::MPI_15;
    else if (poseModeString == "MPI_4_layers")
        return op::PoseModel::MPI_15_4;
    // BODY_22 is a valid op::PoseModel; previously missing here although supported elsewhere.
    else if (poseModeString == "BODY_22")
        return op::PoseModel::BODY_22;
    else
    {
        op::error("String does not correspond to any model (COCO, MPI, MPI_4_layers, BODY_22)", __LINE__, __FUNCTION__, __FILE__);
        return op::PoseModel::COCO_18;
    }
}
// Maps the integer `keypoint_scale` flag onto the corresponding op::ScaleMode.
// Any value outside [0, 4] raises an op::error and falls back to InputResolution.
op::ScaleMode gflagToScaleMode(const int keypointScale)
{
    op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    switch (keypointScale)
    {
        case 0: return op::ScaleMode::InputResolution;
        case 1: return op::ScaleMode::NetOutputResolution;
        case 2: return op::ScaleMode::OutputResolution;
        case 3: return op::ScaleMode::ZeroToOne;
        case 4: return op::ScaleMode::PlusMinusOne;
        default:
        {
            const std::string message = "String does not correspond to any scale mode: (0, 1, 2, 3, 4) for (InputResolution,"
                                        " NetOutputResolution, OutputResolution, ZeroToOne, PlusMinusOne).";
            op::error(message, __LINE__, __FUNCTION__, __FILE__);
            return op::ScaleMode::InputResolution;
        }
    }
}
std::vector<op::HeatMapType> gflagToHeatMaps(const bool heatMapsAddParts, const bool heatMapsAddBkg, const bool heatMapsAddPAFs)
{
std::vector<op::HeatMapType> heatMapTypes;
if (heatMapsAddParts)
heatMapTypes.emplace_back(op::HeatMapType::Parts);
if (heatMapsAddBkg)
heatMapTypes.emplace_back(op::HeatMapType::Background);
if (heatMapsAddPAFs)
heatMapTypes.emplace_back(op::HeatMapType::PAFs);
return heatMapTypes;
}
op::DetectionMode gflagToDetectionMode(const int handDetectionModeFlag, const std::shared_ptr<op::Producer>& producer = nullptr)
int openPoseTutorialWrapper2()
{
if (handDetectionModeFlag == -1)
{
if (producer == nullptr)
op::error("Since there is no default producer, `hand_detection_mode` must be set.", __LINE__, __FUNCTION__, __FILE__);
const auto producerType = producer->getType();
if (producerType == op::ProducerType::Webcam)
return op::DetectionMode::Fast;
else if (producerType == op::ProducerType::ImageDirectory)
return op::DetectionMode::Iterative;
else if (producerType == op::ProducerType::Video)
return op::DetectionMode::Tracking;
}
else if (handDetectionModeFlag == 0)
return op::DetectionMode::Fast;
else if (handDetectionModeFlag == 1)
return op::DetectionMode::Iterative;
else if (handDetectionModeFlag == 2)
return op::DetectionMode::Tracking;
else if (handDetectionModeFlag == 3)
return op::DetectionMode::IterativeAndTracking;
// else
op::error("Undefined DetectionMode selected.", __LINE__, __FUNCTION__, __FILE__);
return op::DetectionMode::Fast;
}
// logging_level
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// op::ConfigureLog::setPriorityThreshold(op::Priority::None); // To print all logging messages
// Translates a render flag into an op::RenderMode.
// A flag of -1 inherits from `renderPoseFlag` (the sentinel -2 means "no fallback available").
op::RenderMode gflagToRenderMode(const int renderFlag, const int renderPoseFlag = -2)
{
    if (renderFlag == -1 && renderPoseFlag != -2)
        return gflagToRenderMode(renderPoseFlag, -2); // Recurse once with the pose flag
    switch (renderFlag)
    {
        case 0: return op::RenderMode::None;
        case 1: return op::RenderMode::Cpu;
        case 2: return op::RenderMode::Gpu;
        default:
            op::error("Undefined RenderMode selected.", __LINE__, __FUNCTION__, __FILE__);
            return op::RenderMode::None;
    }
}
op::log("Starting pose estimation demo.", op::Priority::High);
const auto timerBegin = std::chrono::high_resolution_clock::now();
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::ScaleMode, std::vector<op::HeatMapType>,
op::ScaleMode> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Applying user defined configuration - Google flags to program variables
// outputSize
op::Point<int> outputSize;
auto nRead = sscanf(FLAGS_resolution.c_str(), "%dx%d", &outputSize.x, &outputSize.y);
op::checkE(nRead, 2, "Error, resolution format (" + FLAGS_resolution + ") invalid, should be e.g., 960x540 ",
__LINE__, __FUNCTION__, __FILE__);
const auto outputSize = op::flagsToPoint(FLAGS_resolution, "1280x720");
// netInputSize
op::Point<int> netInputSize;
nRead = sscanf(FLAGS_net_resolution.c_str(), "%dx%d", &netInputSize.x, &netInputSize.y);
op::checkE(nRead, 2, "Error, net resolution format (" + FLAGS_net_resolution + ") invalid, should be e.g., 656x368 (multiples of 16)",
__LINE__, __FUNCTION__, __FILE__);
const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "656x368");
// faceNetInputSize
op::Point<int> faceNetInputSize;
nRead = sscanf(FLAGS_face_net_resolution.c_str(), "%dx%d", &faceNetInputSize.x, &faceNetInputSize.y);
op::checkE(nRead, 2, "Error, face net resolution format (" + FLAGS_face_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
// handNetInputSize
op::Point<int> handNetInputSize;
nRead = sscanf(FLAGS_hand_net_resolution.c_str(), "%dx%d", &handNetInputSize.x, &handNetInputSize.y);
op::checkE(nRead, 2, "Error, hand net resolution format (" + FLAGS_hand_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// keypointScale
const auto keypointScale = gflagToScaleMode(FLAGS_keypoint_scale);
const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
// heatmaps to add
const auto heatMapTypes = gflagToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
op::check(FLAGS_heatmaps_scale >= 0 && FLAGS_heatmaps_scale <= 2, "Non valid `heatmaps_scale`.", __LINE__, __FUNCTION__, __FILE__);
const auto heatMapScale = (FLAGS_heatmaps_scale == 0 ? op::ScaleMode::PlusMinusOne
: (FLAGS_heatmaps_scale == 1 ? op::ScaleMode::ZeroToOne : op::ScaleMode::UnsignedChar ));
// return
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale);
}
int openPoseTutorialWrapper2()
{
// logging_level
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.", __LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
// op::ConfigureLog::setPriorityThreshold(op::Priority::None); // To print all logging messages
op::log("Starting pose estimation demo.", op::Priority::High);
const auto timerBegin = std::chrono::high_resolution_clock::now();
// Applying user defined configuration
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> faceNetInputSize;
op::Point<int> handNetInputSize;
op::PoseModel poseModel;
op::ScaleMode keypointScale;
std::vector<op::HeatMapType> heatMapTypes;
op::ScaleMode heatMapScale;
std::tie(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale) = gflagsToOpParameters();
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Initializing the user custom classes
......@@ -448,15 +321,15 @@ int openPoseTutorialWrapper2()
opWrapper.setWorkerOutput(wUserOutput, workerOutputOnNewThread);
// Configure OpenPose
const op::WrapperStructPose wrapperStructPose{netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
FLAGS_num_scales, (float)FLAGS_scale_gap, gflagToRenderMode(FLAGS_render_pose), poseModel,
FLAGS_num_scales, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose), poseModel,
!FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale};
// Face configuration (use op::WrapperStructFace{} to disable it)
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, gflagToRenderMode(FLAGS_render_face, FLAGS_render_pose),
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_render_face, FLAGS_render_pose),
(float)FLAGS_alpha_face, (float)FLAGS_alpha_heatmap_face};
// Hand configuration (use op::WrapperStructHand{} to disable it)
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, gflagToDetectionMode(FLAGS_hand_detection_mode),
gflagToRenderMode(FLAGS_render_hand, FLAGS_render_pose), (float)FLAGS_alpha_hand,
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, op::flagsToDetectionMode(FLAGS_hand_detection_mode),
op::flagsToRenderMode(FLAGS_render_hand, FLAGS_render_pose), (float)FLAGS_alpha_hand,
(float)FLAGS_alpha_heatmap_hand};
// Consumer (comment or use default argument to disable any output)
const bool displayGui = false;
......
......@@ -2,6 +2,7 @@
#define OPENPOSE_PRODUCER_WEBCAM_READER_HPP
#include <atomic>
#include <mutex>
#include <thread>
#include <openpose/core/point.hpp>
#include "videoCaptureReader.hpp"
......
#ifndef OPENPOSE_UTILITIES_G_FLAGS_TO_OPEN_POSE_HPP
#define OPENPOSE_UTILITIES_G_FLAGS_TO_OPEN_POSE_HPP
#include <memory> // std::shared_ptr
#include <string>
#include <vector>
#include <openpose/core/enumClasses.hpp>
#include <openpose/core/point.hpp>
#include <openpose/pose/enumClasses.hpp>
#include <openpose/producer/producer.hpp>
// Helpers to translate raw gflags values (strings/ints from the command line)
// into the strongly-typed OpenPose configuration enums and objects.
namespace op
{
    // Converts a model name string (e.g. "COCO", "MPI") into a PoseModel enum value.
    PoseModel flagsToPoseModel(const std::string& poseModeString);
    // Converts the integer `keypoint_scale` flag into a ScaleMode enum value.
    ScaleMode flagsToScaleMode(const int keypointScale);
    // Determine type of frame source
    ProducerType flagsToProducerType(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex);
    // Builds the concrete Producer (image directory, video, or webcam) matching the flags.
    std::shared_ptr<Producer> flagsToProducer(const std::string& imageDirectory, const std::string& videoPath,
                                              const int webcamIndex, const std::string& webcamResolution, const double webcamFps);
    // Collects the heat-map channels (parts/background/PAFs) enabled by boolean flags.
    std::vector<HeatMapType> flagsToHeatMaps(const bool heatMapsAddParts, const bool heatMapsAddBkg, const bool heatMapsAddPAFs);
    // Picks a hand DetectionMode; -1 derives it from the producer type, hence the optional producer.
    DetectionMode flagsToDetectionMode(const int handDetectionModeFlag, const std::shared_ptr<Producer>& producer = nullptr);
    // Maps a render flag to a RenderMode; -1 falls back to `renderPoseFlag` (sentinel -2 = no fallback).
    RenderMode flagsToRenderMode(const int renderFlag, const int renderPoseFlag = -2);
    // Parses a "WIDTHxHEIGHT" string (e.g. "1280x720") into a Point<int>; errors if malformed.
    Point<int> flagsToPoint(const std::string& pointString, const std::string& pointExample = "1280x720");
}
#endif // OPENPOSE_UTILITIES_G_FLAGS_TO_OPEN_POSE_HPP
......@@ -8,6 +8,7 @@
#include "errorAndLog.hpp"
#include "fastMath.hpp"
#include "fileSystem.hpp"
#include "flagsToOpenPose.hpp"
#include "keypoint.hpp"
#include "macros.hpp"
#include "openCv.hpp"
......
#include <cstdio> // sscanf
#include <openpose/producer/imageDirectoryReader.hpp>
#include <openpose/producer/videoReader.hpp>
#include <openpose/producer/webcamReader.hpp>
#include <openpose/utilities/check.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/flagsToOpenPose.hpp>
namespace op
{
PoseModel flagsToPoseModel(const std::string& poseModeString)
{
try
{
log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
if (poseModeString == "COCO")
return PoseModel::COCO_18;
else if (poseModeString == "MPI")
return PoseModel::MPI_15;
else if (poseModeString == "MPI_4_layers")
return PoseModel::MPI_15_4;
else if (poseModeString == "BODY_22")
return PoseModel::BODY_22;
// else
error("String does not correspond to any model (COCO, MPI, MPI_4_layers)", __LINE__, __FUNCTION__, __FILE__);
return PoseModel::COCO_18;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return PoseModel::COCO_18;
}
}
ScaleMode flagsToScaleMode(const int keypointScale)
{
try
{
log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
if (keypointScale == 0)
return ScaleMode::InputResolution;
else if (keypointScale == 1)
return ScaleMode::NetOutputResolution;
else if (keypointScale == 2)
return ScaleMode::OutputResolution;
else if (keypointScale == 3)
return ScaleMode::ZeroToOne;
else if (keypointScale == 4)
return ScaleMode::PlusMinusOne;
// else
const std::string message = "String does not correspond to any scale mode: (0, 1, 2, 3, 4) for (InputResolution,"
" NetOutputResolution, OutputResolution, ZeroToOne, PlusMinusOne).";
error(message, __LINE__, __FUNCTION__, __FILE__);
return ScaleMode::InputResolution;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return ScaleMode::InputResolution;
}
}
ProducerType flagsToProducerType(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex)
{
try
{
log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Avoid duplicates (e.g. selecting at the time camera & video)
if (!imageDirectory.empty() && !videoPath.empty())
error("Selected simultaneously image directory and video. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
else if (!imageDirectory.empty() && webcamIndex != 0)
error("Selected simultaneously image directory and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
else if (!videoPath.empty() && webcamIndex != 0)
error("Selected simultaneously video and webcam. Please, select only one.", __LINE__, __FUNCTION__, __FILE__);
// Get desired ProducerType
if (!imageDirectory.empty())
return ProducerType::ImageDirectory;
else if (!videoPath.empty())
return ProducerType::Video;
else
return ProducerType::Webcam;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return ProducerType::Webcam;
}
}
std::shared_ptr<Producer> flagsToProducer(const std::string& imageDirectory, const std::string& videoPath, const int webcamIndex,
const std::string& webcamResolution, const double webcamFps)
{
try
{
log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
const auto type = flagsToProducerType(imageDirectory, videoPath, webcamIndex);
if (type == ProducerType::ImageDirectory)
return std::make_shared<ImageDirectoryReader>(imageDirectory);
else if (type == ProducerType::Video)
return std::make_shared<VideoReader>(videoPath);
else if (type == ProducerType::Webcam)
{
// cameraFrameSize
const auto webcamFrameSize = op::flagsToPoint(webcamResolution, "1280x720");
return std::make_shared<WebcamReader>(webcamIndex, webcamFrameSize, webcamFps);
}
// else
error("Undefined Producer selected.", __LINE__, __FUNCTION__, __FILE__);
return std::shared_ptr<Producer>{};
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return std::shared_ptr<Producer>{};
}
}
std::vector<HeatMapType> flagsToHeatMaps(const bool heatMapsAddParts, const bool heatMapsAddBkg, const bool heatMapsAddPAFs)
{
try
{
std::vector<HeatMapType> heatMapTypes;
if (heatMapsAddParts)
heatMapTypes.emplace_back(HeatMapType::Parts);
if (heatMapsAddBkg)
heatMapTypes.emplace_back(HeatMapType::Background);
if (heatMapsAddPAFs)
heatMapTypes.emplace_back(HeatMapType::PAFs);
return heatMapTypes;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
DetectionMode flagsToDetectionMode(const int handDetectionModeFlag, const std::shared_ptr<Producer>& producer)
{
try
{
if (handDetectionModeFlag == -1)
{
if (producer == nullptr)
error("Since there is no default producer, `hand_detection_mode` must be set.", __LINE__, __FUNCTION__, __FILE__);
const auto producerType = producer->getType();
if (producerType == ProducerType::Webcam)
return DetectionMode::Fast;
else if (producerType == ProducerType::ImageDirectory)
return DetectionMode::Iterative;
else if (producerType == ProducerType::Video)
return DetectionMode::Tracking;
}
else if (handDetectionModeFlag == 0)
return DetectionMode::Fast;
else if (handDetectionModeFlag == 1)
return DetectionMode::Iterative;
else if (handDetectionModeFlag == 2)
return DetectionMode::Tracking;
else if (handDetectionModeFlag == 3)
return DetectionMode::IterativeAndTracking;
// else
error("Undefined DetectionMode selected.", __LINE__, __FUNCTION__, __FILE__);
return DetectionMode::Fast;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return DetectionMode::Fast;
}
}
RenderMode flagsToRenderMode(const int renderFlag, const int renderPoseFlag)
{
try
{
if (renderFlag == -1 && renderPoseFlag != -2)
return flagsToRenderMode(renderPoseFlag, -2);
else if (renderFlag == 0)
return RenderMode::None;
else if (renderFlag == 1)
return RenderMode::Cpu;
else if (renderFlag == 2)
return RenderMode::Gpu;
// else
error("Undefined RenderMode selected.", __LINE__, __FUNCTION__, __FILE__);
return RenderMode::None;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return RenderMode::None;
}
}
Point<int> flagsToPoint(const std::string& pointString, const std::string& pointExample)
{
try
{
Point<int> point;
const auto nRead = sscanf(pointString.c_str(), "%dx%d", &point.x, &point.y);
checkE(nRead, 2, "Invalid resolution format: `" + pointString + "`, it should be e.g. `" + pointExample
+ "`.", __LINE__, __FUNCTION__, __FILE__);
return point;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return Point<int>{};
}
}
}
......@@ -149,8 +149,7 @@ namespace op
const auto index2 = (person * numberKeypoints + pairs[pair+1]) * keypoints.getSize(2);
if (keypoints[index1+2] > threshold && keypoints[index2+2] > threshold)
{
// const auto colorIndex = pair/2*3;
const auto colorIndex = pairs[pair+1]*3;
const auto colorIndex = pairs[pair+1]*3; // Before: colorIndex = pair/2*3;
const cv::Scalar color{colors[colorIndex % numberColors],
colors[(colorIndex+1) % numberColors],
colors[(colorIndex+2) % numberColors]};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册