提交 56833873 编写于 作者: G gineshidalgo99

3-D reconstruction fully integrated in OpenPose

上级 6477264f
......@@ -140,6 +140,7 @@ Each flag is divided into flag name, default value, and description.
- DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (only used when saving video from webcam). Set this value to the minimum value between the OpenPose displayed speed and the webcam real frame rate.");
- DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default example video.");
- DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20 images. Read all standard formats (jpg, png, bmp, etc.).");
- DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
- DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
- DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
- DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to 10, it will process 11 frames (0-10).");
......
......@@ -30,9 +30,9 @@ bin\OpenPoseDemo.exe --video examples\media\video.avi --face --hand
```
```
:: Windows - Library
windows\x64\Release\OpenPoseDemo.exe --video examples\media\video.avi
build\x64\Release\OpenPoseDemo.exe --video examples\media\video.avi
:: With face and hands
windows\x64\Release\OpenPoseDemo.exe --video examples\media\video.avi --face --hand
build\x64\Release\OpenPoseDemo.exe --video examples\media\video.avi --face --hand
```
......@@ -52,9 +52,9 @@ bin\OpenPoseDemo.exe --face --hand
```
```
:: Windows - Library
windows\x64\Release\OpenPoseDemo.exe
build\x64\Release\OpenPoseDemo.exe
:: With face and hands
windows\x64\Release\OpenPoseDemo.exe --face --hand
build\x64\Release\OpenPoseDemo.exe --face --hand
```
......@@ -74,9 +74,9 @@ bin\OpenPoseDemo.exe --image_dir examples\media\ --face --hand
```
```
:: Windows - Library
windows\x64\Release\OpenPoseDemo.exe --image_dir examples\media\
build\x64\Release\OpenPoseDemo.exe --image_dir examples\media\
:: With face and hands
windows\x64\Release\OpenPoseDemo.exe --image_dir examples\media\ --face --hand
build\x64\Release\OpenPoseDemo.exe --image_dir examples\media\ --face --hand
```
......@@ -97,9 +97,9 @@ bin\OpenPoseDemo.exe --net_resolution "1312x736" --scale_number 4 --scale_gap 0.
```
```
:: Windows - Library: Body
windows\x64\Release\OpenPoseDemo.exe --net_resolution "1312x736" --scale_number 4 --scale_gap 0.25
build\x64\Release\OpenPoseDemo.exe --net_resolution "1312x736" --scale_number 4 --scale_gap 0.25
:: Windows - Library: Body + Hand + Face
windows\x64\Release\OpenPoseDemo.exe --net_resolution "1312x736" --scale_number 4 --scale_gap 0.25 --hand --hand_scale_number 6 --hand_scale_range 0.4 --face
build\x64\Release\OpenPoseDemo.exe --net_resolution "1312x736" --scale_number 4 --scale_gap 0.25 --hand --hand_scale_number 6 --hand_scale_range 0.4 --face
```
......@@ -108,35 +108,34 @@ windows\x64\Release\OpenPoseDemo.exe --net_resolution "1312x736" --scale_number
1. Real-time demo
```
# Ubuntu
./build/examples/openpose/openpose.bin --3d --number_people_max 1
./build/examples/openpose/openpose.bin --flir_camera --3d --number_people_max 1
# With face and hands
./build/examples/openpose/openpose.bin --3d --number_people_max 1 --face --hand
./build/examples/openpose/openpose.bin --flir_camera --3d --number_people_max 1 --face --hand
```
```
:: Windows - Portable Demo
bin\OpenPoseDemo.exe --3d --number_people_max 1
bin\OpenPoseDemo.exe --flir_camera --3d --number_people_max 1
:: With face and hands
bin\OpenPoseDemo.exe --3d --number_people_max 1 --face --hand
bin\OpenPoseDemo.exe --flir_camera --3d --number_people_max 1 --face --hand
```
```
:: Windows - Library
windows\x64\Release\OpenPoseDemo.exe --3d --number_people_max 1
build\x64\Release\OpenPoseDemo.exe --flir_camera --3d --number_people_max 1
:: With face and hands
windows\x64\Release\OpenPoseDemo.exe --3d --number_people_max 1 --face --hand
build\x64\Release\OpenPoseDemo.exe --flir_camera --3d --number_people_max 1 --face --hand
```
2. Saving stereo camera images for a later post-processing:
```
# Ubuntu (same flags for Windows version)
./build/examples/openpose/openpose.bin --3d --num_gpu 0 --write_images output_folder_path/
./build/examples/openpose/openpose.bin --flir_camera --3d --number_people_max 1 --num_gpu 0 --write_images output_folder_path/
```
3. Reading previously saved stereo camera images and processing them:
```
# Ubuntu (same flags for Windows version)
# Optionally add `--face` and/or `--hand` to include face and/or hands
./build/examples/openpose/openpose.bin --image_dir output_folder_path/ --3d --number_people_max 1
# Ubuntu face and hands (same flags for Windows version)
./build/examples/openpose/openpose.bin --image_dir output_folder_path/ --3d --number_people_max 1 --face --hand
```
......
......@@ -186,6 +186,7 @@ OpenPose Library - Release Notes
14. Removed old `windows/` version. CMake is the only Windows version available.
2. Functions or parameters renamed:
1. Flag `no_display` renamed as `display`, able to select between `NoDisplay`, `Display2D`, `Display3D`, and `DisplayAll`.
2. 3-D reconstruction demo is now inside the OpenPose demo binary.
3. Main bugs fixed:
1. Slight speed up (~1%) for performing the non-maximum suppression stage only in the body part heatmaps channels, and not also in the PAF channels.
2. Fixed core-dumped in PoseRenderer with GUI when changed element to be rendered to something else than skeleton.
......
add_subdirectory(experimental_3d)
add_subdirectory(openpose)
add_subdirectory(tutorial_add_module)
add_subdirectory(tutorial_pose)
......
此差异已折叠。
# Example source files built in this directory (one executable per file).
set(EXAMPLE_FILES
3d.cpp)
foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
# Executable name: `<source>.bin` on Linux, fixed `OpenPoseDemo3D` on Windows.
get_filename_component(SOURCE_NAME ${EXAMPLE_FILE} NAME_WE)
if (UNIX AND NOT APPLE)
set(EXE_NAME "${SOURCE_NAME}.bin")
elseif (WIN32)
set(EXE_NAME "OpenPoseDemo3D")
endif ()
message(STATUS "Adding Example ${EXE_NAME}")
add_executable(${EXE_NAME} ${EXAMPLE_FILE})
target_link_libraries( ${EXE_NAME} openpose ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} ${Caffe_LIBS} ${MKL_LIBS} ${GLUT_LIBRARY} ${SPINNAKER_LIB})
if (WIN32)
# Group the target under "Examples" in the Visual Studio solution and
# generate a per-target .vcxproj.user from the shared template.
set_property(TARGET ${EXE_NAME} PROPERTY FOLDER "Examples")
configure_file(${CMAKE_SOURCE_DIR}/cmake/OpenPose.vcxproj.user
${CMAKE_CURRENT_BINARY_DIR}/${EXE_NAME}.vcxproj.user @ONLY)
# Properties->General->Output Directory
set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
endif (WIN32)
endforeach()
......@@ -52,6 +52,7 @@ DEFINE_string(video, "", "Use a video file instea
" example video.");
DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
" images. Read all standard formats (jpg, png, bmp, etc.).");
DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
......@@ -233,7 +234,7 @@ int openPoseDemo()
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
FLAGS_camera_resolution, FLAGS_camera_fps);
FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
// poseModel
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// JSON saving
......
......@@ -61,6 +61,7 @@ DEFINE_string(video, "", "Use a video file instea
" example video.");
DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
" images. Read all standard formats (jpg, png, bmp, etc.).");
DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
......@@ -235,7 +236,7 @@ int openPoseTutorialWrapper4()
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
FLAGS_camera_resolution, FLAGS_camera_fps);
FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
// poseModel
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// JSON saving
......
......@@ -38,6 +38,7 @@ DEFINE_string(video, "", "Use a video file instea
" example video.");
DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
" images. Read all standard formats (jpg, png, bmp, etc.).");
DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
" too long, it will skip frames. If it is too fast, it will slow it down.");
......@@ -62,8 +63,9 @@ int openPoseTutorialThread1()
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
FLAGS_camera_resolution, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time ? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time
? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Step 3 - Setting producer
......
......@@ -39,6 +39,7 @@ DEFINE_string(video, "", "Use a video file instea
" example video.");
DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
" images. Read all standard formats (jpg, png, bmp, etc.).");
DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
" too long, it will skip frames. If it is too fast, it will slow it down.");
......@@ -96,8 +97,9 @@ int openPoseTutorialThread2()
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
FLAGS_camera_resolution, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time ? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
const auto displayProducerFpsMode = (FLAGS_process_real_time
? op::ProducerFpsMode::OriginalFps : op::ProducerFpsMode::RetrievalFps);
producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Step 3 - Setting producer
......
......@@ -51,6 +51,7 @@ DEFINE_string(video, "", "Use a video file instea
" example video.");
DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
" images. Read all standard formats (jpg, png, bmp, etc.).");
DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
......@@ -310,7 +311,7 @@ int openPoseTutorialWrapper1()
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// producerType
const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
FLAGS_camera_resolution, FLAGS_camera_fps);
FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps);
// poseModel
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
// JSON saving
......
#ifndef OPENPOSE_EXPERIMENTAL_3D_CAMERA_PARAMETERS_HPP
#define OPENPOSE_EXPERIMENTAL_3D_CAMERA_PARAMETERS_HPP
#ifndef OPENPOSE_EXPERIMENTAL_3D_CAMERA_PARAMETER_READER_HPP
#define OPENPOSE_EXPERIMENTAL_3D_CAMERA_PARAMETER_READER_HPP
#include <opencv2/core/core.hpp>
#include <openpose/core/common.hpp>
......@@ -15,4 +15,4 @@ namespace op
OP_API unsigned long long getNumberCameras();
}
#endif // OPENPOSE_EXPERIMENTAL_3D_CAMERA_PARAMETERS_HPP
#endif // OPENPOSE_EXPERIMENTAL_3D_CAMERA_PARAMETER_READER_HPP
#ifndef OPENPOSE_3D_HEADERS_HPP
#define OPENPOSE_3D_HEADERS_HPP
// pose module
// 3d module
#include <openpose/3d/cameraParameterReader.hpp>
#include <openpose/3d/poseTriangulation.hpp>
#include <openpose/3d/wPoseTriangulation.hpp>
......
......@@ -52,7 +52,7 @@ namespace op
// Profiling speed
const auto profilerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// 3-D triangulation and reconstruction
std::vector<cv::Mat> cameraParameterMatrices;
std::vector<cv::Mat> cameraMatrices;
std::vector<Array<float>> poseKeypointVector;
std::vector<Array<float>> faceKeypointVector;
std::vector<Array<float>> leftHandKeypointVector;
......@@ -63,13 +63,13 @@ namespace op
faceKeypointVector.emplace_back(datumsElement.faceKeypoints);
leftHandKeypointVector.emplace_back(datumsElement.handKeypoints[0]);
rightHandKeypointVector.emplace_back(datumsElement.handKeypoints[1]);
cameraParameterMatrices.emplace_back(datumsElement.cameraParameterMatrix);
cameraMatrices.emplace_back(datumsElement.cameraMatrix);
}
// Pose 3-D reconstruction
auto poseKeypoints3D = reconstructArray(poseKeypointVector, cameraParameterMatrices);
auto faceKeypoints3D = reconstructArray(faceKeypointVector, cameraParameterMatrices);
auto leftHandKeypoints3D = reconstructArray(leftHandKeypointVector, cameraParameterMatrices);
auto rightHandKeypoints3D = reconstructArray(rightHandKeypointVector, cameraParameterMatrices);
auto poseKeypoints3D = reconstructArray(poseKeypointVector, cameraMatrices);
auto faceKeypoints3D = reconstructArray(faceKeypointVector, cameraMatrices);
auto leftHandKeypoints3D = reconstructArray(leftHandKeypointVector, cameraMatrices);
auto rightHandKeypoints3D = reconstructArray(rightHandKeypointVector, cameraMatrices);
// Assign to all tDatums
for (auto& datumsElement : *tDatums)
{
......
......@@ -174,9 +174,9 @@ namespace op
std::array<Array<float>, 2> handKeypoints3D;
/**
* 4x3 extrinsic parameter matrix of the camera.
* 3x4 camera matrix of the camera.
*/
cv::Mat cameraParameterMatrix;
cv::Mat cameraMatrix;
// ---------------------------------------- Other (internal) parameters ---------------------------------------- //
/**
......
#ifndef OPENPOSE_EXPERIMENTAL_3D_POINT_GREY_HPP
#define OPENPOSE_EXPERIMENTAL_3D_POINT_GREY_HPP
#include <openpose/core/common.hpp>
#include <openpose/thread/workerProducer.hpp>
namespace op
{
/**
 * FLIR (Point-Grey) camera producer worker.
 * Following the OpenPose `tutorial_wrapper/` examples, this is a custom class inherited from WorkerProducer.
 * This worker:
 * 1. Sets the hardware trigger and the buffer to get the latest obtained frame.
 * 2. Reads images from the FLIR cameras.
 * 3. Turns them into a std::vector<cv::Mat>.
 * 4. Returns the resulting images wrapped into a std::shared_ptr<std::vector<Datum>>.
 * The HW trigger + FLIR camera reading code is heavily based on the Spinnaker SDK examples
 * `AcquisitionMultipleCamera` and especially `Trigger`
 * (located in `src/`). See them for more details about the cameras.
 * See `examples/tutorial_wrapper/` for more details about inheriting the WorkerProducer class.
 */
class OP_API WFlirReader : public WorkerProducer<std::shared_ptr<std::vector<Datum>>>
{
public:
WFlirReader();
~WFlirReader();
// One-time setup run on the worker's own thread (presumably camera/trigger init — see the .cpp).
void initializationOnThread();
// Produces the next set of frames, one Datum per camera view.
std::shared_ptr<std::vector<Datum>> workProducer();
private:
// PIMPL idiom: keeps the Spinnaker SDK types out of this public header.
// http://www.cppsamples.com/common-tasks/pimpl.html
struct ImplWFlirReader;
std::unique_ptr<ImplWFlirReader> upImpl;
};
}
#endif // OPENPOSE_EXPERIMENTAL_3D_POINT_GREY_HPP
#ifndef OPENPOSE_EXPERIMENTAL_3D_HEADERS_HPP
#define OPENPOSE_EXPERIMENTAL_3D_HEADERS_HPP
// 3d module: convenience umbrella header aggregating the experimental 3-D public headers.
#include <openpose/experimental/3d/cameraParameters.hpp>
#include <openpose/experimental/3d/flirReader.hpp>
#endif // OPENPOSE_EXPERIMENTAL_3D_HEADERS_HPP
......@@ -2,7 +2,6 @@
#define OPENPOSE_EXPERIMENTAL_HEADERS_HPP
// experimental modules
#include <openpose/experimental/3d/headers.hpp>
#include <openpose/experimental/tracking/headers.hpp>
#endif // OPENPOSE_EXPERIMENTAL_HEADERS_HPP
......@@ -73,6 +73,7 @@ namespace op
{
try
{
auto datums = std::make_shared<TDatumsNoPtr>();
// Check last desired frame has not been reached
if (mNumberFramesToProcess != std::numeric_limits<unsigned long long>::max()
&& mGlobalCounter > mNumberFramesToProcess)
......@@ -81,9 +82,7 @@ namespace op
}
// If producer released -> it sends an empty cv::Mat + a datumProducerRunning signal
const bool datumProducerRunning = spProducer->isOpened();
auto datums = std::make_shared<TDatumsNoPtr>(1);
auto& datum = (*datums)[0];
// Check producer device is open
// If device is open
if (datumProducerRunning)
{
// Fast forward/backward - Seek to specific frame index desired
......@@ -98,30 +97,56 @@ namespace op
spVideoSeek->second = 0;
}
}
// Get cv::Mat
datum.name = spProducer->getFrameName();
datum.cvInputData = spProducer->getFrame();
if (datum.cvInputData.channels() != 3)
auto cvMats = spProducer->getFrames();
auto cameraMatrices = spProducer->getCameraMatrices();
// Check frames are not empty
checkIfTooManyConsecutiveEmptyFrames(mNumberConsecutiveEmptyFrames, cvMats.empty() || cvMats[0].empty());
if (!cvMats.empty())
{
const std::string commonMessage{"Input images must be 3-channel BGR."};
if (datum.cvInputData.channels() == 1)
datums->resize(cvMats.size());
// Datum cannot be assigned before resize()
auto& datum = (*datums)[0];
// Filling first element
datum.name = spProducer->getFrameName();
datum.cvInputData = cvMats[0];
if (!cameraMatrices.empty())
datum.cameraMatrix = cameraMatrices[0];
// Image integrity
if (datum.cvInputData.channels() != 3)
{
const std::string commonMessage{"Input images must be 3-channel BGR."};
// Grey to RGB if required
if (datum.cvInputData.channels() == 1)
{
log(commonMessage + " Converting grey image into BGR.", Priority::High);
cv::cvtColor(datum.cvInputData, datum.cvInputData, CV_GRAY2BGR);
}
else
error(commonMessage, __LINE__, __FUNCTION__, __FILE__);
}
datum.cvOutputData = datum.cvInputData;
// Resize if it's stereo-system
if (datums->size() > 1)
{
log(commonMessage + " Converting grey image into BGR.", Priority::High);
cv::cvtColor(datum.cvInputData, datum.cvInputData, CV_GRAY2BGR);
// Stereo-system: Assign all cv::Mat
for (auto i = 1u ; i < datums->size() ; i++)
{
auto& datumI = (*datums)[i];
datumI.name = datum.name;
datumI.cvInputData = cvMats[i];
datumI.cvOutputData = datumI.cvInputData;
if (cameraMatrices.size() > i)
datumI.cameraMatrix = cameraMatrices[i];
}
}
else
error(commonMessage, __LINE__, __FUNCTION__, __FILE__);
// Check producer is running
if (!datumProducerRunning || (*datums)[0].cvInputData.empty())
datums = nullptr;
// Increase counter if successful image
if (datums != nullptr)
mGlobalCounter++;
}
datum.cvOutputData = datum.cvInputData;
// Check frames are not empty
checkIfTooManyConsecutiveEmptyFrames(mNumberConsecutiveEmptyFrames, datum.cvInputData.empty());
}
// Check producer is running
if (!datumProducerRunning || datum.cvInputData.empty())
datums = nullptr;
// Increase counter if successful image
if (datums != nullptr)
mGlobalCounter++;
// Return result
return std::make_pair(datumProducerRunning, datums);
}
......
......@@ -24,6 +24,7 @@ namespace op
*/
enum class ProducerType : unsigned char
{
FlirCamera, /**< Stereo FLIR (Point-Grey) camera reader. Based on Spinnaker SDK. */
ImageDirectory, /**< An image directory reader. It is able to read images on a folder with a interface similar to the OpenCV cv::VideoCapture. */
IPCamera, /**< An IP camera frames extractor, extending the functionality of cv::VideoCapture. */
Video, /**< A video frames extractor, extending the functionality of cv::VideoCapture. */
......
#ifndef OPENPOSE_PRODUCER_FLIR_READER_HPP
#define OPENPOSE_PRODUCER_FLIR_READER_HPP
#include <openpose/core/common.hpp>
#include <openpose/producer/producer.hpp>
namespace op
{
/**
 * FlirReader is a Producer that extracts frames from FLIR (Point-Grey) stereo cameras. Its interface
 * imitates the cv::VideoCapture class, so it can be used quite similarly to the cv::VideoCapture class.
 * Thus, it is quite similar to VideoReader and WebcamReader.
 * (Note: the original comment here described an image directory reader; that was a stale copy-paste.)
 */
class OP_API FlirReader : public Producer
{
public:
/**
 * Constructor of FlirReader. It takes no arguments; the connected FLIR cameras are detected and
 * configured internally (see the flirReader.cpp implementation for details).
 */
explicit FlirReader();
~FlirReader();
// Returns one camera matrix per camera, used for 3-D triangulation downstream.
std::vector<cv::Mat> getCameraMatrices();
// Returns a unique name for the current frame (backed by mFrameNameCounter).
std::string getFrameName();
// Always reports open; the cameras have no closed state exposed through this interface.
inline bool isOpened() const
{
return true;
}
void release();
double get(const int capProperty);
void set(const int capProperty, const double value);
private:
// PIMPL idiom: keeps the Spinnaker SDK types out of this public header.
// http://www.cppsamples.com/common-tasks/pimpl.html
struct ImplFlirReader;
std::shared_ptr<ImplFlirReader> upImpl;
Point<int> mResolution;
long long mFrameNameCounter;
// Single-frame read (Producer interface); delegates conceptually to the multi-camera version below.
cv::Mat getRawFrame();
// Multi-frame read: one cv::Mat per camera of the stereo rig.
std::vector<cv::Mat> getRawFrames();
DELETE_COPY(FlirReader);
};
}
#endif // OPENPOSE_PRODUCER_FLIR_READER_HPP
......@@ -4,6 +4,7 @@
// producer module
#include <openpose/producer/datumProducer.hpp>
#include <openpose/producer/enumClasses.hpp>
#include <openpose/producer/flirReader.hpp>
#include <openpose/producer/imageDirectoryReader.hpp>
#include <openpose/producer/ipCameraReader.hpp>
#include <openpose/producer/producer.hpp>
......
......@@ -21,6 +21,8 @@ namespace op
*/
explicit ImageDirectoryReader(const std::string& imageDirectoryPath);
std::vector<cv::Mat> getCameraMatrices();
std::string getFrameName();
inline bool isOpened() const
......@@ -37,16 +39,6 @@ namespace op
void set(const int capProperty, const double value);
inline double get(const ProducerProperty property)
{
return Producer::get(property);
}
inline void set(const ProducerProperty property, const double value)
{
Producer::set(property, value);
}
private:
const std::string mImageDirectoryPath;
const std::vector<std::string> mFilePaths;
......@@ -55,6 +47,8 @@ namespace op
cv::Mat getRawFrame();
std::vector<cv::Mat> getRawFrames();
DELETE_COPY(ImageDirectoryReader);
};
}
......
......@@ -18,6 +18,8 @@ namespace op
*/
explicit IpCameraReader(const std::string& cameraPath);
std::vector<cv::Mat> getCameraMatrices();
std::string getFrameName();
inline double get(const int capProperty)
......@@ -35,6 +37,8 @@ namespace op
cv::Mat getRawFrame();
std::vector<cv::Mat> getRawFrames();
DELETE_COPY(IpCameraReader);
};
}
......
......@@ -31,10 +31,22 @@ namespace op
/**
* Main function of Producer, it retrieves and returns a new frame from the frames producer.
* @return cv::Mat with the new frame.
* @return cv::Mat with the new frame.
*/
cv::Mat getFrame();
/**
* Analogous to getFrame, but it could return > 1 frame.
* @return std::vector<cv::Mat> with the new frame(s).
*/
std::vector<cv::Mat> getFrames();
/**
* It retrieves and returns the camera matrices from the frames producer.
* @return std::vector<cv::Mat> with the camera matrices.
*/
virtual std::vector<cv::Mat> getCameraMatrices() = 0;
/**
* This function returns a unique frame name (e.g. the frame number for video, the
* frame counter for webcam, the image name for image directory reader, etc.).
......@@ -67,7 +79,7 @@ namespace op
/**
* This function releases and closes the Producer. After it is called, no more frames
* can be retrieved from Producer::getFrame.
* can be retrieved from Producer::getFrames.
*/
virtual void release() = 0;
......@@ -89,9 +101,18 @@ namespace op
*/
virtual void set(const int capProperty, const double value) = 0;
virtual double get(const ProducerProperty property) = 0;
/**
* Extra attributes that VideoCapture::get/set do not contain.
* @param property ProducerProperty indicating the property to be retrieved.
*/
double get(const ProducerProperty property);
virtual void set(const ProducerProperty property, const double value) = 0;
/**
* Extra attributes that VideoCapture::get/set do not contain.
* @param property ProducerProperty indicating the property to be modified.
* @param value double indicating the new value to be assigned.
*/
void set(const ProducerProperty property, const double value);
protected:
/**
......@@ -121,10 +142,17 @@ namespace op
/**
* Function to be defined by its children class. It retrieves and returns a new frame from the frames producer.
* @return cv::Mat with the new frame.
* @return cv::Mat with the new frame.
*/
virtual cv::Mat getRawFrame() = 0;
/**
* Function to be defined by its children class. It retrieves and returns a new frame from the frames producer.
* It is analogous to getRawFrame for producers that can return more than 1 image at a time.
* @return std::vector<cv::Mat> with the new frames.
*/
virtual std::vector<cv::Mat> getRawFrames() = 0;
private:
const ProducerType mType;
ProducerFpsMode mProducerFpsMode;
......
......@@ -47,19 +47,11 @@ namespace op
virtual void set(const int capProperty, const double value) = 0;
inline double get(const ProducerProperty property)
{
return Producer::get(property);
}
inline void set(const ProducerProperty property, const double value)
{
Producer::set(property, value);
}
protected:
virtual cv::Mat getRawFrame() = 0;
virtual std::vector<cv::Mat> getRawFrames() = 0;
private:
cv::VideoCapture mVideoCapture;
......
......@@ -20,6 +20,8 @@ namespace op
*/
explicit VideoReader(const std::string& videoPath);
std::vector<cv::Mat> getCameraMatrices();
std::string getFrameName();
inline double get(const int capProperty)
......@@ -37,6 +39,8 @@ namespace op
cv::Mat getRawFrame();
std::vector<cv::Mat> getRawFrames();
DELETE_COPY(VideoReader);
};
}
......
......@@ -29,6 +29,8 @@ namespace op
~WebcamReader();
std::vector<cv::Mat> getCameraMatrices();
std::string getFrameName();
double get(const int capProperty);
......@@ -46,6 +48,8 @@ namespace op
cv::Mat getRawFrame();
std::vector<cv::Mat> getRawFrames();
void bufferingThread();
DELETE_COPY(WebcamReader);
......
......@@ -22,15 +22,10 @@ namespace op
OP_API std::shared_ptr<Producer> flagsToProducer(const std::string& imageDirectory, const std::string& videoPath,
const std::string& ipCameraPath, const int webcamIndex,
const bool flirCamera = false,
const std::string& webcamResolution = "1280x720",
const double webcamFps = 30.);
OP_API std::shared_ptr<Producer> flagsToProducer(const std::string& imageDirectory, const std::string& videoPath,
const std::string& ipCameraPath, const int webcamIndex,
const bool flirCamera /*= false*/,
const std::string& webcamResolution /*= "1280x720"*/,
const double webcamFps /*= 30.*/);
OP_API std::vector<HeatMapType> flagsToHeatMaps(const bool heatMapsAddParts = false,
const bool heatMapsAddBkg = false,
const bool heatMapsAddPAFs = false);
......
......@@ -919,6 +919,8 @@ namespace op
// WGui
spWGui = {std::make_shared<WGui<TDatumsPtr>>(gui)};
}
else
error("Unknown DisplayMode.", __LINE__, __FUNCTION__, __FILE__);
}
// Set wrapper as configured
mConfigured = true;
......
# Source files of the 3-D reconstruction module, compiled into the main OpenPose library.
set(SOURCES_OP_3D
cameraParameterReader.cpp
defineTemplates.cpp
poseTriangulation.cpp)
......
#include <vector>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/experimental/3d/cameraParameters.hpp>
#include <openpose/3d/cameraParameterReader.hpp>
namespace op
{
......
......@@ -35,7 +35,7 @@ namespace op
poseKeypoints3D{datum.poseKeypoints3D},
faceKeypoints3D{datum.faceKeypoints3D},
handKeypoints3D(datum.handKeypoints3D), // Parentheses instead of braces to avoid error in GCC 4.8
cameraParameterMatrix{datum.cameraParameterMatrix},
cameraMatrix{datum.cameraMatrix},
// Other parameters
scaleInputToNetInputs{datum.scaleInputToNetInputs},
netInputSizes{datum.netInputSizes},
......@@ -74,7 +74,7 @@ namespace op
poseKeypoints3D = datum.poseKeypoints3D,
faceKeypoints3D = datum.faceKeypoints3D,
handKeypoints3D = datum.handKeypoints3D,
cameraParameterMatrix = datum.cameraParameterMatrix;
cameraMatrix = datum.cameraMatrix;
// Other parameters
scaleInputToNetInputs = datum.scaleInputToNetInputs;
netInputSizes = datum.netInputSizes;
......@@ -124,7 +124,7 @@ namespace op
std::swap(poseKeypoints3D, datum.poseKeypoints3D);
std::swap(faceKeypoints3D, datum.faceKeypoints3D);
std::swap(handKeypoints3D, datum.handKeypoints3D);
std::swap(cameraParameterMatrix, datum.cameraParameterMatrix);
std::swap(cameraMatrix, datum.cameraMatrix);
// Other parameters
std::swap(scaleInputToNetInputs, datum.scaleInputToNetInputs);
std::swap(netInputSizes, datum.netInputSizes);
......@@ -165,7 +165,7 @@ namespace op
std::swap(poseKeypoints3D, datum.poseKeypoints3D);
std::swap(faceKeypoints3D, datum.faceKeypoints3D);
std::swap(handKeypoints3D, datum.handKeypoints3D);
std::swap(cameraParameterMatrix, datum.cameraParameterMatrix);
std::swap(cameraMatrix, datum.cameraMatrix);
// Other parameters
std::swap(scaleInputToNetInputs, datum.scaleInputToNetInputs);
std::swap(netInputSizes, datum.netInputSizes);
......@@ -219,7 +219,7 @@ namespace op
datum.faceKeypoints3D = faceKeypoints3D.clone();
for (auto i = 0u ; i < datum.handKeypoints.size() ; i++)
datum.handKeypoints3D[i] = handKeypoints3D[i].clone();
datum.cameraParameterMatrix = cameraParameterMatrix.clone();
datum.cameraMatrix = cameraMatrix.clone();
// Other parameters
datum.scaleInputToNetInputs = scaleInputToNetInputs;
datum.netInputSizes = netInputSizes;
......
# Source files of the experimental 3-D module.
set(SOURCES_OP_3D
cameraParameters.cpp
flirReader.cpp)
# prepend() (from Utils.cmake) prefixes each file name with the current source directory.
include(${CMAKE_SOURCE_DIR}/cmake/Utils.cmake)
prepend(SOURCES_OP_3D_WITH_CP ${CMAKE_CURRENT_SOURCE_DIR} ${SOURCES_OP_3D})
# Export the full-path source list and append it to the global OpenPose source list.
set(SOURCES_OP_3D_WITH_CP ${SOURCES_OP_3D_WITH_CP} PARENT_SCOPE)
set(SOURCES_OPENPOSE ${SOURCES_OPENPOSE} ${SOURCES_OP_3D_WITH_CP} PARENT_SCOPE)
# On Linux, additionally build and install a standalone openpose_experimental_3d library.
if (UNIX AND NOT APPLE)
add_library(openpose_experimental_3d ${SOURCES_OP_3D})
set_target_properties(openpose_experimental_3d PROPERTIES COMPILE_FLAGS ${OP_CXX_FLAGS})
install(TARGETS openpose_experimental_3d
EXPORT OpenPose
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib/openpose)
endif (UNIX AND NOT APPLE)
add_subdirectory(3d)
add_subdirectory(tracking)
if (UNIX AND NOT APPLE)
add_custom_target(openpose_experimental)
add_dependencies(openpose_experimental openpose_tracking openpose_experimental_3d)
add_dependencies(openpose_experimental openpose_tracking)
endif (UNIX AND NOT APPLE)
set(SOURCES_OPENPOSE ${SOURCES_OPENPOSE} PARENT_SCOPE)
......@@ -408,6 +408,9 @@ namespace op
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
#else
const std::string WITH_3D_RENDERER_ERROR{"OpenPose CMake must be compiled with the `WITH_3D_RENDERER` flag in"
" order to use the 3-D visualization renderer. Alternatively, set 2-D rendering with `--display 2`."};
#endif
Gui3D::Gui3D(const Point<int>& outputSize, const bool fullScreen,
......@@ -431,8 +434,7 @@ namespace op
#else
UNUSED(poseModel);
if (mDisplayMode == DisplayMode::DisplayAll || mDisplayMode == DisplayMode::Display3D)
error("OpenPose must be compiled with `WITH_3D_RENDERER` in order to use the 3-D visualization"
"renderer. Alternatively, set 2-D rendering.", __LINE__, __FUNCTION__, __FILE__);
error(WITH_3D_RENDERER_ERROR, __LINE__, __FUNCTION__, __FILE__);
#endif
}
catch (const std::exception& e)
......
set(SOURCES_OP_PRODUCER
defineTemplates.cpp
flirReader.cpp
imageDirectoryReader.cpp
ipCameraReader.cpp
producer.cpp
......
#include <chrono>
#include <thread>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/imgproc.hpp> // cv::undistort
#ifdef WITH_FLIR_CAMERA
#include <Spinnaker.h>
#endif
#include <openpose/experimental/3d/cameraParameters.hpp>
#include <openpose/utilities/check.hpp>
#include <openpose/experimental/3d/flirReader.hpp>
#include <openpose/3d/cameraParameterReader.hpp>
#include <openpose/filestream/fileStream.hpp>
#include <openpose/utilities/fastMath.hpp>
#include <openpose/utilities/fileSystem.hpp>
#include <openpose/utilities/string.hpp>
#include <openpose/producer/flirReader.hpp>
namespace op
{
......@@ -319,105 +320,31 @@ namespace op
return result;
}
#else
const std::string WITH_FLIR_CAMERA_ERROR{"OpenPose CMake must be compiled with the `WITH_FLIR_CAMERA` flag in"
" order to use the FLIR camera. Alternatively, disable `--flir_camera`."};
#endif
struct WFlirReader::ImplWFlirReader
struct FlirReader::ImplFlirReader
{
#ifdef WITH_FLIR_CAMERA
bool mInitialized;
Spinnaker::CameraList mCameraList;
Spinnaker::SystemPtr mSystemPtr;
ImplWFlirReader() :
ImplFlirReader() :
mInitialized{false}
{
}
#endif
};
WFlirReader::WFlirReader()
FlirReader::FlirReader() :
Producer{ProducerType::FlirCamera},
#ifdef WITH_FLIR_CAMERA
: upImpl{new ImplWFlirReader{}}
upImpl{new ImplFlirReader{}},
#endif
{
try
{
#ifndef WITH_FLIR_CAMERA
error("OpenPose must be compiled with `WITH_FLIR_CAMERA` in order to use this class.",
__LINE__, __FUNCTION__, __FILE__);
#endif
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
// Destructor. If the cameras were successfully initialized, it stops image
// acquisition on every camera, resets their hardware triggers, deinitializes
// them, and finally releases the Spinnaker system handle. All of this only
// applies when compiled with the WITH_FLIR_CAMERA flag.
// Note: it reports failures through error(), matching the project-wide style.
WFlirReader::~WFlirReader()
{
    #ifdef WITH_FLIR_CAMERA
        try
        {
            if (upImpl->mInitialized)
            {
                // End acquisition for each camera
                // Notice that what is usually a one-step process is now two steps
                // because of the additional step of selecting the camera. It is worth
                // repeating that camera selection needs to be done once per loop.
                // It is possible to interact with cameras through the camera list with
                // GetByIndex(); this is an alternative to retrieving cameras as
                // Spinnaker::CameraPtr objects that can be quick and easy for small tasks.
                for (auto i = 0; i < upImpl->mCameraList.GetSize(); i++)
                    upImpl->mCameraList.GetByIndex(i)->EndAcquisition();
                for (auto i = 0; i < upImpl->mCameraList.GetSize(); i++)
                {
                    // Select camera
                    auto cameraPtr = upImpl->mCameraList.GetByIndex(i);
                    // Retrieve GenICam nodemap
                    auto& iNodeMap = cameraPtr->GetNodeMap();
                    // // Disable chunk data
                    // result = disableChunkData(iNodeMap);
                    // // if (result < 0)
                    // //     return result;
                    // Reset trigger
                    auto result = resetTrigger(iNodeMap);
                    if (result < 0)
                        error("Error happened..." + std::to_string(result), __LINE__, __FUNCTION__, __FILE__);
                    // Deinitialize each camera
                    // Each camera must be deinitialized separately by first
                    // selecting the camera and then deinitializing it.
                    cameraPtr->DeInit();
                }
                log("FLIR (Point-grey) capture completed. Releasing cameras...", Priority::High);
                // Clear camera list before releasing upImpl->mSystemPtr
                upImpl->mCameraList.Clear();
                // Release upImpl->mSystemPtr
                upImpl->mSystemPtr->ReleaseInstance();
            }
            // Fixed typo in user-facing log message: "Exitting" -> "Exiting"
            log("Cameras released! Exiting program.", Priority::High);
        }
        catch (const Spinnaker::Exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    #endif
}
void WFlirReader::initializationOnThread()
mFrameNameCounter{0}
{
#ifdef WITH_FLIR_CAMERA
try
......@@ -587,6 +514,17 @@ namespace op
log(" ", Priority::High);
}
const auto cvMats = getRawFrames();
// Security checks
if (cvMats.empty())
error("Cameras could not be opened.", __LINE__, __FUNCTION__, __FILE__);
// Get resolution
else
{
set(CV_CAP_PROP_FRAME_WIDTH, cvMats[0].cols);
set(CV_CAP_PROP_FRAME_HEIGHT, cvMats[0].rows);
}
log("\nRunning for all cameras...\n\n*** IMAGE ACQUISITION ***\n", Priority::High);
}
catch (const Spinnaker::Exception& e)
......@@ -597,58 +535,218 @@ namespace op
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
#else
error(WITH_FLIR_CAMERA_ERROR, __LINE__, __FUNCTION__, __FILE__);
#endif
}
std::shared_ptr<std::vector<Datum>> WFlirReader::workProducer()
FlirReader::~FlirReader()
{
try
{
#ifdef WITH_FLIR_CAMERA
try
release();
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
std::vector<cv::Mat> FlirReader::getCameraMatrices()
{
try
{
std::vector<cv::Mat> cameraMatrices(getNumberCameras());
for (auto i = 0ull ; i < cameraMatrices.size() ; i++)
cameraMatrices[i] = getM(i);
return cameraMatrices;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
std::string FlirReader::getFrameName()
{
const auto stringLength = 12u;
return toFixedLengthString( fastMax(0ll, longLongRound(mFrameNameCounter)), stringLength);
}
void FlirReader::release()
{
#ifdef WITH_FLIR_CAMERA
try
{
if (upImpl->mInitialized)
{
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Profiling speed
const auto profilerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// Get image from each camera
const auto cvMats = acquireImages(upImpl->mCameraList);
// Images to userDatum
auto datums3d = std::make_shared<std::vector<Datum>>(cvMats.size());
for (auto i = 0u ; i < cvMats.size() ; i++)
{
datums3d->at(i).cvInputData = cvMats.at(i);
(*datums3d)[i].cvOutputData = (*datums3d)[i].cvInputData;
(*datums3d)[i].cameraParameterMatrix = getM(i);
}
// Profiling speed
if (!cvMats.empty())
// End acquisition for each camera
// Notice that what is usually a one-step process is now two steps
// because of the additional step of selecting the camera. It is worth
// repeating that camera selection needs to be done once per loop.
// It is possible to interact with cameras through the camera list with
// GetByIndex(); this is an alternative to retrieving cameras as
// Spinnaker::CameraPtr objects that can be quick and easy for small tasks.
//
for (auto i = 0; i < upImpl->mCameraList.GetSize(); i++)
upImpl->mCameraList.GetByIndex(i)->EndAcquisition();
for (auto i = 0; i < upImpl->mCameraList.GetSize(); i++)
{
Profiler::timerEnd(profilerKey);
Profiler::printAveragedTimeMsOnIterationX(profilerKey, __LINE__, __FUNCTION__, __FILE__);
// Select camera
auto cameraPtr = upImpl->mCameraList.GetByIndex(i);
// Retrieve GenICam nodemap
auto& iNodeMap = cameraPtr->GetNodeMap();
// // Disable chunk data
// result = disableChunkData(iNodeMap);
// // if (result < 0)
// // return result;
// Reset trigger
auto result = resetTrigger(iNodeMap);
if (result < 0)
error("Error happened..." + std::to_string(result), __LINE__, __FUNCTION__, __FILE__);
// Deinitialize each camera
// Each camera must be deinitialized separately by first
// selecting the camera and then deinitializing it.
cameraPtr->DeInit();
}
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Return Datum
return datums3d;
log("FLIR (Point-grey) capture completed. Releasing cameras...", Priority::High);
// Clear camera list before releasing upImpl->mSystemPtr
upImpl->mCameraList.Clear();
// Release upImpl->mSystemPtr
upImpl->mSystemPtr->ReleaseInstance();
}
log("Cameras released! Exiting program.", Priority::High);
}
catch (const Spinnaker::Exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
#endif
}
cv::Mat FlirReader::getRawFrame()
{
try
{
#ifdef WITH_FLIR_CAMERA
try
{
return acquireImages(upImpl->mCameraList).at(0);
}
catch (const Spinnaker::Exception& e)
{
this->stop();
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return nullptr;
return cv::Mat();
}
#else
error("OpenPose must be compiled with `WITH_FLIR_CAMERA` in order to use this class.",
__LINE__, __FUNCTION__, __FILE__);
return nullptr;
error(WITH_FLIR_CAMERA_ERROR, __LINE__, __FUNCTION__, __FILE__);
return cv::Mat();
#endif
}
catch (const std::exception& e)
{
this->stop();
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return nullptr;
return cv::Mat();
}
}
// Grabs one raw (unprocessed) frame from each FLIR camera in the list.
// Returns an empty vector on any failure. If OpenPose was not compiled with
// WITH_FLIR_CAMERA, it reports the corresponding configuration error instead.
std::vector<cv::Mat> FlirReader::getRawFrames()
{
try
{
#ifdef WITH_FLIR_CAMERA
// Spinnaker calls may throw their own exception type, handled separately below
try
{
return acquireImages(upImpl->mCameraList);
}
catch (const Spinnaker::Exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
#else
// Built without FLIR support: report how to enable it
error(WITH_FLIR_CAMERA_ERROR, __LINE__, __FUNCTION__, __FILE__);
return {};
#endif
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
// OpenCV-style property getter (CV_CAP_PROP_*). Width/height take the
// configured rotation into account (90/270 degrees swap the two axes).
// Frame count and FPS are unknown for a live camera, so they return -1.
double FlirReader::get(const int capProperty)
{
    try
    {
        if (capProperty == CV_CAP_PROP_FRAME_WIDTH || capProperty == CV_CAP_PROP_FRAME_HEIGHT)
        {
            // For 0/180 degrees the native resolution applies; otherwise axes swap
            const auto rotation = Producer::get(ProducerProperty::Rotation);
            const auto noAxisSwap = (rotation == 0. || rotation == 180.);
            if (capProperty == CV_CAP_PROP_FRAME_WIDTH)
                return (noAxisSwap ? mResolution.x : mResolution.y);
            return (noAxisSwap ? mResolution.y : mResolution.x);
        }
        if (capProperty == CV_CAP_PROP_POS_FRAMES)
            return (double)mFrameNameCounter;
        if (capProperty == CV_CAP_PROP_FRAME_COUNT || capProperty == CV_CAP_PROP_FPS)
            return -1.;
        // Any other property is not supported by this producer
        log("Unknown property", Priority::Max, __LINE__, __FUNCTION__, __FILE__);
        return -1.;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return 0.;
    }
}
// OpenCV-style property setter (CV_CAP_PROP_*). Only frame width/height are
// writable; position/count/FPS are read-only and anything else is unknown.
void FlirReader::set(const int capProperty, const double value)
{
    try
    {
        if (capProperty == CV_CAP_PROP_FRAME_WIDTH)
            mResolution.x = {(int)value};
        else if (capProperty == CV_CAP_PROP_FRAME_HEIGHT)
            mResolution.y = {(int)value};
        // Read-only properties share the same diagnostic message
        else if (capProperty == CV_CAP_PROP_POS_FRAMES
                 || capProperty == CV_CAP_PROP_FRAME_COUNT
                 || capProperty == CV_CAP_PROP_FPS)
            log("This property is read-only.", Priority::Max, __LINE__, __FUNCTION__, __FILE__);
        else
            log("Unknown property", Priority::Max, __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
}
......@@ -38,6 +38,19 @@ namespace op
{
}
// Plain image folders carry no calibration information, so this producer
// exposes no camera matrices and always yields an empty vector.
std::vector<cv::Mat> ImageDirectoryReader::getCameraMatrices()
{
    try
    {
        // No camera parameters are associated with an image directory
        return std::vector<cv::Mat>{};
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return {};
    }
}
std::string ImageDirectoryReader::getFrameName()
{
return getFileNameNoExtension(mFilePaths.at(mFrameNameCounter));
......@@ -62,20 +75,35 @@ namespace op
}
}
std::vector<cv::Mat> ImageDirectoryReader::getRawFrames()
{
try
{
return std::vector<cv::Mat>{getRawFrame()};
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
double ImageDirectoryReader::get(const int capProperty)
{
try
{
if (capProperty == CV_CAP_PROP_FRAME_WIDTH)
{
if (get(ProducerProperty::Rotation) == 0. || get(ProducerProperty::Rotation) == 180.)
if (Producer::get(ProducerProperty::Rotation) == 0.
|| Producer::get(ProducerProperty::Rotation) == 180.)
return mResolution.x;
else
return mResolution.y;
}
else if (capProperty == CV_CAP_PROP_FRAME_HEIGHT)
{
if (get(ProducerProperty::Rotation) == 0. || get(ProducerProperty::Rotation) == 180.)
if (Producer::get(ProducerProperty::Rotation) == 0.
|| Producer::get(ProducerProperty::Rotation) == 180.)
return mResolution.y;
else
return mResolution.x;
......
......@@ -12,6 +12,19 @@ namespace op
{
}
// An IP camera stream provides no calibration data, so no camera matrices
// are available; this always returns an empty vector.
std::vector<cv::Mat> IpCameraReader::getCameraMatrices()
{
try
{
// No calibration associated with a single IP camera stream
return {};
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
std::string IpCameraReader::getFrameName()
{
try
......@@ -37,4 +50,17 @@ namespace op
return cv::Mat();
}
}
// Delegates multi-frame grabbing to the generic cv::VideoCapture-based base
// class; an IP camera behaves exactly like any other capture stream here.
std::vector<cv::Mat> IpCameraReader::getRawFrames()
{
    try
    {
        auto rawFrames = VideoCaptureReader::getRawFrames();
        return rawFrames;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return {};
    }
}
}
......@@ -37,7 +37,22 @@ namespace op
{
try
{
cv::Mat frame;
// Return first element from getFrames (if any)
const auto frames = getFrames();
return (frames.empty() ? cv::Mat() : frames[0]);
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat();
}
}
std::vector<cv::Mat> Producer::getFrames()
{
try
{
std::vector<cv::Mat> frames;
if (isOpened())
{
......@@ -45,20 +60,29 @@ namespace op
// sources (e.g. a video)
keepDesiredFrameRate();
// Get frame
frame = getRawFrame();
// Flip + rotate frame
flipAndRotate(frame);
// Check frame integrity
checkFrameIntegrity(frame);
frames = getRawFrames();
for (auto& frame : frames)
{
// Flip + rotate frame
flipAndRotate(frame);
// Check frame integrity
checkFrameIntegrity(frame);
// If any frame unvalid --> exit
if (frame.empty())
{
frames.clear();
break;
}
}
// Check if video capture did finish and close/restart it
ifEndedResetOrRelease();
}
return frame;
return frames;
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return cv::Mat();
return {};
}
}
......@@ -228,7 +252,8 @@ namespace op
// videos (i.e. there is a frame missing), mNumberEmptyFrames allows the program to be properly
// closed keeping the 0-index frame counting
if (mNumberEmptyFrames > 2
|| (mType != ProducerType::IPCamera && mType != ProducerType::Webcam
|| (mType != ProducerType::FlirCamera && mType != ProducerType::IPCamera
&& mType != ProducerType::Webcam
&& get(CV_CAP_PROP_POS_FRAMES) >= get(CV_CAP_PROP_FRAME_COUNT)))
{
// Repeat video
......@@ -282,9 +307,9 @@ namespace op
}
else
{
cv::Mat frame;
std::vector<cv::Mat> frames;
for (auto i = 0 ; i < std::floor(difference) ; i++)
frame = getRawFrame();
frames = getRawFrames();
}
}
// Low down frame extraction - sleep thread unless it is too slow in most frames (using
......
......@@ -83,6 +83,19 @@ namespace op
}
}
// Multi-view API adapter for single-view capture sources: wraps the single
// frame returned by getRawFrame() into a 1-element vector.
std::vector<cv::Mat> VideoCaptureReader::getRawFrames()
{
try
{
return std::vector<cv::Mat>{getRawFrame()};
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
void VideoCaptureReader::release()
{
try
......@@ -105,7 +118,9 @@ namespace op
{
// Specific cases
// If rotated 90 or 270 degrees, then width and height is exchanged
if ((capProperty == CV_CAP_PROP_FRAME_WIDTH || capProperty == CV_CAP_PROP_FRAME_HEIGHT) && (get(ProducerProperty::Rotation) != 0. && get(ProducerProperty::Rotation) != 180.))
if ((capProperty == CV_CAP_PROP_FRAME_WIDTH || capProperty == CV_CAP_PROP_FRAME_HEIGHT)
&& (Producer::get(ProducerProperty::Rotation) != 0.
&& Producer::get(ProducerProperty::Rotation) != 180.))
{
if (capProperty == CV_CAP_PROP_FRAME_WIDTH)
return mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT);
......
......@@ -9,6 +9,19 @@ namespace op
{
}
// A plain video file carries no calibration information, so this producer
// exposes no camera matrices and always yields an empty vector.
std::vector<cv::Mat> VideoReader::getCameraMatrices()
{
    try
    {
        // No calibration associated with a video file
        return std::vector<cv::Mat>{};
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return {};
    }
}
std::string VideoReader::getFrameName()
{
try
......@@ -34,4 +47,17 @@ namespace op
return cv::Mat();
}
}
// Delegates multi-frame grabbing to the generic cv::VideoCapture-based base
// class; a video file behaves like any other single-view capture stream.
std::vector<cv::Mat> VideoReader::getRawFrames()
{
    try
    {
        auto rawFrames = VideoCaptureReader::getRawFrames();
        return rawFrames;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return {};
    }
}
}
......@@ -58,6 +58,19 @@ namespace op
}
}
// A single webcam carries no calibration information, so this producer
// exposes no camera matrices and always yields an empty vector.
std::vector<cv::Mat> WebcamReader::getCameraMatrices()
{
    try
    {
        // No calibration associated with a generic webcam
        return std::vector<cv::Mat>{};
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return {};
    }
}
std::string WebcamReader::getFrameName()
{
try
......@@ -141,6 +154,19 @@ namespace op
}
}
std::vector<cv::Mat> WebcamReader::getRawFrames()
{
try
{
return std::vector<cv::Mat>{getRawFrame()};
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
}
}
void WebcamReader::bufferingThread()
{
mCloseThread = false;
......
#include <cstdio> // sscanf
// #include <openpose/producer/flirReader.hpp>
#include <openpose/producer/flirReader.hpp>
#include <openpose/producer/imageDirectoryReader.hpp>
#include <openpose/producer/ipCameraReader.hpp>
#include <openpose/producer/videoReader.hpp>
......@@ -122,11 +122,10 @@ namespace op
return ProducerType::Video;
else if (!ipCameraPath.empty())
return ProducerType::IPCamera;
// else if (flirCamera)
// return ProducerType::FlirCamera;
else if (flirCamera)
return ProducerType::FlirCamera;
else
return ProducerType::Webcam;
UNUSED(flirCamera);
}
catch (const std::exception& e)
{
......@@ -135,22 +134,6 @@ namespace op
}
}
// Backward-compatible overload without the FLIR camera flag: forwards to the
// full overload with `flirCamera` disabled (false).
std::shared_ptr<Producer> flagsToProducer(const std::string& imageDirectory, const std::string& videoPath,
const std::string& ipCameraPath, const int webcamIndex,
const std::string& webcamResolution, const double webcamFps)
{
try
{
// Delegate with flirCamera == false
return flagsToProducer(imageDirectory, videoPath, ipCameraPath, webcamIndex, false, webcamResolution,
webcamFps);
}
catch (const std::exception& e)
{
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
// Fallback return value after error handling
return std::shared_ptr<Producer>{};
}
}
std::shared_ptr<Producer> flagsToProducer(const std::string& imageDirectory, const std::string& videoPath,
const std::string& ipCameraPath, const int webcamIndex,
const bool flirCamera, const std::string& webcamResolution,
......@@ -167,8 +150,8 @@ namespace op
return std::make_shared<VideoReader>(videoPath);
else if (type == ProducerType::IPCamera)
return std::make_shared<IpCameraReader>(ipCameraPath);
// else if (type == ProducerType::FlirCamera)
// return std::make_shared<FlirReader>();
else if (type == ProducerType::FlirCamera)
return std::make_shared<FlirReader>();
else if (type == ProducerType::Webcam)
{
// cameraFrameSize
......
......@@ -105,7 +105,7 @@ namespace op
__LINE__, __FUNCTION__, __FILE__);
}
// If 3-D module, 1 person is the maximum
if (wrapperStructPose.reconstruct3d && wrapperStructPose.numberPeopleMax > 1)
if (wrapperStructPose.reconstruct3d && wrapperStructPose.numberPeopleMax != 1)
{
error("Set `--number_people_max 1` when using `--3d`. The 3-D reconstruction demo assumes there is"
" at most 1 person on each image.", __LINE__, __FUNCTION__, __FILE__);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册