OpenPose 1.0.0rc2: A Real-Time Multi-Person Keypoint Detection And Multi-Threading C++ Library
wrapperAuxiliary.hpp
1 #ifndef OPENPOSE_WRAPPER_WRAPPER_AUXILIARY_HPP
2 #define OPENPOSE_WRAPPER_WRAPPER_AUXILIARY_HPP
3 
12 
13 namespace op
14 {
27  OP_API void wrapperConfigureSanityChecks(WrapperStructPose& wrapperStructPose,
28  const WrapperStructFace& wrapperStructFace,
29  const WrapperStructHand& wrapperStructHand,
30  const WrapperStructExtra& wrapperStructExtra,
31  const WrapperStructInput& wrapperStructInput,
32  const WrapperStructOutput& wrapperStructOutput,
33  const bool renderOutput,
34  const bool userOutputWsEmpty,
35  const ThreadManagerMode threadManagerMode);
36 
45  OP_API void threadIdPP(unsigned long long& threadId, const bool multiThreadEnabled);
46 
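 // Descriptive note on the function above (an assumption inferred from its call sites, not part of the
 // original source): threadIdPP ("thread ID plus-plus") increments threadId by one when
 // multiThreadEnabled is true; in single-thread mode it leaves the ID untouched, so every worker group
 // configured below ends up scheduled on the same thread.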
53  template<typename TDatums,
54  typename TDatumsSP = std::shared_ptr<TDatums>,
55  typename TWorker = std::shared_ptr<Worker<TDatumsSP>>>
 56  OP_API void configureThreadManager(
 57  ThreadManager<TDatumsSP>& threadManager, const bool multiThreadEnabled,
58  const ThreadManagerMode threadManagerMode, const WrapperStructPose& wrapperStructPose,
59  const WrapperStructFace& wrapperStructFace, const WrapperStructHand& wrapperStructHand,
60  const WrapperStructExtra& wrapperStructExtra, const WrapperStructInput& wrapperStructInput,
61  const WrapperStructOutput& wrapperStructOutput,
62  const std::array<std::vector<TWorker>, int(WorkerType::Size)>& userWs,
63  const std::array<bool, int(WorkerType::Size)>& userWsOnNewThread);
64 }
65 
66 
67 
68 
69 
70 // Implementation
71 #include <openpose/3d/headers.hpp>
75 #include <openpose/gpu/gpu.hpp>
76 #include <openpose/gui/headers.hpp>
83 namespace op
84 {
85  template<typename TDatums, typename TDatumsSP, typename TWorker>
 86  void configureThreadManager(
 87  ThreadManager<TDatumsSP>& threadManager, const bool multiThreadEnabledTemp,
88  const ThreadManagerMode threadManagerMode, const WrapperStructPose& wrapperStructPoseTemp,
89  const WrapperStructFace& wrapperStructFace, const WrapperStructHand& wrapperStructHand,
90  const WrapperStructExtra& wrapperStructExtra, const WrapperStructInput& wrapperStructInput,
91  const WrapperStructOutput& wrapperStructOutput,
92  const std::array<std::vector<TWorker>, int(WorkerType::Size)>& userWs,
93  const std::array<bool, int(WorkerType::Size)>& userWsOnNewThread)
94  {
95  try
96  {
97  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
98 
99  // Editable arguments
100  auto wrapperStructPose = wrapperStructPoseTemp;
101  auto multiThreadEnabled = multiThreadEnabledTemp;
102 
103  // Workers
104  TWorker datumProducerW;
105  TWorker scaleAndSizeExtractorW;
106  TWorker cvMatToOpInputW;
107  TWorker cvMatToOpOutputW;
108  std::vector<std::vector<TWorker>> poseExtractorsWs;
109  std::vector<std::vector<TWorker>> poseTriangulationsWs;
110  std::vector<std::vector<TWorker>> jointAngleEstimationsWs;
111  std::vector<TWorker> postProcessingWs;
112  std::vector<TWorker> outputWs;
113  TWorker guiW;
114 
115  // User custom workers
116  const auto& userInputWs = userWs[int(WorkerType::Input)];
117  const auto& userPostProcessingWs = userWs[int(WorkerType::PostProcessing)];
118  const auto& userOutputWs = userWs[int(WorkerType::Output)];
119  const auto userInputWsOnNewThread = userWsOnNewThread[int(WorkerType::Input)];
120  const auto userPostProcessingWsOnNewThread = userWsOnNewThread[int(WorkerType::PostProcessing)];
121  const auto userOutputWsOnNewThread = userWsOnNewThread[int(WorkerType::Output)];
122 
123  // Video seek
124  const auto spVideoSeek = std::make_shared<std::pair<std::atomic<bool>, std::atomic<int>>>();
 125  // It cannot be initialized directly in the constructor call (compiler error: std::atomic is not copyable)
126  spVideoSeek->first = false;
127  spVideoSeek->second = 0;
128 
129  // Required parameters
130  const auto renderOutput = wrapperStructPose.renderMode != RenderMode::None
131  || wrapperStructFace.renderMode != RenderMode::None
132  || wrapperStructHand.renderMode != RenderMode::None;
133  const auto renderOutputGpu = wrapperStructPose.renderMode == RenderMode::Gpu
134  || wrapperStructFace.renderMode == RenderMode::Gpu
135  || wrapperStructHand.renderMode == RenderMode::Gpu;
136  const auto renderFace = wrapperStructFace.enable && wrapperStructFace.renderMode != RenderMode::None;
137  const auto renderHand = wrapperStructHand.enable && wrapperStructHand.renderMode != RenderMode::None;
138  const auto renderHandGpu = wrapperStructHand.enable && wrapperStructHand.renderMode == RenderMode::Gpu;
139 
140  // Check no wrong/contradictory flags enabled
141  const auto userOutputWsEmpty = userOutputWs.empty();
142  wrapperConfigureSanityChecks(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
143  wrapperStructInput, wrapperStructOutput, renderOutput, userOutputWsEmpty,
144  threadManagerMode);
145 
146  // Get number threads
147  auto numberThreads = wrapperStructPose.gpuNumber;
148  auto gpuNumberStart = wrapperStructPose.gpuNumberStart;
149  // CPU --> 1 thread or no pose extraction
150  if (getGpuMode() == GpuMode::NoGpu)
151  {
152  numberThreads = (wrapperStructPose.gpuNumber == 0 ? 0 : 1);
153  gpuNumberStart = 0;
 154  // Disabling multi-threading makes the code 400 ms faster (2.3 sec vs. 2.7 sec on an i7-6850K)
 155  // and fixes the bug where the screen was not properly displayed and only refreshed occasionally
 156  // Note: The screen bug could also be fixed by using waitKey(30) rather than waitKey(1)
157  multiThreadEnabled = false;
158  }
159  // GPU --> user picks (<= #GPUs)
160  else
161  {
162  // Get total number GPUs
163  const auto totalGpuNumber = getGpuNumber();
164  // If number GPU < 0 --> set it to all the available GPUs
165  if (numberThreads < 0)
166  {
167  if (totalGpuNumber <= gpuNumberStart)
 168  error("Initial GPU index (`--num_gpu_start`) must be lower than the total number of"
 169  " available GPUs.", __LINE__, __FUNCTION__, __FILE__);
170  numberThreads = totalGpuNumber - gpuNumberStart;
171  // Reset initial GPU to 0 (we want them all)
172  // Logging message
173  log("Auto-detecting all available GPUs... Detected " + std::to_string(totalGpuNumber)
174  + " GPU(s), using " + std::to_string(numberThreads) + " of them starting at GPU "
175  + std::to_string(gpuNumberStart) + ".", Priority::High);
176  }
177  // Sanity check
178  if (gpuNumberStart + numberThreads > totalGpuNumber)
 179  error("Initial GPU selected (`--num_gpu_start`) + number of GPUs to use (`--num_gpu`) must"
 180  " be lower than or equal to the total number of GPUs in your machine ("
181  + std::to_string(gpuNumberStart) + " + "
182  + std::to_string(numberThreads) + " vs. "
183  + std::to_string(totalGpuNumber) + ").",
184  __LINE__, __FUNCTION__, __FILE__);
185  }
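 // Illustrative example (assumption, not in the original source): on a machine where getGpuNumber()
 // returns 4, gpuNumber = -1 and gpuNumberStart = 1 auto-detect all GPUs and set
 // numberThreads = 4 - 1 = 3 (one worker per GPU 1, 2 and 3), whereas gpuNumberStart = 2 with
 // gpuNumber = 3 triggers the sanity-check error above (2 + 3 > 4).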
186 
187  // Proper format
188  const auto writeImagesCleaned = formatAsDirectory(wrapperStructOutput.writeImages);
189  const auto writeKeypointCleaned = formatAsDirectory(wrapperStructOutput.writeKeypoint);
190  const auto writeJsonCleaned = formatAsDirectory(wrapperStructOutput.writeJson);
191  const auto writeHeatMapsCleaned = formatAsDirectory(wrapperStructOutput.writeHeatMaps);
192  const auto modelFolder = formatAsDirectory(wrapperStructPose.modelFolder);
193 
194  // Common parameters
195  auto finalOutputSize = wrapperStructPose.outputSize;
196  Point<int> producerSize{-1,-1};
197  const auto oPProducer = (wrapperStructInput.producerSharedPtr != nullptr);
198  if (oPProducer)
199  {
200  // 1. Set producer properties
201  const auto displayProducerFpsMode = (wrapperStructInput.realTimeProcessing
 202  ? ProducerFpsMode::OriginalFps : ProducerFpsMode::RetrievalFps);
 203  wrapperStructInput.producerSharedPtr->setProducerFpsMode(displayProducerFpsMode);
204  wrapperStructInput.producerSharedPtr->set(ProducerProperty::Flip, wrapperStructInput.frameFlip);
205  wrapperStructInput.producerSharedPtr->set(ProducerProperty::Rotation, wrapperStructInput.frameRotate);
206  wrapperStructInput.producerSharedPtr->set(ProducerProperty::AutoRepeat,
207  wrapperStructInput.framesRepeat);
208  // 2. Set finalOutputSize
209  producerSize = Point<int>{(int)wrapperStructInput.producerSharedPtr->get(CV_CAP_PROP_FRAME_WIDTH),
210  (int)wrapperStructInput.producerSharedPtr->get(CV_CAP_PROP_FRAME_HEIGHT)};
211  // Set finalOutputSize to input size if desired
212  if (finalOutputSize.x == -1 || finalOutputSize.y == -1)
213  finalOutputSize = producerSize;
214  }
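 // Illustrative example (assumption, not in the original source): for a 1280x720 webcam producer and
 // the default outputSize of -1x-1, producerSize and finalOutputSize both become 1280x720; an
 // explicitly requested outputSize such as 656x368 would be kept as-is.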
215 
216  // Producer
217  if (oPProducer)
218  {
219  const auto datumProducer = std::make_shared<DatumProducer<TDatums>>(
220  wrapperStructInput.producerSharedPtr, wrapperStructInput.frameFirst, wrapperStructInput.frameStep,
221  wrapperStructInput.frameLast, spVideoSeek
222  );
223  datumProducerW = std::make_shared<WDatumProducer<TDatumsSP, TDatums>>(datumProducer);
224  }
225  else
226  datumProducerW = nullptr;
227 
228  std::vector<std::shared_ptr<PoseExtractorNet>> poseExtractorNets;
229  std::vector<std::shared_ptr<FaceExtractorNet>> faceExtractorNets;
230  std::vector<std::shared_ptr<HandExtractorNet>> handExtractorNets;
231  std::vector<std::shared_ptr<PoseGpuRenderer>> poseGpuRenderers;
232  std::shared_ptr<PoseCpuRenderer> poseCpuRenderer;
233  if (numberThreads > 0)
234  {
235  // Get input scales and sizes
236  const auto scaleAndSizeExtractor = std::make_shared<ScaleAndSizeExtractor>(
237  wrapperStructPose.netInputSize, finalOutputSize, wrapperStructPose.scalesNumber,
238  wrapperStructPose.scaleGap
239  );
240  scaleAndSizeExtractorW = std::make_shared<WScaleAndSizeExtractor<TDatumsSP>>(scaleAndSizeExtractor);
241 
242  // Input cvMat to OpenPose input & output format
243  const auto cvMatToOpInput = std::make_shared<CvMatToOpInput>(wrapperStructPose.poseModel);
244  cvMatToOpInputW = std::make_shared<WCvMatToOpInput<TDatumsSP>>(cvMatToOpInput);
245  if (renderOutput)
246  {
247  const auto cvMatToOpOutput = std::make_shared<CvMatToOpOutput>();
248  cvMatToOpOutputW = std::make_shared<WCvMatToOpOutput<TDatumsSP>>(cvMatToOpOutput);
249  }
250 
251  // Pose estimators & renderers
252  std::vector<TWorker> cpuRenderers;
253  poseExtractorsWs.clear();
254  poseExtractorsWs.resize(numberThreads);
255  if (wrapperStructPose.enable)
256  {
257  // Pose estimators
258  for (auto gpuId = 0; gpuId < numberThreads; gpuId++)
259  poseExtractorNets.emplace_back(std::make_shared<PoseExtractorCaffe>(
260  wrapperStructPose.poseModel, modelFolder, gpuId + gpuNumberStart,
261  wrapperStructPose.heatMapTypes, wrapperStructPose.heatMapScale,
262  wrapperStructPose.addPartCandidates, wrapperStructPose.enableGoogleLogging
263  ));
264 
265  // Pose renderers
266  if (renderOutputGpu || wrapperStructPose.renderMode == RenderMode::Cpu)
267  {
 268  // If wrapperStructPose.renderMode != RenderMode::Gpu but renderOutput is true, we still create an
 269  // alpha = 0 pose renderer in order to keep the background-removal option
270  const auto alphaKeypoint = (wrapperStructPose.renderMode != RenderMode::None
271  ? wrapperStructPose.alphaKeypoint : 0.f);
272  const auto alphaHeatMap = (wrapperStructPose.renderMode != RenderMode::None
273  ? wrapperStructPose.alphaHeatMap : 0.f);
274  // GPU rendering
275  if (renderOutputGpu)
276  {
277  for (const auto& poseExtractorNet : poseExtractorNets)
278  {
279  poseGpuRenderers.emplace_back(std::make_shared<PoseGpuRenderer>(
280  wrapperStructPose.poseModel, poseExtractorNet, wrapperStructPose.renderThreshold,
281  wrapperStructPose.blendOriginalFrame, alphaKeypoint,
282  alphaHeatMap, wrapperStructPose.defaultPartToRender
283  ));
284  }
285  }
286  // CPU rendering
287  if (wrapperStructPose.renderMode == RenderMode::Cpu)
288  {
289  poseCpuRenderer = std::make_shared<PoseCpuRenderer>(
290  wrapperStructPose.poseModel, wrapperStructPose.renderThreshold,
291  wrapperStructPose.blendOriginalFrame, alphaKeypoint, alphaHeatMap,
292  wrapperStructPose.defaultPartToRender);
293  cpuRenderers.emplace_back(std::make_shared<WPoseRenderer<TDatumsSP>>(poseCpuRenderer));
294  }
295  }
296  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
297 
298  // Pose extractor(s)
299  poseExtractorsWs.resize(poseExtractorNets.size());
300  const auto personIdExtractor = (wrapperStructExtra.identification
301  ? std::make_shared<PersonIdExtractor>() : nullptr);
302  // Keep top N people
303  // Added right after PoseExtractorNet to avoid:
304  // 1) Rendering people that are later deleted (wrong visualization).
305  // 2) Processing faces and hands on people that will be deleted (speed up).
306  // 3) Running tracking before deleting the people.
307  // Add KeepTopNPeople for each PoseExtractorNet
308  const auto keepTopNPeople = (wrapperStructPose.numberPeopleMax > 0 ?
309  std::make_shared<KeepTopNPeople>(wrapperStructPose.numberPeopleMax)
310  : nullptr);
311  // Person tracker
312  auto personTrackers = std::make_shared<std::vector<std::shared_ptr<PersonTracker>>>();
313  if (wrapperStructExtra.tracking > -1)
314  personTrackers->emplace_back(
315  std::make_shared<PersonTracker>(wrapperStructExtra.tracking == 0));
316  for (auto i = 0u; i < poseExtractorsWs.size(); i++)
317  {
318  // OpenPose keypoint detector + keepTopNPeople
319  // + ID extractor (experimental) + tracking (experimental)
320  const auto poseExtractor = std::make_shared<PoseExtractor>(
321  poseExtractorNets.at(i), keepTopNPeople, personIdExtractor, personTrackers,
322  wrapperStructPose.numberPeopleMax, wrapperStructExtra.tracking);
323  poseExtractorsWs.at(i) = {std::make_shared<WPoseExtractor<TDatumsSP>>(poseExtractor)};
324  // // Just OpenPose keypoint detector
325  // poseExtractorsWs.at(i) = {std::make_shared<WPoseExtractorNet<TDatumsSP>>(
326  // poseExtractorNets.at(i))};
327  }
328 
329  // // (Before tracking / id extractor)
330  // // Added right after PoseExtractorNet to avoid:
331  // // 1) Rendering people that are later deleted (wrong visualization).
332  // // 2) Processing faces and hands on people that will be deleted (speed up).
333  // if (wrapperStructPose.numberPeopleMax > 0)
334  // {
335  // // Add KeepTopNPeople for each PoseExtractorNet
336  // const auto keepTopNPeople = std::make_shared<KeepTopNPeople>(
337  // wrapperStructPose.numberPeopleMax);
338  // for (auto& wPose : poseExtractorsWs)
339  // wPose.emplace_back(std::make_shared<WKeepTopNPeople<TDatumsSP>>(keepTopNPeople));
340  // }
341  }
342 
343 
344  // Face extractor(s)
345  if (wrapperStructFace.enable)
346  {
347  // Face detector
348  // OpenPose face detector
349  if (wrapperStructPose.enable)
350  {
351  const auto faceDetector = std::make_shared<FaceDetector>(wrapperStructPose.poseModel);
352  for (auto& wPose : poseExtractorsWs)
353  wPose.emplace_back(std::make_shared<WFaceDetector<TDatumsSP>>(faceDetector));
354  }
355  // OpenCV face detector
356  else
357  {
358  log("Body keypoint detection is disabled. Hence, using OpenCV face detector (much less"
359  " accurate but faster).", Priority::High);
360  for (auto& wPose : poseExtractorsWs)
361  {
 362  // 1 FaceDetectorOpenCV per thread, since the OpenCV face detector is not thread-safe
363  const auto faceDetectorOpenCV = std::make_shared<FaceDetectorOpenCV>(modelFolder);
364  wPose.emplace_back(
365  std::make_shared<WFaceDetectorOpenCV<TDatumsSP>>(faceDetectorOpenCV)
366  );
367  }
368  }
369  // Face keypoint extractor
370  for (auto gpu = 0u; gpu < poseExtractorsWs.size(); gpu++)
371  {
372  // Face keypoint extractor
373  const auto netOutputSize = wrapperStructFace.netInputSize;
374  const auto faceExtractorNet = std::make_shared<FaceExtractorCaffe>(
375  wrapperStructFace.netInputSize, netOutputSize, modelFolder,
376  gpu + gpuNumberStart, wrapperStructPose.heatMapTypes, wrapperStructPose.heatMapScale,
377  wrapperStructPose.enableGoogleLogging
378  );
379  faceExtractorNets.emplace_back(faceExtractorNet);
380  poseExtractorsWs.at(gpu).emplace_back(
381  std::make_shared<WFaceExtractorNet<TDatumsSP>>(faceExtractorNet));
382  }
383  }
384 
385  // Hand extractor(s)
386  if (wrapperStructHand.enable)
387  {
388  const auto handDetector = std::make_shared<HandDetector>(wrapperStructPose.poseModel);
389  for (auto gpu = 0u; gpu < poseExtractorsWs.size(); gpu++)
390  {
391  // Hand detector
392  // If tracking
393  if (wrapperStructHand.tracking)
394  poseExtractorsWs.at(gpu).emplace_back(
395  std::make_shared<WHandDetectorTracking<TDatumsSP>>(handDetector)
396  );
397  // If detection
398  else
399  poseExtractorsWs.at(gpu).emplace_back(
400  std::make_shared<WHandDetector<TDatumsSP>>(handDetector));
401  // Hand keypoint extractor
402  const auto netOutputSize = wrapperStructHand.netInputSize;
403  const auto handExtractorNet = std::make_shared<HandExtractorCaffe>(
404  wrapperStructHand.netInputSize, netOutputSize, modelFolder,
405  gpu + gpuNumberStart, wrapperStructHand.scalesNumber, wrapperStructHand.scaleRange,
406  wrapperStructPose.heatMapTypes, wrapperStructPose.heatMapScale,
407  wrapperStructPose.enableGoogleLogging
408  );
409  handExtractorNets.emplace_back(handExtractorNet);
410  poseExtractorsWs.at(gpu).emplace_back(
411  std::make_shared<WHandExtractorNet<TDatumsSP>>(handExtractorNet)
412  );
413  // If tracking
414  if (wrapperStructHand.tracking)
415  poseExtractorsWs.at(gpu).emplace_back(
416  std::make_shared<WHandDetectorUpdate<TDatumsSP>>(handDetector)
417  );
418  }
419  }
420 
421  // Pose renderer(s)
422  if (!poseGpuRenderers.empty())
423  for (auto i = 0u; i < poseExtractorsWs.size(); i++)
424  poseExtractorsWs.at(i).emplace_back(std::make_shared<WPoseRenderer<TDatumsSP>>(
425  poseGpuRenderers.at(i)
426  ));
427 
428  // Face renderer(s)
429  if (renderFace)
430  {
431  // CPU rendering
432  if (wrapperStructFace.renderMode == RenderMode::Cpu)
433  {
434  // Construct face renderer
435  const auto faceRenderer = std::make_shared<FaceCpuRenderer>(wrapperStructFace.renderThreshold,
436  wrapperStructFace.alphaKeypoint,
437  wrapperStructFace.alphaHeatMap);
438  // Add worker
439  cpuRenderers.emplace_back(std::make_shared<WFaceRenderer<TDatumsSP>>(faceRenderer));
440  }
441  // GPU rendering
442  else if (wrapperStructFace.renderMode == RenderMode::Gpu)
443  {
444  for (auto i = 0u; i < poseExtractorsWs.size(); i++)
445  {
446  // Construct face renderer
447  const auto faceRenderer = std::make_shared<FaceGpuRenderer>(
448  wrapperStructFace.renderThreshold, wrapperStructFace.alphaKeypoint,
449  wrapperStructFace.alphaHeatMap
450  );
451  // Performance boost -> share spGpuMemory for all renderers
452  if (!poseGpuRenderers.empty())
453  {
454  const bool isLastRenderer = !renderHandGpu;
455  const auto renderer = std::static_pointer_cast<PoseGpuRenderer>(
456  poseGpuRenderers.at(i)
457  );
458  faceRenderer->setSharedParametersAndIfLast(renderer->getSharedParameters(),
459  isLastRenderer);
460  }
461  // Add worker
462  poseExtractorsWs.at(i).emplace_back(
463  std::make_shared<WFaceRenderer<TDatumsSP>>(faceRenderer));
464  }
465  }
466  else
467  error("Unknown RenderMode.", __LINE__, __FUNCTION__, __FILE__);
468  }
469 
470  // Hand renderer(s)
471  if (renderHand)
472  {
473  // CPU rendering
474  if (wrapperStructHand.renderMode == RenderMode::Cpu)
475  {
476  // Construct hand renderer
477  const auto handRenderer = std::make_shared<HandCpuRenderer>(wrapperStructHand.renderThreshold,
478  wrapperStructHand.alphaKeypoint,
479  wrapperStructHand.alphaHeatMap);
480  // Add worker
481  cpuRenderers.emplace_back(std::make_shared<WHandRenderer<TDatumsSP>>(handRenderer));
482  }
483  // GPU rendering
484  else if (wrapperStructHand.renderMode == RenderMode::Gpu)
485  {
486  for (auto i = 0u; i < poseExtractorsWs.size(); i++)
487  {
488  // Construct hands renderer
489  const auto handRenderer = std::make_shared<HandGpuRenderer>(
490  wrapperStructHand.renderThreshold, wrapperStructHand.alphaKeypoint,
491  wrapperStructHand.alphaHeatMap
492  );
493  // Performance boost -> share spGpuMemory for all renderers
494  if (!poseGpuRenderers.empty())
495  {
496  const bool isLastRenderer = true;
497  const auto renderer = std::static_pointer_cast<PoseGpuRenderer>(
498  poseGpuRenderers.at(i)
499  );
500  handRenderer->setSharedParametersAndIfLast(renderer->getSharedParameters(),
501  isLastRenderer);
502  }
503  // Add worker
504  poseExtractorsWs.at(i).emplace_back(
505  std::make_shared<WHandRenderer<TDatumsSP>>(handRenderer));
506  }
507  }
508  else
509  error("Unknown RenderMode.", __LINE__, __FUNCTION__, __FILE__);
510  }
511 
512  // 3-D reconstruction
513  poseTriangulationsWs.clear();
514  if (wrapperStructExtra.reconstruct3d)
515  {
 516  // For all (body/face/hands): PoseTriangulation ~30 msec; 8 GPUs ~30 msec for keypoint estimation
517  poseTriangulationsWs.resize(fastMax(1, int(poseExtractorsWs.size() / 4)));
518  for (auto i = 0u ; i < poseTriangulationsWs.size() ; i++)
519  {
520  const auto poseTriangulation = std::make_shared<PoseTriangulation>(
521  wrapperStructExtra.minViews3d);
522  poseTriangulationsWs.at(i) = {std::make_shared<WPoseTriangulation<TDatumsSP>>(
523  poseTriangulation)};
524  }
525  }
 526  // Intermediate workers (e.g. OpenPose format to cv::Mat, JSON & frame recorders, ...)
527  postProcessingWs.clear();
528  // // Person ID identification (when no multi-thread and no dependency on tracking)
529  // if (wrapperStructExtra.identification)
530  // {
531  // const auto personIdExtractor = std::make_shared<PersonIdExtractor>();
532  // postProcessingWs.emplace_back(
533  // std::make_shared<WPersonIdExtractor<TDatumsSP>>(personIdExtractor)
534  // );
535  // }
536  // Frames processor (OpenPose format -> cv::Mat format)
537  if (renderOutput)
538  {
539  postProcessingWs = mergeVectors(postProcessingWs, cpuRenderers);
540  const auto opOutputToCvMat = std::make_shared<OpOutputToCvMat>();
541  postProcessingWs.emplace_back(std::make_shared<WOpOutputToCvMat<TDatumsSP>>(opOutputToCvMat));
542  }
543  // Re-scale pose if desired
544  // If desired scale is not the current input
545  if (wrapperStructPose.keypointScale != ScaleMode::InputResolution
546  // and desired scale is not output when size(input) = size(output)
547  && !(wrapperStructPose.keypointScale == ScaleMode::OutputResolution &&
548  (finalOutputSize == producerSize || finalOutputSize.x <= 0 || finalOutputSize.y <= 0))
549  // and desired scale is not net output when size(input) = size(net output)
550  && !(wrapperStructPose.keypointScale == ScaleMode::NetOutputResolution
551  && producerSize == wrapperStructPose.netInputSize))
552  {
553  // Then we must rescale the keypoints
554  auto keypointScaler = std::make_shared<KeypointScaler>(wrapperStructPose.keypointScale);
555  postProcessingWs.emplace_back(std::make_shared<WKeypointScaler<TDatumsSP>>(keypointScaler));
556  }
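 // Illustrative example (assumption, not in the original source): keypointScale = ScaleMode::ZeroToOne
 // always appends a WKeypointScaler here, whereas ScaleMode::NetOutputResolution with a producer whose
 // resolution already matches wrapperStructPose.netInputSize skips the extra rescaling worker.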
557  }
558 
559  // IK/Adam
560  const auto displayAdam = wrapperStructOutput.displayMode == DisplayMode::DisplayAdam
561  || (wrapperStructOutput.displayMode == DisplayMode::DisplayAll
562  && wrapperStructExtra.ikThreads > 0);
563  jointAngleEstimationsWs.clear();
564 #ifdef USE_3D_ADAM_MODEL
565  if (wrapperStructExtra.ikThreads > 0)
566  {
567  jointAngleEstimationsWs.resize(wrapperStructExtra.ikThreads);
568  // Pose extractor(s)
569  for (auto i = 0u; i < jointAngleEstimationsWs.size(); i++)
570  {
571  const auto jointAngleEstimation = std::make_shared<JointAngleEstimation>(displayAdam);
572  jointAngleEstimationsWs.at(i) = {std::make_shared<WJointAngleEstimation<TDatumsSP>>(
573  jointAngleEstimation)};
574  }
575  }
576 #endif
577 
578  // Output workers
579  outputWs.clear();
 580  // Send information (e.g., to Unity) through UDP client-server communication
581 #ifdef USE_3D_ADAM_MODEL
582  if (!wrapperStructOutput.udpHost.empty() && !wrapperStructOutput.udpPort.empty())
583  {
584  const auto udpSender = std::make_shared<UdpSender>(wrapperStructOutput.udpHost,
585  wrapperStructOutput.udpPort);
586  outputWs.emplace_back(std::make_shared<WUdpSender<TDatumsSP>>(udpSender));
587  }
588 #endif
589  // Write people pose data on disk (json for OpenCV >= 3, xml, yml...)
590  if (!writeKeypointCleaned.empty())
591  {
592  const auto keypointSaver = std::make_shared<KeypointSaver>(writeKeypointCleaned,
593  wrapperStructOutput.writeKeypointFormat);
594  outputWs.emplace_back(std::make_shared<WPoseSaver<TDatumsSP>>(keypointSaver));
595  if (wrapperStructFace.enable)
596  outputWs.emplace_back(std::make_shared<WFaceSaver<TDatumsSP>>(keypointSaver));
597  if (wrapperStructHand.enable)
598  outputWs.emplace_back(std::make_shared<WHandSaver<TDatumsSP>>(keypointSaver));
599  }
600  // Write OpenPose output data on disk in json format (body/hand/face keypoints, body part locations if
601  // enabled, etc.)
602  if (!writeJsonCleaned.empty())
603  {
604  const auto peopleJsonSaver = std::make_shared<PeopleJsonSaver>(writeJsonCleaned);
605  outputWs.emplace_back(std::make_shared<WPeopleJsonSaver<TDatumsSP>>(peopleJsonSaver));
606  }
607  // Write people pose data on disk (COCO validation json format)
608  if (!wrapperStructOutput.writeCocoJson.empty())
609  {
 610  // If humanFormat: bigger file size (& maybe slower to process), but easier for the user to read
611  const auto humanFormat = true;
612  const auto cocoJsonSaver = std::make_shared<CocoJsonSaver>(wrapperStructOutput.writeCocoJson,
613  humanFormat, CocoJsonFormat::Body);
614  outputWs.emplace_back(std::make_shared<WCocoJsonSaver<TDatumsSP>>(cocoJsonSaver));
615  }
616  // Write people foot pose data on disk (COCO validation json format for foot data)
617  if (!wrapperStructOutput.writeCocoFootJson.empty())
618  {
 619  // If humanFormat: bigger file size (& maybe slower to process), but easier for the user to read
620  const auto humanFormat = true;
621  const auto cocoJsonSaver = std::make_shared<CocoJsonSaver>(wrapperStructOutput.writeCocoFootJson,
622  humanFormat, CocoJsonFormat::Foot);
623  outputWs.emplace_back(std::make_shared<WCocoJsonSaver<TDatumsSP>>(cocoJsonSaver));
624  }
625  // Write frames as desired image format on hard disk
626  if (!writeImagesCleaned.empty())
627  {
628  const auto imageSaver = std::make_shared<ImageSaver>(writeImagesCleaned,
629  wrapperStructOutput.writeImagesFormat);
630  outputWs.emplace_back(std::make_shared<WImageSaver<TDatumsSP>>(imageSaver));
631  }
632  // Write frames as *.avi video on hard disk
633  const auto producerFps = (wrapperStructInput.producerSharedPtr == nullptr ?
634  0. : wrapperStructInput.producerSharedPtr->get(CV_CAP_PROP_FPS));
635  const auto originalVideoFps = (wrapperStructOutput.writeVideoFps > 0 ?
636  wrapperStructOutput.writeVideoFps
637  : producerFps);
638  if (!wrapperStructOutput.writeVideo.empty())
639  {
640  if (!oPProducer)
641  error("Video file can only be recorded inside `wrapper/wrapper.hpp` if the producer"
642  " is one of the default ones (e.g. video, webcam, ...).",
643  __LINE__, __FUNCTION__, __FILE__);
644  if (finalOutputSize.x <= 0 || finalOutputSize.y <= 0)
 645  error("Video can only be recorded if outputSize is fixed (e.g. video, webcam, IP camera),"
 646  " but not for an image directory.", __LINE__, __FUNCTION__, __FILE__);
647  const auto videoSaver = std::make_shared<VideoSaver>(
648  wrapperStructOutput.writeVideo, CV_FOURCC('M','J','P','G'), originalVideoFps, finalOutputSize
649  );
650  outputWs.emplace_back(std::make_shared<WVideoSaver<TDatumsSP>>(videoSaver));
651  }
652  // Write joint angles as *.bvh file on hard disk
653 #ifdef USE_3D_ADAM_MODEL
654  if (!wrapperStructOutput.writeBvh.empty())
655  {
656  const auto bvhSaver = std::make_shared<BvhSaver>(
657  wrapperStructOutput.writeBvh, JointAngleEstimation::getTotalModel(), originalVideoFps
658  );
659  outputWs.emplace_back(std::make_shared<WBvhSaver<TDatumsSP>>(bvhSaver));
660  }
661 #endif
662  // Write heat maps as desired image format on hard disk
663  if (!writeHeatMapsCleaned.empty())
664  {
665  const auto heatMapSaver = std::make_shared<HeatMapSaver>(writeHeatMapsCleaned,
666  wrapperStructOutput.writeHeatMapsFormat);
667  outputWs.emplace_back(std::make_shared<WHeatMapSaver<TDatumsSP>>(heatMapSaver));
668  }
669  // Add frame information for GUI
670  const bool guiEnabled = (wrapperStructOutput.displayMode != DisplayMode::NoDisplay);
 671  // If this WGuiInfoAdder instance is placed before the WImageSaver or WVideoSaver, then the
 672  // recorded frames will look exactly like the final image displayed by the GUI
673  if (wrapperStructOutput.guiVerbose && (guiEnabled || !userOutputWs.empty()
674  || threadManagerMode == ThreadManagerMode::Asynchronous
675  || threadManagerMode == ThreadManagerMode::AsynchronousOut))
676  {
677  const auto guiInfoAdder = std::make_shared<GuiInfoAdder>(numberThreads, guiEnabled);
678  outputWs.emplace_back(std::make_shared<WGuiInfoAdder<TDatumsSP>>(guiInfoAdder));
679  }
680  // Minimal graphical user interface (GUI)
681  guiW = nullptr;
682  if (guiEnabled)
683  {
684  // PoseRenderers to Renderers
685  std::vector<std::shared_ptr<Renderer>> renderers;
686  if (wrapperStructPose.renderMode == RenderMode::Cpu)
687  renderers.emplace_back(std::static_pointer_cast<Renderer>(poseCpuRenderer));
688  else
689  for (const auto& poseGpuRenderer : poseGpuRenderers)
690  renderers.emplace_back(std::static_pointer_cast<Renderer>(poseGpuRenderer));
691  // Display
692  // Adam (+3-D/2-D) display
693  if (displayAdam)
694  {
695 #ifdef USE_3D_ADAM_MODEL
696  // Gui
697  const auto gui = std::make_shared<GuiAdam>(
698  finalOutputSize, wrapperStructOutput.fullScreen, threadManager.getIsRunningSharedPtr(),
699  spVideoSeek, poseExtractorNets, faceExtractorNets, handExtractorNets, renderers,
700  wrapperStructOutput.displayMode, JointAngleEstimation::getTotalModel(),
701  wrapperStructOutput.writeVideoAdam
702  );
703  // WGui
704  guiW = {std::make_shared<WGuiAdam<TDatumsSP>>(gui)};
705 #endif
706  }
707  // 3-D (+2-D) display
708  else if (wrapperStructOutput.displayMode == DisplayMode::Display3D
709  || wrapperStructOutput.displayMode == DisplayMode::DisplayAll)
710  {
711  // Gui
712  const auto gui = std::make_shared<Gui3D>(
713  finalOutputSize, wrapperStructOutput.fullScreen, threadManager.getIsRunningSharedPtr(),
714  spVideoSeek, poseExtractorNets, faceExtractorNets, handExtractorNets, renderers,
715  wrapperStructPose.poseModel, wrapperStructOutput.displayMode
716  );
717  // WGui
718  guiW = {std::make_shared<WGui3D<TDatumsSP>>(gui)};
719  }
720  // 2-D display
721  else if (wrapperStructOutput.displayMode == DisplayMode::Display2D)
722  {
723  // Gui
724  const auto gui = std::make_shared<Gui>(
725  finalOutputSize, wrapperStructOutput.fullScreen, threadManager.getIsRunningSharedPtr(),
726  spVideoSeek, poseExtractorNets, faceExtractorNets, handExtractorNets, renderers
727  );
728  // WGui
729  guiW = {std::make_shared<WGui<TDatumsSP>>(gui)};
730  }
731  else
732  error("Unknown DisplayMode.", __LINE__, __FUNCTION__, __FILE__);
733  }
734  // Set wrapper as configured
735  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
736 
737 
738 
739 
740 
 741  // The fewer the queues, the fewer threads are opened and, potentially, the lower the lag
742 
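 // Illustrative overview (assumption, not in the original source) of the graph assembled below for the
 // common case of the default OpenPose producer, a single GPU, multi-threading enabled, no custom user
 // workers and no 3-D/IK modules:
 //   thread 0, queues 0 -> 1: datumProducerW + wIdGenerator + scaleAndSizeExtractorW + cvMatToOpInputW + cvMatToOpOutputW
 //   thread 1, queues 1 -> 2: poseExtractorsWs.at(0) (pose/face/hand extraction and rendering)
 //   thread 2, queues 2 -> 3: wQueueAssembler + postProcessingWs + outputWs
 //   thread 3, queues 3 -> 4: guiW (if the display is enabled)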
743  // Sanity checks
744  if ((datumProducerW == nullptr) == (userInputWs.empty())
745  && threadManagerMode != ThreadManagerMode::Asynchronous
746  && threadManagerMode != ThreadManagerMode::AsynchronousIn)
747  {
 748  const auto message = "You need to have one and only one producer selected. You can introduce your own"
 749  " producer by using setWorker(WorkerType::Input, ...), use the OpenPose"
 750  " default producer by configuring it in the configure function, or use the"
 751  " ThreadManagerMode::Asynchronous(In) mode.";
752  error(message, __LINE__, __FUNCTION__, __FILE__);
753  }
754  if (outputWs.empty() && userOutputWs.empty() && guiW == nullptr
755  && threadManagerMode != ThreadManagerMode::Asynchronous
756  && threadManagerMode != ThreadManagerMode::AsynchronousOut)
757  {
758  error("No output selected.", __LINE__, __FUNCTION__, __FILE__);
759  }
760 
761  // Thread Manager
762  // Clean previous thread manager (avoid configure to crash the program if used more than once)
763  threadManager.reset();
764  unsigned long long threadId = 0ull;
765  auto queueIn = 0ull;
766  auto queueOut = 1ull;
767  // After producer
768  // ID generator (before any multi-threading or any function that requires the ID)
769  const auto wIdGenerator = std::make_shared<WIdGenerator<TDatumsSP>>();
770  std::vector<TWorker> workersAux{wIdGenerator};
771  // Scale & cv::Mat to OP format
772  if (scaleAndSizeExtractorW != nullptr)
773  workersAux = mergeVectors(workersAux, {scaleAndSizeExtractorW});
774  if (cvMatToOpInputW != nullptr)
775  workersAux = mergeVectors(workersAux, {cvMatToOpInputW});
776  // cv::Mat to output format
777  if (cvMatToOpOutputW != nullptr)
778  workersAux = mergeVectors(workersAux, {cvMatToOpOutputW});
779 
780  // Producer
781  // If custom user Worker and uses its own thread
782  if (!userInputWs.empty() && userInputWsOnNewThread)
783  {
784  // Thread 0, queues 0 -> 1
785  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
786  threadManager.add(threadId, userInputWs, queueIn++, queueOut++);
787  threadIdPP(threadId, multiThreadEnabled);
788  }
789  // If custom user Worker in same thread
790  else if (!userInputWs.empty())
791  workersAux = mergeVectors(userInputWs, workersAux);
792  // If OpenPose producer (same thread)
793  else if (datumProducerW != nullptr)
794  workersAux = mergeVectors({datumProducerW}, workersAux);
795  // Otherwise
796  else if (threadManagerMode != ThreadManagerMode::Asynchronous
797  && threadManagerMode != ThreadManagerMode::AsynchronousIn)
798  error("No input selected.", __LINE__, __FUNCTION__, __FILE__);
799  // Thread 0 or 1, queues 0 -> 1
800  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
801  threadManager.add(threadId, workersAux, queueIn++, queueOut++);
802  // Increase thread
803  threadIdPP(threadId, multiThreadEnabled);
804 
805  // Pose estimation & rendering
806  // Thread 1 or 2...X, queues 1 -> 2, X = 2 + #GPUs
807  if (!poseExtractorsWs.empty())
808  {
809  if (multiThreadEnabled)
810  {
811  for (auto& wPose : poseExtractorsWs)
812  {
813  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
814  threadManager.add(threadId, wPose, queueIn, queueOut);
815  threadIdPP(threadId, multiThreadEnabled);
816  }
817  queueIn++;
818  queueOut++;
819  // Sort frames - Required own thread
820  if (poseExtractorsWs.size() > 1u)
821  {
822  const auto wQueueOrderer = std::make_shared<WQueueOrderer<TDatumsSP>>();
823  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
824  threadManager.add(threadId, wQueueOrderer, queueIn++, queueOut++);
825  threadIdPP(threadId, multiThreadEnabled);
826  }
827  }
828  else
829  {
830  if (poseExtractorsWs.size() > 1)
 831  log("Multi-threading disabled, only 1 thread running. All GPUs but the first one have been"
 832  " disabled; the first one is defined by gpuNumberStart (e.g. in the OpenPose demo, it is"
 833  " set with the `--num_gpu_start` flag).", Priority::High);
834  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
835  threadManager.add(threadId, poseExtractorsWs.at(0), queueIn++, queueOut++);
836  }
837  }
838  // Assemble all frames from same time instant (3-D module)
839  const auto wQueueAssembler = std::make_shared<WQueueAssembler<TDatumsSP, TDatums>>();
840  // 3-D reconstruction
841  if (!poseTriangulationsWs.empty())
842  {
843  // Assemble frames
844  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
845  threadManager.add(threadId, wQueueAssembler, queueIn++, queueOut++);
846  threadIdPP(threadId, multiThreadEnabled);
847  // 3-D reconstruction
848  if (multiThreadEnabled)
849  {
850  for (auto& wPoseTriangulations : poseTriangulationsWs)
851  {
852  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
853  threadManager.add(threadId, wPoseTriangulations, queueIn, queueOut);
854  threadIdPP(threadId, multiThreadEnabled);
855  }
856  queueIn++;
857  queueOut++;
858  // Sort frames
859  if (poseTriangulationsWs.size() > 1u)
860  {
861  const auto wQueueOrderer = std::make_shared<WQueueOrderer<TDatumsSP>>();
862  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
863  threadManager.add(threadId, wQueueOrderer, queueIn++, queueOut++);
864  threadIdPP(threadId, multiThreadEnabled);
865  }
866  }
867  else
868  {
869  if (poseTriangulationsWs.size() > 1)
870  log("Multi-threading disabled, only 1 thread running for 3-D triangulation.",
 871  Priority::High);
 872  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
873  threadManager.add(threadId, poseTriangulationsWs.at(0), queueIn++, queueOut++);
874  }
875  }
876  else
877  postProcessingWs = mergeVectors({wQueueAssembler}, postProcessingWs);
878  // Adam/IK step
879  if (!jointAngleEstimationsWs.empty())
880  {
881  if (multiThreadEnabled)
882  {
883  for (auto& wJointAngleEstimator : jointAngleEstimationsWs)
884  {
885  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
886  threadManager.add(threadId, wJointAngleEstimator, queueIn, queueOut);
887  threadIdPP(threadId, multiThreadEnabled);
888  }
889  queueIn++;
890  queueOut++;
891  // Sort frames
892  if (jointAngleEstimationsWs.size() > 1)
893  {
894  const auto wQueueOrderer = std::make_shared<WQueueOrderer<TDatumsSP>>();
895  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
896  threadManager.add(threadId, wQueueOrderer, queueIn++, queueOut++);
897  threadIdPP(threadId, multiThreadEnabled);
898  }
899  }
900  else
901  {
902  if (jointAngleEstimationsWs.size() > 1)
903  log("Multi-threading disabled, only 1 thread running for joint angle estimation.",
 904  Priority::High);
 905  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
906  threadManager.add(threadId, jointAngleEstimationsWs.at(0), queueIn++, queueOut++);
907  }
908  }
909  // Post processing workers
910  if (!postProcessingWs.empty())
911  {
912  // Combining postProcessingWs and outputWs
913  outputWs = mergeVectors(postProcessingWs, outputWs);
914  // // If I wanna split them
915  // log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
916  // threadManager.add(threadId, postProcessingWs, queueIn++, queueOut++);
917  // threadIdPP(threadId, multiThreadEnabled);
918  }
919  // If custom user Worker and uses its own thread
920  if (!userPostProcessingWs.empty())
921  {
922  // If custom user Worker in its own thread
923  if (userPostProcessingWsOnNewThread)
924  {
925  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
926  threadManager.add(threadId, userPostProcessingWs, queueIn++, queueOut++);
927  threadIdPP(threadId, multiThreadEnabled);
928  }
929  // If custom user Worker in same thread
930  // Merge with outputWs
931  else
932  outputWs = mergeVectors(outputWs, userPostProcessingWs);
933  }
934  // Output workers
935  if (!outputWs.empty())
936  {
937  // Thread 4 or 5, queues 4 -> 5
938  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
939  threadManager.add(threadId, outputWs, queueIn++, queueOut++);
940  threadIdPP(threadId, multiThreadEnabled);
941  }
942  // User output worker
943  // Thread Y, queues Q -> Q+1
944  if (!userOutputWs.empty())
945  {
946  if (userOutputWsOnNewThread)
947  {
948  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
949  threadManager.add(threadId, userOutputWs, queueIn++, queueOut++);
950  threadIdPP(threadId, multiThreadEnabled);
951  }
952  else
953  {
954  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
955  threadManager.add(threadId-1, userOutputWs, queueIn++, queueOut++);
956  }
957  }
958  // OpenPose GUI
959  if (guiW != nullptr)
960  {
961  // Thread Y+1, queues Q+1 -> Q+2
962  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
963  threadManager.add(threadId, guiW, queueIn++, queueOut++);
964  threadIdPP(threadId, multiThreadEnabled);
965  }
966  log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
967  }
968  catch (const std::exception& e)
969  {
970  error(e.what(), __LINE__, __FUNCTION__, __FILE__);
971  }
972  }
973 }
974 
975 #endif // OPENPOSE_WRAPPER_WRAPPER_AUXILIARY_HPP
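For context, configureThreadManager() is normally not invoked directly: the op::Wrapper class calls it internally once the user-provided wrapper structs are known. The following minimal sketch (an assumption about typical usage, not part of the original header; the default-constructed structs and the missing producer are placeholders) shows how the function declared above can be driven by hand through an op::ThreadManager:

    #include <array>
    #include <memory>
    #include <vector>
    #include <openpose/headers.hpp>

    int main()
    {
        using TDatums = std::vector<op::Datum>;
        using TDatumsSP = std::shared_ptr<TDatums>;

        // Thread manager that configureThreadManager() will populate with workers and queues
        op::ThreadManager<TDatumsSP> threadManager;

        // No custom user workers in this sketch
        std::array<std::vector<std::shared_ptr<op::Worker<TDatumsSP>>>, int(op::WorkerType::Size)> userWs{};
        std::array<bool, int(op::WorkerType::Size)> userWsOnNewThread{};

        // NOTE: WrapperStructInput must carry a valid producerSharedPtr (e.g. webcam or video);
        // otherwise the producer sanity check inside configureThreadManager() will throw.
        op::configureThreadManager<TDatums>(
            threadManager, /*multiThreadEnabled*/ true, op::ThreadManagerMode::Synchronous,
            op::WrapperStructPose{}, op::WrapperStructFace{}, op::WrapperStructHand{},
            op::WrapperStructExtra{}, op::WrapperStructInput{}, op::WrapperStructOutput{},
            userWs, userWsOnNewThread);

        // Blocking call: runs the whole multi-threaded pipeline until the producer finishes
        threadManager.exec();
        return 0;
    }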