diff --git a/.github/issue_template.md b/.github/issue_template.md index fb524ae6c2b1199d5287db04750a8139bb9d0b71..24ad1ed242857f9617605532a86e4874f80f969d 100644 --- a/.github/issue_template.md +++ b/.github/issue_template.md @@ -1,5 +1,5 @@ ### Posting rules -1. **Duplicated posts will not be answered**. Check the [FAQ](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/faq.md) section, other GitHub issues, and general documentation before posting. E.g. **low-speed, out-of-memory, output format, 0-people detected, installation issues, ...**). +1. **Duplicated posts will not be answered**. Check the [FAQ](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/faq.md) section, other GitHub issues, and general documentation before posting. E.g., **low-speed, out-of-memory, output format, 0-people detected, installation issues, ...**). 2. **Fill** the **Your System Configuration section (all of it or it will not be answered!)** if you are facing an error or unexpected behavior. Feature requests or some other type of posts might not require it. 3. **No questions about training or 3rd party libraries**: - OpenPose only implements testing. diff --git a/README.md b/README.md index 786c13f7773e8a34020df134d84ed43244c2de69..549ee2db1b39c48fd4addb3972b290a0a8280d37 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ Otherwise, check [doc/installation.md](doc/installation.md) for instructions on ## Quick Start Most users do not need the OpenPose C++/Python API, but can simply use the OpenPose Demo: -- **OpenPose Demo**: To easily process images/video/webcam and display/save the results. See [doc/demo_overview.md](doc/demo_overview.md). E.g. run OpenPose in a video with: +- **OpenPose Demo**: To easily process images/video/webcam and display/save the results. See [doc/demo_overview.md](doc/demo_overview.md). 
E.g., run OpenPose in a video with: ``` # Ubuntu ./build/examples/openpose/openpose.bin --video examples/media/video.avi @@ -121,7 +121,7 @@ bin\OpenPoseDemo.exe --video examples\media\video.avi - **Standalone face or hand detector**: - **Face** keypoint detection **without body** keypoint detection: If you want to speed it up (but also reduce amount of detected faces), check the OpenCV-face-detector approach in [doc/standalone_face_or_hand_keypoint_detector.md](doc/standalone_face_or_hand_keypoint_detector.md). - - **Use your own face/hand detector**: You can use the hand and/or face keypoint detectors with your own face or hand detectors, rather than using the body detector. E.g. useful for camera views at which the hands are visible but not the body (OpenPose detector would fail). See [doc/standalone_face_or_hand_keypoint_detector.md](doc/standalone_face_or_hand_keypoint_detector.md). + - **Use your own face/hand detector**: You can use the hand and/or face keypoint detectors with your own face or hand detectors, rather than using the body detector. E.g., useful for camera views at which the hands are visible but not the body (OpenPose detector would fail). See [doc/standalone_face_or_hand_keypoint_detector.md](doc/standalone_face_or_hand_keypoint_detector.md). diff --git a/doc/demo_overview.md b/doc/demo_overview.md index 58647779a6ffd3f7948ccf4234fb5caf49ad80cb..cfdcc5901058fc02d29a860cf00db9222ef6f5ac 100644 --- a/doc/demo_overview.md +++ b/doc/demo_overview.md @@ -132,7 +132,7 @@ Each flag is divided into flag name, default value, and description. 1. Debugging/Other - DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for low priority messages and 4 for important ones."); -- DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. 
Mainly useful for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the error."); +- DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful for 1) Cases where it is needed a low latency (e.g., webcam in real-time scenarios with low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the error."); - DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some runtime statistics at this frame number."); 2. Producer @@ -146,11 +146,11 @@ Each flag is divided into flag name, default value, and description. - DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP."); - DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0."); - DEFINE_uint64(frame_step, 1, "Step or gap between processed frames. E.g., `--frame_step 5` would read and process frames 0, 5, 10, etc.."); -- DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to 10, it will process 11 frames (0-10)."); -- DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations)."); +- DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g., if set to 10, it will process 11 frames (0-10)."); +- DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g., for real time webcam demonstrations)."); - DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270."); - DEFINE_bool(frames_repeat, false, "Repeat frames when finished."); -- DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). 
If the processing time is too long, it will skip frames. If it is too fast, it will slow it down."); +- DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g., for video). If the processing time is too long, it will skip frames. If it is too fast, it will slow it down."); - DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located."); - DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e., it will leave it as it is."); @@ -164,8 +164,8 @@ Each flag is divided into flag name, default value, and description. 4. OpenPose Body Pose - DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face keypoint detection."); -- DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), `MPI_4_layers` (15 keypoints, even faster but less accurate)."); -- DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is decreased, the speed increases. For maximum speed-accuracy balance, it should keep the closest aspect ratio possible to the images or videos to be processed. Using `-1` in any of the dimensions, OP will choose the optimal resolution depending on the other value introduced by the user. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 videos, e.g. full HD (1980x1080) and HD (1280x720) resolutions."); +- DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g., `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), `MPI_4_layers` (15 keypoints, even faster but less accurate)."); +- DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. 
If it is decreased, the speed increases. For maximum speed-accuracy balance, it should keep the closest aspect ratio possible to the images or videos to be processed. Using `-1` in any of the dimensions, OP will choose the optimal resolution depending on the other value introduced by the user. E.g., the default `-1x368` is equivalent to `656x368` in 16:9 videos, e.g., full HD (1980x1080) and HD (1280x720) resolutions."); - DEFINE_int32(scale_number, 1, "Number of scales to average."); - DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1. If you want to change the initial scale, you actually want to multiply the `net_resolution` by your desired initial scale."); @@ -177,14 +177,14 @@ Each flag is divided into flag name, default value, and description. - DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the op::Datum::poseCandidates array with the body part candidates. Candidates refer to all the detected body parts, before being assembled into people. Note that the number of candidates is equal or higher than the number of final body parts (i.e. after being assembled into people). The empty body parts are filled with 0s. Program speed will slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly use this information."); 6. OpenPose Face -- DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g. `model_folder`. Note that this will considerable slow down the performance and increse the required GPU memory. In addition, the greater number of people on the image, the slower OpenPose will be."); +- DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g., `model_folder`. Note that this will considerably slow down the performance and increase the required GPU memory. 
In addition, the greater number of people on the image, the slower OpenPose will be."); - DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint detector. 320x320 usually works fine while giving a substantial speed up when multiple faces on the image."); 7. OpenPose Hand -- DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g. `model_folder`. Analogously to `--face`, it will also slow down the performance, increase the required GPU memory and its speed depends on the number of people."); +- DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g., `model_folder`. Analogously to `--face`, it will also slow down the performance, increase the required GPU memory and its speed depends on the number of people."); - DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint detector."); - DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4."); -- DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2."); +- DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range between smallest and biggest scale. The scales will be centered in ratio 1. 
E.g., if scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2."); - DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it simply looks for hands in positions at which hands were located in previous frames, but it does not guarantee the same person ID among frames."); 8. OpenPose 3-D Reconstruction @@ -198,7 +198,7 @@ Each flag is divided into flag name, default value, and description. 10. OpenPose Rendering Pose - DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be rendered. Generally, a high threshold (> 0.5) will only render very clear body parts; while small thresholds (~0.1) will also output guessed and occluded keypoints, but also more false positives (i.e. wrong detections)."); -- DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render both `outputData` and `cvOutputData` with the original image and desired body part to be shown (i.e. keypoints, heat maps or PAFs)."); +- DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering (slower but greater functionality, e.g., `alpha_X` flags). If -1, it will pick CPU if CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render both `outputData` and `cvOutputData` with the original image and desired body part to be shown (i.e. keypoints, heat maps or PAFs)."); - DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will hide it. 
Only valid for GPU rendering."); - DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the heatmap, 0 will only show the frame. Only valid for GPU rendering."); @@ -216,16 +216,17 @@ Each flag is divided into flag name, default value, and description. 13. Display - DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle)."); -- DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It does not affect the pose rendering."); +- DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g., number of current frame and people). It does not affect the pose rendering."); - DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server and/or to slightly speed up the processing if visual output is not required); 2 for 2-D display; 3 for 3-D display (if `--3d` enabled); and 1 for both 2-D and 3-D display."); 14. Result Saving - DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format."); -- DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV function cv::imwrite for all compatible extensions."); +- DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g., png, jpg or bmp. Check the OpenCV function cv::imwrite for all compatible extensions."); - DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag `camera_fps` controls FPS."); - DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. 
It includes body, hand, and face pose keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled)."); - DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format."); - DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format."); +- DEFINE_int32(write_coco_json_variant, 0, "Currently, this option is experimental and only makes effect on car JSON generation. It selects the COCO variant for cocoJsonSaver."); - DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag must be enabled."); - DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`. For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for floating values."); - DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format with `write_keypoint_format`."); diff --git a/doc/faq.md b/doc/faq.md index c68654263cd3ec864f52b25a9e1115c0554f7dd1..c9f407e3f93d028b322e4d26702995059941bcee 100644 --- a/doc/faq.md +++ b/doc/faq.md @@ -35,7 +35,7 @@ OpenPose - Frequently Asked Question (FAQ) **A**: Check the [OpenPose Benchmark](https://docs.google.com/spreadsheets/d/1-DynFGvoScvfWDA1P4jDInCkbD4lg0IKOYbXgEq0sK0/edit#gid=0) to discover the approximate speed of your graphics card. Some speed tips: 1. Use cuDNN 5.1 (cuDNN 6 is ~10% slower). - 2. Reduce the `--net_resolution` (e.g. to 320x176) (lower accuracy). Note: For maximum accuracy, follow [doc/quick_start.md#maximum-accuracy-configuration](./quick_start.md#maximum-accuracy-configuration). + 2. Reduce the `--net_resolution` (e.g., to 320x176) (lower accuracy). Note: For maximum accuracy, follow [doc/quick_start.md#maximum-accuracy-configuration](./quick_start.md#maximum-accuracy-configuration). 3. 
For face, reduce the `--face_net_resolution`. The resolution 320x320 usually works pretty decently. 4. Use the `MPI_4_layers` model (lower accuracy and lower number of parts). 5. Change GPU rendering by CPU rendering to get approximately +0.5 FPS (`--render_pose 1`). diff --git a/doc/installation.md b/doc/installation.md index eaa449eed43ea737ca15585020df510a7b996f70..949a27e5fb733a5aa2b69be0b71eb9bec08234f7 100644 --- a/doc/installation.md +++ b/doc/installation.md @@ -213,7 +213,7 @@ Check OpenPose was properly installed by running it on the default images, video ### OpenPose from other Projects (Ubuntu and Mac) If you only intend to use the OpenPose demo, you might skip this step. This step is only recommended if you plan to use the OpenPose API from other projects. -To install the OpenPose headers and libraries into the system environment path (e.g. `/usr/local/` or `/usr/`), run the following command. +To install the OpenPose headers and libraries into the system environment path (e.g., `/usr/local/` or `/usr/`), run the following command. ``` cd build/ sudo make install ``` @@ -318,7 +318,7 @@ If the default installation fails (i.e., the one explained above), instal Caffe #### OpenCL Version -If you have an AMD graphics card, you can compile OpenPose with the OpenCL option. To manually select the OpenCL Version, open CMake GUI mentioned above, and set the `GPU_MODE` flag to `OPENCL`. +If you have an AMD graphics card, you can compile OpenPose with the OpenCL option. To manually select the OpenCL Version, open CMake GUI mentioned above, and set the `GPU_MODE` flag to `OPENCL`. **Very important:** If you compiled previously the CPU-only or CUDA versions on that same OpenPose folder, you will have to manually delete the `build` directory and run the installation steps from scratch. Otherwise, many weird errors will appear. The OpenCL version has been tested on Ubuntu and Windows. This has been tested only on AMD Vega series and NVIDIA 10 series graphics cards. 
Please email us if you have issues with other operating systems or graphics cards. @@ -364,7 +364,7 @@ After installation, check the [doc/modules/calibration_module.md](./modules/cali #### Compiling without cuDNN The [cuDNN](https://developer.nvidia.com/cudnn) library is not mandatory, but required for full keypoint detection accuracy. In case your graphics card is not compatible with cuDNN, you can disable it by unchecking `USE_CUDNN` in CMake. -Then, you would have to reduce the `--net_resolution` flag to fit the model into the GPU memory. You can try values like `640x320`, `320x240`, `320x160`, or `160x80` to see your GPU memory capabilities. After finding the maximum approximate resolution that your GPU can handle without throwing an out-of-memory error, adjust the `net_resolution` ratio to your image or video to be processed (see the `--net_resolution` explanation from [doc/demo_overview.md](./demo_overview.md)), or use `-1` (e.g. `--net_resolution -1x320`). +Then, you would have to reduce the `--net_resolution` flag to fit the model into the GPU memory. You can try values like `640x320`, `320x240`, `320x160`, or `160x80` to see your GPU memory capabilities. After finding the maximum approximate resolution that your GPU can handle without throwing an out-of-memory error, adjust the `net_resolution` ratio to your image or video to be processed (see the `--net_resolution` explanation from [doc/demo_overview.md](./demo_overview.md)), or use `-1` (e.g., `--net_resolution -1x320`). diff --git a/doc/installation_deprecated.md b/doc/installation_deprecated.md index 2d0a1b35a100e4b5b7d8d29fa7b113edea6d88ae..af0a463e470a864b3b5ad45e7819aa0ebdb0e22b 100644 --- a/doc/installation_deprecated.md +++ b/doc/installation_deprecated.md @@ -44,7 +44,7 @@ Recommended installation method, it is simpler and offers more customization set CUDA, cuDNN, OpenCV and Atlas must be already installed on your machine: 1. 
[CUDA](https://developer.nvidia.com/cuda-80-ga2-download-archive) must be installed. You should reboot your machine after installing CUDA. - 2. [cuDNN](https://developer.nvidia.com/cudnn): Once you have downloaded it, just unzip it and copy (merge) the contents on the CUDA folder, e.g. `/usr/local/cuda-8.0/`. Note: We found OpenPose working ~10% faster with cuDNN 5.1 compared to cuDNN 6. Otherwise, check [Compiling without cuDNN](#compiling-without-cudnn). + 2. [cuDNN](https://developer.nvidia.com/cudnn): Once you have downloaded it, just unzip it and copy (merge) the contents on the CUDA folder, e.g., `/usr/local/cuda-8.0/`. Note: We found OpenPose working ~10% faster with cuDNN 5.1 compared to cuDNN 6. Otherwise, check [Compiling without cuDNN](#compiling-without-cudnn). 3. OpenCV can be installed with `apt-get install libopencv-dev`. If you have compiled OpenCV 3 by your own, follow [Manual Compilation](#manual-compilation). After both Makefile.config files have been generated, edit them and uncomment the line `# OPENCV_VERSION := 3`. You might alternatively modify all `Makefile.config.UbuntuXX` files and then run the scripts in step 2. 4. In addition, OpenCV 3 does not incorporate the `opencv_contrib` module by default. Assuming you have OpenCV 3 compiled with the contrib module and you want to use it, append `opencv_contrib` at the end of the line `LIBRARIES += opencv_core opencv_highgui opencv_imgproc` in the `Makefile` file. 5. Atlas can be installed with `sudo apt-get install libatlas-base-dev`. Instead of Atlas, you can use OpenBLAS or Intel MKL by modifying the line `BLAS := atlas` in the same way as previosuly mentioned for the OpenCV version selection. @@ -61,7 +61,7 @@ bash ./ubuntu/install_caffe_and_openpose_if_cuda8.sh ### Installation - Manual Compilation -Alternatively to the script installation, if you want to use CUDA 7, avoid using sh scripts, change some configuration labels (e.g. 
OpenCV version), etc., then: +Alternatively to the script installation, if you want to use CUDA 7, avoid using sh scripts, change some configuration labels (e.g., OpenCV version), etc., then: 1. Install the [Caffe prerequisites](http://caffe.berkeleyvision.org/installation.html). 2. Compile Caffe and OpenPose by running these lines: ``` @@ -73,7 +73,7 @@ Alternatively to the script installation, if you want to use CUDA 7, avoid using cp Makefile.config.Ubuntu14_cuda8.example Makefile.config # Ubuntu 14, cuda 8 cp Makefile.config.Ubuntu16_cuda7.example Makefile.config # Ubuntu 16, cuda 7 cp Makefile.config.Ubuntu16_cuda8.example Makefile.config # Ubuntu 16, cuda 8 - # Change any custom flag from the resulting Makefile.config (e.g. OpenCV 3, Atlas/OpenBLAS/MKL, etc.) + # Change any custom flag from the resulting Makefile.config (e.g., OpenCV 3, Atlas/OpenBLAS/MKL, etc.) # Compile Caffe make all -j`nproc` && make distribute -j`nproc` @@ -84,7 +84,7 @@ Alternatively to the script installation, if you want to use CUDA 7, avoid using cp ubuntu/Makefile.example Makefile # Same file cp command as the one used for Caffe cp ubuntu/Makefile.config.Ubuntu14_cuda7.example Makefile.config - # Change any custom flag from the resulting Makefile.config (e.g. OpenCV 3, Atlas/OpenBLAS/MKL, etc.) + # Change any custom flag from the resulting Makefile.config (e.g., OpenCV 3, Atlas/OpenBLAS/MKL, etc.) make all -j`nproc` ``` @@ -97,7 +97,7 @@ Alternatively to the script installation, if you want to use CUDA 7, avoid using make clean make all -j$(NUM_CORES) ``` -**Highly important**: There are 2 `Makefile.config.Ubuntu##.example` analogous files, one in the main folder and one in [3rdparty/caffe/](../3rdparty/caffe/), corresponding to OpenPose and Caffe configuration files respectively. Any change must be done to both files (e.g. OpenCV 3 flag, Atlab/OpenBLAS/MKL flag, etc.). E.g. 
for CUDA 8 and Ubuntu16: [3rdparty/caffe/Makefile.config.Ubuntu16_cuda8.example](../3rdparty/caffe/Makefile.config.Ubuntu16.example) and [ubuntu/Makefile.config.Ubuntu16_cuda8.example](../ubuntu/Makefile.config.Ubuntu16_cuda8.example). +**Highly important**: There are 2 `Makefile.config.Ubuntu##.example` analogous files, one in the main folder and one in [3rdparty/caffe/](../3rdparty/caffe/), corresponding to OpenPose and Caffe configuration files respectively. Any change must be done to both files (e.g., OpenCV 3 flag, Atlas/OpenBLAS/MKL flag, etc.). E.g., for CUDA 8 and Ubuntu16: [3rdparty/caffe/Makefile.config.Ubuntu16_cuda8.example](../3rdparty/caffe/Makefile.config.Ubuntu16.example) and [ubuntu/Makefile.config.Ubuntu16_cuda8.example](../ubuntu/Makefile.config.Ubuntu16_cuda8.example). @@ -112,7 +112,7 @@ make clean && cd 3rdparty/caffe && make clean ### Uninstallation -You just need to remove the OpenPose folder, by default called `openpose/`. E.g. `rm -rf openpose/`. +You just need to remove the OpenPose folder, by default called `openpose/`. E.g., `rm -rf openpose/`. diff --git a/doc/library_add_new_module.md b/doc/library_add_new_module.md index 501c879718065ada880912b2c2f4fc194723fd09..09228b37f1ddc6fc0275e6c537c67369b031562d 100644 --- a/doc/library_add_new_module.md +++ b/doc/library_add_new_module.md @@ -4,9 +4,9 @@ OpenPose Library - Steps to Add a New Module ## Developping Steps In order to add a new module, these are the recommended steps in order to develop it: -1. Create a folder with its name in the `experimental/` module, e.g. `experimental/hair/`. +1. Create a folder with its name in the `experimental/` module, e.g., `experimental/hair/`. 2. Implement all the functionality in one `Worker`. I.e., inherit from `Worker` and implement all the functionality on that class (copy the examples from any Worker subclass). - 1. The first letter of the class name should be `W` (e.g. `WHairExtractor`). + 1. 
The first letter of the class name should be `W` (e.g., `WHairExtractor`). 2. To initially simplify development: 1. Optionally (higher debug info), you might initially create the Worker as a non-templated class, assuming it uses std::shared_ptr> instead of directly using a template class (following the `examples/tutorial_api_cpp` synchronous examples). While developing, templates provide more confusing debugging info. Turn the class into a template after being initially developed. 2. Optionally (for development speed), use op::Datum as unique argument of your auxiliary functions within that worker. @@ -14,8 +14,8 @@ In order to add a new module, these are the recommended steps in order to develo 4. If you are using your own custom Caffe -> initially change the Caffe for your version. It should directly work. 3. Copy the design from `pose/WPoseExtractor`. 3. To test it: - 1. Add the functionality to `Wrapper`, use the `experimental` namespace for the new Struct (e.g. `experimental::HairStruct`) that the `Wrapper` will use. Do not change any function name from `Wrapper`, just add a new `configure`, with the new `HairStruct` or modify the existing ones without changing their names. - 2. Add a demo (e.g. `examples/openpose/rthair.cpp`) to test it. + 1. Add the functionality to `Wrapper`, use the `experimental` namespace for the new Struct (e.g., `experimental::HairStruct`) that the `Wrapper` will use. Do not change any function name from `Wrapper`, just add a new `configure`, with the new `HairStruct` or modify the existing ones without changing their names. + 2. Add a demo (e.g., `examples/openpose/rthair.cpp`) to test it. 4. Split the `Worker` into as many Workers as required. 5. If the Workers need extra data from `Datum`, simply add into `Datum` the new variables required (without removing/modifying any previous variables!). 6. Read also the release steps before starting this developping phase. 
@@ -25,7 +25,7 @@ In order to add a new module, these are the recommended steps in order to develo ## Release Steps After the code is running and ready to be merged, in order to officially release the new module: -1. Move the functionality of each `Worker` class to the non-template class (e.g. `WHairExtractor` to `HairExtractor`). `WHairExtractor` will simply wrap `HairExtractor`. This will reduce compiling time for the user. See examples from other modules. +1. Move the functionality of each `Worker` class to the non-template class (e.g., `WHairExtractor` to `HairExtractor`). `WHairExtractor` will simply wrap `HairExtractor`. This will reduce compiling time for the user. See examples from other modules. 2. If you are using a custom Caffe version, move the custom code into the OpenPose library and change back Caffe to the default (most updated) version. 3. Move the module from `experimental/hair/` to `hair/`. 4. Remove `experimental` namespaces (e.g., from `Wrapper` and `Hair`) and turn Workers into template classes. diff --git a/doc/library_how_to_develop.md b/doc/library_how_to_develop.md index 0dcddc01516c8fd5f8b5ebd9f95ee7023dc41eda..625b243aca9b20620f104d86c63ce0c0fbe30b0c 100644 --- a/doc/library_how_to_develop.md +++ b/doc/library_how_to_develop.md @@ -20,9 +20,9 @@ OpenPose C++ API - How to Develop OpenPose 1. Only `//` comments are allowed in the code, `/* */` should not be used. 2. There should be a (at least) 1-line comment for each block of code inside each function. 5. Loops and statements: - 1. There should be a space between the keyword (`if`, `for`, etc) and the parenthesis, e.g.: `if (true)`. Wrong: `if(true)`. Note: So they can be easily located with Ctrl + F. + 1. There should be a space between the keyword (`if`, `for`, etc) and the parenthesis, e.g., `if (true)`. Wrong: `if(true)`. Note: So they can be easily located with Ctrl + F. 2. Braces should be added in the following line with respect to the loop/statement keyword. 
See example in point 3. - 3. 1-line loops/statements should not contain braces. E.g.: + 3. 1-line loops/statements should not contain braces. E.g., ``` if (booleanParameter) anotherParameter = 25; @@ -40,7 +40,7 @@ else 2. They should be sorted in this order: 1. Std libraries. 2. OS libraries. - 3. 3rd party libraries (e.g. Caffe, OpenCV). + 3. 3rd party libraries (e.g., Caffe, OpenCV). 4. OpenPose libraries. 5. If it is a cpp file, the last one should be its own hpp. 3. Inside each of the previous groups, it should be sorted alphabetically. diff --git a/doc/library_introduction.md b/doc/library_introduction.md index 61d23f37ef84437c10ea6952e5e4c07586750951..2b3ec639e3f9f2c78cfcf0a8cc6f0e8117246ed9 100644 --- a/doc/library_introduction.md +++ b/doc/library_introduction.md @@ -1,7 +1,7 @@ OpenPose C++ API - Introduction ==================================== -The C++ API is ideal if you want to e.g. change internal functions and/or extend the OpenPose functionality. In order to learn the basics: +The C++ API is ideal if you want to e.g., change internal functions and/or extend the OpenPose functionality. In order to learn the basics: 1. Take a look at the [library Quick Start section](../README.md#quick-start) from the README. 2. OpenPose Overview: Learn the basics about the library source code in [doc/library_overview.md](./library_overview.md). diff --git a/doc/library_overview.md b/doc/library_overview.md index 735a17c31f58d43e9a0f7910db38da04f88fdf9d..108dc3c8cfee9b68e819bbc6cd6577cbba69d124 100644 --- a/doc/library_overview.md +++ b/doc/library_overview.md @@ -65,7 +65,7 @@ There are several functions to get information about the allocated data: ### Datum - The OpenPose Basic Piece of Information Between Threads -The `Datum` class has all the variables that our Workers need to share to each other. The user can inherit from `op::Datum` in order to add extra functionality (e.g. if he want to add new Workers and they require extra information between them). 
We highly recommend not to modify the `op::Datum` source code. Instead, just inherit it and tell the Workers and `ThreadManager` to use your inherited class. No changes are needed in the OpenPose source code for this task. +The `Datum` class has all the variables that our Workers need to share to each other. The user can inherit from `op::Datum` in order to add extra functionality (e.g., if they want to add new Workers and they require extra information between them). We highly recommend not to modify the `op::Datum` source code. Instead, just inherit it and tell the Workers and `ThreadManager` to use your inherited class. No changes are needed in the OpenPose source code for this task. ``` UserDatum : public op::Datum {/* op::Datum + extra variables */} @@ -120,7 +120,7 @@ There are 3 basic configuration modes: single-threading, multi-threading and sma threadManager.add(threadId++, wPose, queueIn++, queueOut++); // Thread 2, queues 3 -> 3 ``` -3. Smart multi-threading: Some classes are much more faster than others (e.g. pose estimation takes ~100 ms while extracting frames from a video only ~10 ms). In addition, any machine has a limited number of threads. Therefore, the library allows the user to merge the faster threads in order to potentially speed up the code. Check the [real-time pose demo](../examples/openpose/openpose.cpp) too see a more complete example. +3. Smart multi-threading: Some classes are much faster than others (e.g., pose estimation takes ~100 ms while extracting frames from a video only ~10 ms). In addition, any machine has a limited number of threads. Therefore, the library allows the user to merge the faster threads in order to potentially speed up the code. Check the [real-time pose demo](../examples/openpose/openpose.cpp) to see a more complete example. 
``` auto threadId = 0; auto queueIn = 0; @@ -132,18 +132,18 @@ There are 3 basic configuration modes: single-threading, multi-threading and sma #### Thread Id: In order to have X different threads, you just need X different thread ids in the `add()` function. There should not be any missing thread or queue id. I.e., when `start` is called, all the thread ids from 0 to max_thread_id must have been added with the `add()` function, as well as all queue ids from 0 to the maximum queue id introduced. -The threads will be started following the thread id order (first the lowest id, last the highest one). In practice, thread id ordering might negatively affect the program execution by adding some lag. I.e., if the thread ids are assigned in complete opposite order to the temporal order of the Workers (e.g. first the GUI and lastly the webcam reader), then during the first few iterations the GUI Worker will have an empty queue until all other Workers have processed at least one frame. +The threads will be started following the thread id order (first the lowest id, last the highest one). In practice, thread id ordering might negatively affect the program execution by adding some lag. I.e., if the thread ids are assigned in complete opposite order to the temporal order of the Workers (e.g., first the GUI and lastly the webcam reader), then during the first few iterations the GUI Worker will have an empty queue until all other Workers have processed at least one frame. Within each thread, the Workers are executed in the order that they have been added to `ThreadManager` by the `add()` function. #### Queue Id: In addition, each queue id is forced to be the input and output of at least 1 Worker sequence. Special cases are the queue id 0 (only forced to be input of >= 1 Workers) and max_queue_id (forced to be output of >=1 Workers). This prevent users from accidentally forgetting connecting some queue ids. -Recursive queuing is allowed. E.g. 
a Worker might work from queue 0 to 1, another one from 1 to 2, and a third one from 2 to 1, creating a recursive queue/threading. However, the index 0 is reserved for the first queue, and the maximum index for the last one. +Recursive queuing is allowed. E.g., a Worker might work from queue 0 to 1, another one from 1 to 2, and a third one from 2 to 1, creating a recursive queue/threading. However, the index 0 is reserved for the first queue, and the maximum index for the last one. ### The Worker Template Class - The Parent Class of All Workers -Classes starting by the letter `W` + upper case letter (e.g. `WGui`) directly or indirectly inherit from Worker. They can be directly added to the `ThreadManager` class so they can access and/or modify the data as well as be parallelized automatically. +Classes starting by the letter `W` + upper case letter (e.g., `WGui`) directly or indirectly inherit from Worker. They can be directly added to the `ThreadManager` class so they can access and/or modify the data as well as be parallelized automatically. The easiest way to create your own Worker is to inherit Worker, and implement the work() function such us it just calls a wrapper to your desired functionality (check the source code of some of our basic Workers). Since the Worker classes are templates, they are always compiled. Therefore, including your desired functionality in a different file will let you compile it only once. Otherwise, it would be compiled any time that any code which uses your worker is compiled. @@ -163,9 +163,9 @@ We suggest users to also start their inherited `Worker` classes with the `W` ### All Workers Wrap a Non-Worker Class -All Workers wrap and call a non-Worker non-template equivalent which actually performs their functionality. E.g. `WPoseExtractor` and `PoseExtractor`. In this way, threading and functionality are completely decoupled. 
This gives us the best of templates and normal classes: +All Workers wrap and call a non-Worker non-template equivalent which actually performs their functionality. E.g., `WPoseExtractor` and `PoseExtractor`. In this way, threading and functionality are completely decoupled. This gives us the best of templates and normal classes: -1. Templates allow us to use different classes, e.g. the user could use his own specific equivalent to `op::Datum`. However, they must be compiled any time that any function that uses them changes. +1. Templates allow us to use different classes, e.g., the user could use his own specific equivalent to `op::Datum`. However, they must be compiled any time that any function that uses them changes. 2. Classes can be compiled only once, and later the algorithm just use them. However, they can only be used with specific arguments. @@ -181,7 +181,7 @@ By separating functionality and their `Worker` wrappers, we get the good of b The human body pose detection is wrapped into the `WPoseExtractor` worker and its equivalent non-template PoseExtractor. In addition, it can be rendered and/or blended into the original frame with `(W)PoseRenderer` class. ### PoseExtractor Class -Currently, only `PoseExtractorCaffe` is implemented, which uses the Caffe framework. We might add other famous frameworks later (e.g. Torch or TensorFlow). If you compile our library with any other framework, please email us or make a pull request! We are really interested in adding any other Deep Net framework, and the code is mostly prepared for it. Just create the equivalent `PoseExtractorDesiredFramework` and make the pull request! +Currently, only `PoseExtractorCaffe` is implemented, which uses the Caffe framework. We might add other famous frameworks later (e.g., Torch or TensorFlow). If you compile our library with any other framework, please email us or make a pull request! 
We are really interested in adding any other Deep Net framework, and the code is mostly prepared for it. Just create the equivalent `PoseExtractorDesiredFramework` and make the pull request! #### Constructor In order to be initialized, `PoseExtractorCaffe` has the following constructor and parameters: `PoseExtractorCaffe(const Point& netInputSize, const Point& netOutputSize, const Point& outputSize, const int scaleNumber, const double scaleGap, const PoseModel poseModel, const std::string& modelsFolder, const int gpuId)`. @@ -194,7 +194,7 @@ In order to be initialized, `PoseExtractorCaffe` has the following constructor a 4. `scaleNumber` and `scaleGap` specify the multi-scale parameters. Explained in the [README.md](../README.md), in the demo section. -5. `poseModel` specifies the model to load (e.g. COCO or MPI). +5. `poseModel` specifies the model to load (e.g., COCO or MPI). 6. `modelsFolder` is the resolution of the last layer of the deep net. I.e., the resulting heat-maps will have this size. @@ -234,4 +234,4 @@ In order to render the detected human pose, run `std::pair ren 3. `scaleNetToOutput` is given by `PoseExtractor::getScaleNetToOutput()`. -4. The resulting std::pair has the element rendered id, and its name. E.g. <0, "Nose"> or <19, "Part Affinity Fields">. +4. The resulting std::pair has the element rendered id, and its name. E.g., <0, "Nose"> or <19, "Part Affinity Fields">. diff --git a/doc/modules/3d_reconstruction_module.md b/doc/modules/3d_reconstruction_module.md index 98418ead4ce18a595fd646ba77b0dea01b4ce85f..64e912923b98682c80c47c30c0a1dfc4cd1f4143 100644 --- a/doc/modules/3d_reconstruction_module.md +++ b/doc/modules/3d_reconstruction_module.md @@ -39,7 +39,7 @@ In order to increase the 3-D reconstruction accuracy, OpenPose optionally perfor - 3-D reconstruction of body, face, and hands for 1 person. 
- If more than 1 person is detected per camera, the algorithm will just try to match person 0 on each camera, which will potentially correspond to different people in the scene. Thus, the 3-D reconstruction will completely fail. - Only points with high threshold with respect to each one of the cameras are reprojected (and later rendered). An alternative for > 4 cameras could potentially do 3-D reprojection and render all points with good views in more than N different cameras (not implemented here). -- Only Direct linear transformation (DLT) is applied for reconstruction. Non-linear optimization methods (e.g. from Ceres Solver) will potentially improve results (not implemented). +- Only Direct linear transformation (DLT) is applied for reconstruction. Non-linear optimization methods (e.g., from Ceres Solver) will potentially improve results (not implemented). - Basic OpenGL rendering with the `freeglut` library. diff --git a/doc/modules/calibration_module.md b/doc/modules/calibration_module.md index 28b18cf2c5b27e3459f8dfa14130ddf7f14298be..a8466294f5f0002004409800be0cfc82b73bbe96 100644 --- a/doc/modules/calibration_module.md +++ b/doc/modules/calibration_module.md @@ -109,7 +109,7 @@ Note: In order to maximize calibration quality, **do not reuse the same video se 1. Translation vector - Global distance: 1. Manually open each one of the generated XML files from the folder indicated by the flag `--camera_parameter_folder` (or the default one indicated by the `--help` flag if the former was not used). 2. The field `CameraMatrix` is a 3 x 4 matrix (you can see that the subfield `rows` in that file is 3 and `cols` is 4). - 3. Order the matrix in that 3 x 4 shape (e.g. by copying in a different text file with the shape of 3 rows and 4 columns). + 3. Order the matrix in that 3 x 4 shape (e.g., by copying in a different text file with the shape of 3 rows and 4 columns). 4. 
The 3 first components of the last column of the `CameraMatrix` field define the global `translation` (in meters) with respect to the global origin (in our case camera 1). 5. Thus, the distance between that camera and the origin camera 1 should be (approximately) equal to the L2-norm of the `translation` vector. 2. Translation vector - Relative x-y-z distances: diff --git a/doc/output.md b/doc/output.md index 0d7ca12ba3c3dd4c5a9fd47f42c38067df5d521d..2daababb0f68ec6df1ca09c1a952b1c89f6e6ada 100644 --- a/doc/output.md +++ b/doc/output.md @@ -72,7 +72,7 @@ Both of them follow the keypoint ordering described in the [Keypoint Ordering](# ### Keypoint Ordering -The body part mapping order of any body model (e.g. COCO, MPI) can be extracted from the C++ API by using the `getPoseBodyPartMapping(const PoseModel poseModel)` function available in [poseParameters.hpp](../include/openpose/pose/poseParameters.hpp): +The body part mapping order of any body model (e.g., COCO, MPI) can be extracted from the C++ API by using the `getPoseBodyPartMapping(const PoseModel poseModel)` function available in [poseParameters.hpp](../include/openpose/pose/poseParameters.hpp): ``` // C++ API call #include diff --git a/doc/quick_start.md b/doc/quick_start.md index 692769f66eeffce9c017b80aab277ab8991fa3ea..a715a6f4d5b0fb312db57417e8ceabf38a405650 100644 --- a/doc/quick_start.md +++ b/doc/quick_start.md @@ -152,7 +152,7 @@ build\x64\Release\OpenPoseDemo.exe --flir_camera --3d --number_people_max 1 --fa # Ubuntu and Mac (same flags for Windows version) # Optionally add `--face` and/or `--hand` to include face and/or hands # Assuming 3 cameras -# Note: We highly recommend to reduce `--output_resolution`. E.g. for 3 cameras recording at 1920x1080, the resulting image is (3x1920)x1080, so we recommend e.g. 640x360 (x3 reduction). +# Note: We highly recommend to reduce `--output_resolution`. E.g., for 3 cameras recording at 1920x1080, the resulting image is (3x1920)x1080, so we recommend e.g. 
640x360 (x3 reduction). # Video ./build/examples/openpose/openpose.bin --video output_folder_path/video.avi --3d_views 3 --3d --number_people_max 1 --output_resolution {desired_output_resolution} # Images diff --git a/doc/release_notes.md b/doc/release_notes.md index 74bbd756c80ff074e9045578b7f8db157d506a03..2cc4037861496e8ec46d0c4f591b6711ac829072 100644 --- a/doc/release_notes.md +++ b/doc/release_notes.md @@ -94,7 +94,7 @@ OpenPose Library - Release Notes 7. Added 3-D reconstruction demo. 8. Auto-detection of the camera index. 9. Speed up of ~30% in op::floatPtrToUCharCvMat. - 10. COCO extractor now extracts image ID from the image name itslef (format "string_%d"). Before, only working with validation test, now applicable to e.g. test sets. + 10. COCO extractor now extracts image ID from the image name itself (format "string_%d"). Before, only working with validation test, now applicable to e.g., test sets. 11. Changed display texts, added `OpenPose` name. 2. Main bugs fixed: 1. Pycaffe can now be imported from Python. @@ -107,7 +107,7 @@ OpenPose Library - Release Notes 1. Added CMake installer for Ubuntu. 2. Added how to use keypoint data in `examples/tutorial_wrapper/`. 3. Added flag for warnings of type `-Wsign-compare` and removed in code. - 4. Slightly improved accuracy by considering ears-shoulder connection (e.g. +0.4 mAP for 1 scale in validation set). + 4. Slightly improved accuracy by considering ears-shoulder connection (e.g., +0.4 mAP for 1 scale in validation set). 2. Main bugs fixed: 1. Windows version crashing with std::map copy. @@ -115,8 +115,8 @@ OpenPose Library - Release Notes ## OpenPose 1.2.0 (Nov 3, 2017) 1. Main improvements: - 1. Speed increase when processing images with different aspect ratios. E.g. ~20% increase over 3.7k COCO validation images on 1 scale. - 2. Huge speed increase and memory reduction when processing multi-scale. E.g. 
over 3.7k COCO validation images on 4 scales: ~40% (~770 to ~450 sec) speed increase, ~25% memory reduction (from ~8.9 to ~6.7 GB / GPU). + 1. Speed increase when processing images with different aspect ratios. E.g., ~20% increase over 3.7k COCO validation images on 1 scale. + 2. Huge speed increase and memory reduction when processing multi-scale. E.g., over 3.7k COCO validation images on 4 scales: ~40% (~770 to ~450 sec) speed increase, ~25% memory reduction (from ~8.9 to ~6.7 GB / GPU). 3. Slightly increase of accuracy given the fixed mini-bugs. 4. Added IP camera support. 5. Output images can have the input size, OpenPose able to change its size for each image and not required fixed size anymore. @@ -130,9 +130,9 @@ OpenPose Library - Release Notes 10. COCO JSON file outputs 0 as score for non-detected keypoints. 11. Added example for OpenPose for user asynchronous output and cleaned all `tutorial_wrapper/` examples. 12. Added `-1` option for `--net_resolution` in order to auto-select the best possible aspect ratio given the user input. - 13. Net resolution can be dynamically changed (e.g. for images with different size). + 13. Net resolution can be dynamically changed (e.g., for images with different size). 14. Added example to add functionality/modules to OpenPose. - 15. Added `--disable_multi_thread` flag in order to allow debug and/or highly reduce the latency (e.g. when using webcam in real-time). + 15. Added `--disable_multi_thread` flag in order to allow debug and/or highly reduce the latency (e.g., when using webcam in real-time). 16. Allowed to output images without any rendering. 2. Functions or parameters renamed: 1. OpenPose able to change its size and initial size dynamically: @@ -157,7 +157,7 @@ OpenPose Library - Release Notes 2. More efficient non-processing version (i.e., if all keypoint extractors are disabled, and only image extraction and display/saving operations are performed). 3. 
Heat maps scaling: Added `--heatmaps_scale` to OpenPoseDemo, added option not to scale the heatmaps, and added custom `float` format to save heatmaps in floating format. 4. Detector of the number of GPU also considers the initial GPU index given by the user. - 5. Added `--write_json` as new version of `--write_keypoint_json`. It includes the body part candidates (if enabled), as well as any extra information added in the future (e.g. person ID). + 5. Added `--write_json` as new version of `--write_keypoint_json`. It includes the body part candidates (if enabled), as well as any extra information added in the future (e.g., person ID). 6. Body part candidates can be retrieved in op::Datum and saved with `--write_json`. 2. Functions or parameters renamed: 1. `PoseParameters` splitted into `PoseParameters` and `PoseParametersRender` and const parameters turned into functions for more clarity. @@ -189,7 +189,7 @@ OpenPose Library - Release Notes 17. Flag `--write_json` includes 3-D keypoints. 18. 3-D reconstruction module can be used with images and videos. Flag `--3d_views` added to allow `--image_dir` and `--video` allow loading stereo images. 19. Flag `--camera_resolution` applicable to `--flir_camera`. - 20. Throw error message if requested GPU IDs does not exist (e.g. asking for 2 GPUs starting in ID 1 if there is only 2 GPUs in total). + 20. Throw error message if requested GPU IDs does not exist (e.g., asking for 2 GPUs starting in ID 1 if there is only 2 GPUs in total). 21. VideoSaver (`--write_video`) compatible with multi-camera setting. It will save all the different views concatenated. 22. OpenPose small GUI rescale the verbose text to the displayed image, to avoid the text to be either too big or small. 23. OpenPose small GUI shows the frame number w.r.t. the original producer, rather than the frame id. E.g., if video is started at frame 30, OpenPose will display 30 rather than 0 in the first frame. 
diff --git a/doc/standalone_face_or_hand_keypoint_detector.md b/doc/standalone_face_or_hand_keypoint_detector.md index f00bb8adee2373fa0f4425aa73895b854626a7ea..fbd516df60b67fd25d05ee572097814d5a592537 100644 --- a/doc/standalone_face_or_hand_keypoint_detector.md +++ b/doc/standalone_face_or_hand_keypoint_detector.md @@ -14,7 +14,7 @@ There are 2 ways to add the OpenPose face keypoint detector to your own code wit 1. Easiest solution: Forget about the `OpenPose demo` and `wrapper/wrapper.hpp`, and instead use the `include/openpose/face/faceExtractorNet.hpp` class with the output of your face detector. Recommended if you do not wanna use any other OpenPose functionality. -2. Elegant solution: If you wanna use the whole OpenPose framework, simply copy `include/wrapper/wrapper.hpp` as e.g. `examples/userCode/wrapperFace.hpp`, and change our `FaceDetector` or `FaceDetectorOpenCV` class by your custom face detector class inside your `WrapperFace` class. If you wanna omit the Pose keypoint detection for a big speed up if you do not need it, you can simply use the `body_disable` flag. +2. Elegant solution: If you wanna use the whole OpenPose framework, simply copy `include/wrapper/wrapper.hpp` as e.g., `examples/userCode/wrapperFace.hpp`, and change our `FaceDetector` or `FaceDetectorOpenCV` class by your custom face detector class inside your `WrapperFace` class. If you wanna omit the Pose keypoint detection for a big speed up if you do not need it, you can simply use the `body_disable` flag. Note: both `FaceExtractor` and `HandExtractor` classes requires as input **squared rectangles**. In addition, the function **`initializationOnThread()` must be called only once, and inside the same thread where `forwardPass` is gonna be run**. 
diff --git a/examples/openpose/openpose.cpp b/examples/openpose/openpose.cpp index 7805d321b1fefc9d7b6db09dba29f3a1af32e959..f66e8116c60055003c5ef6c335f297c750c2978b 100755 --- a/examples/openpose/openpose.cpp +++ b/examples/openpose/openpose.cpp @@ -100,9 +100,9 @@ int openPoseDemo() const op::WrapperStructOutput wrapperStructOutput{ op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, - FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, - FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, - FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, + FLAGS_write_images_format, FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, + FLAGS_write_heatmaps_format, FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapper.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tests/pose_accuracy_car_val.sh b/examples/tests/pose_accuracy_car_val.sh index f2c8c9d48c0f92c61818323f5058a444e51a7210..f9e4adb76572590be0688ce6c773811824dc2566 100644 --- a/examples/tests/pose_accuracy_car_val.sh +++ b/examples/tests/pose_accuracy_car_val.sh @@ -15,16 +15,20 @@ clear && clear # ID 50006 <--> #frames = 3559 --> ~ 4 min at 15fps # Parameters -IMAGE_FOLDER=/mnt/DataUbuntu/openpose_train/dataset/COCO/car_dataset/Dataset/images_jpg_val/ +IMAGE_FOLDER_CF=/home/gines/devel/images/car-fusion_val/ +IMAGE_FOLDER_P3=/home/gines/devel/images/pascal3d+_val/ +IMAGE_FOLDER_A7=/home/gines/devel/images/veri-776_val/ JSON_FOLDER=../evaluation/coco_val_jsons/ OP_BIN=./build/examples/openpose/openpose.bin +GPUS=-1 +# 
GPUS=1 # 1 scale -$OP_BIN --image_dir $IMAGE_FOLDER --write_coco_json ${JSON_FOLDER}car_1.json --display 0 --render_pose 0 --model_pose CAR_12 +$OP_BIN --image_dir $IMAGE_FOLDER_CF --write_coco_json_variant 0 --write_coco_json ${JSON_FOLDER}processed_carfusion_val_1.json --model_pose CAR_22 --display 0 --render_pose 0 --num_gpu ${GPUS} +$OP_BIN --image_dir $IMAGE_FOLDER_P3 --write_coco_json_variant 1 --write_coco_json ${JSON_FOLDER}processed_pascal3dplus_val_1.json --model_pose CAR_22 --display 0 --render_pose 0 --num_gpu ${GPUS} +$OP_BIN --image_dir $IMAGE_FOLDER_A7 --write_coco_json_variant 2 --write_coco_json ${JSON_FOLDER}processed_veri776_val_1.json --model_pose CAR_22 --display 0 --render_pose 0 --num_gpu ${GPUS} # # 4 scales -# $OP_BIN --image_dir $IMAGE_FOLDER --write_coco_json ${JSON_FOLDER}1_4.json --display 0 --render_pose 0 --model_pose CAR_12 --scale_number 4 --scale_gap 0.25 --net_resolution "1312x736" -# $OP_BIN --image_dir $IMAGE_FOLDER --write_coco_json ${JSON_FOLDER}1_4.json --display 0 --render_pose 0 --model_pose CAR_12 --scale_number 4 --scale_gap 0.25 --net_resolution "1312x736" --frame_last 1005 - -# # Debugging - Rendered frames saved -# $OP_BIN --image_dir $IMAGE_FOLDER --write_images ${JSON_FOLDER}frameOutput --display 0 +# $OP_BIN --image_dir $IMAGE_FOLDER_CF --write_coco_json_variant 0 --write_coco_json ${JSON_FOLDER}processed_carfusion_val_4.json --model_pose CAR_22 --display 0 --render_pose 0 --scale_number 4 --scale_gap 0.25 --net_resolution "1312x736" --num_gpu ${GPUS} +# $OP_BIN --image_dir $IMAGE_FOLDER_P3 --write_coco_json_variant 1 --write_coco_json ${JSON_FOLDER}processed_pascal3dplus_val_4.json --model_pose CAR_22 --display 0 --render_pose 0 --scale_number 4 --scale_gap 0.25 --net_resolution "1312x736" --num_gpu ${GPUS} +# $OP_BIN --image_dir $IMAGE_FOLDER_A7 --write_coco_json_variant 2 --write_coco_json ${JSON_FOLDER}processed_veri776_val_4.json --model_pose CAR_22 --display 0 --render_pose 0 --scale_number 4 --scale_gap 0.25 
--net_resolution "1312x736" --num_gpu ${GPUS} diff --git a/examples/tests/wrapperHandFromJsonTest.hpp b/examples/tests/wrapperHandFromJsonTest.hpp index 90dc15da4c655506bde25812d8924578b8e94973..12a484cbd1c92a09a26eb53909072cf1e4987745 100644 --- a/examples/tests/wrapperHandFromJsonTest.hpp +++ b/examples/tests/wrapperHandFromJsonTest.hpp @@ -227,7 +227,7 @@ namespace op cpuRenderers.emplace_back(std::make_shared>(handRenderer)); } - // Itermediate workers (e.g. OpenPose format to cv::Mat, json & frames recorder, ...) + // Intermediate workers (e.g., OpenPose format to cv::Mat, json & frames recorder, ...) mPostProcessingWs.clear(); // Frame buffer and ordering if (spWPoses.size() > 1) diff --git a/examples/tutorial_add_module/1_custom_post_processing.cpp b/examples/tutorial_add_module/1_custom_post_processing.cpp index 9f5c969ace7f0e2df17757cc3859480afb6c2c97..861f22ac98da2378fdefd5fece3ce086bf3ae40e 100644 --- a/examples/tutorial_add_module/1_custom_post_processing.cpp +++ b/examples/tutorial_add_module/1_custom_post_processing.cpp @@ -14,7 +14,7 @@ // Syntax rules // 1. Class/template variables start by up (unique_ptr), sp (sahred_ptr), p (pointer) or m (non-pointer), and they -// have no underscores, e.g.: mThisIsAVariable. +// have no underscores, e.g., mThisIsAVariable. // 2. The internal temporary function variable equivalent would be thisIsAVariable. // 3. Every line cannot have more than 120 characters. // 4. 
If extra classes and files are required, add those extra files inside the OpenPose include and src folders, @@ -113,9 +113,9 @@ int tutorialAddModule1() const op::WrapperStructOutput wrapperStructOutput{ op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, - FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, - FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, - FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, + FLAGS_write_images_format, FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, + FLAGS_write_heatmaps_format, FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Custom post-processing diff --git a/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp b/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp index dad00f9026278b875f4516bf6fc8d70a5fe67af1..a719643f53aae376a5a7b3cec8d9caee0ffad7fa 100644 --- a/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp +++ b/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp @@ -115,9 +115,9 @@ int tutorialApiCpp3() const op::WrapperStructOutput wrapperStructOutput{ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json, - FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video, - FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam, - FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, 
FLAGS_write_images_format, + FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, + FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapper.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp b/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp index 2f8971aec501ed14e858989d6714823d208988b9..36a9ffb018fda4a580d251b3c58b29cf9f81c53d 100644 --- a/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp +++ b/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp @@ -248,9 +248,9 @@ int tutorialApiCpp4() const op::WrapperStructOutput wrapperStructOutput{ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json, - FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video, - FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam, - FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, FLAGS_write_images_format, + FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, + FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp b/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp index 5cefcf4efbc805862fb8a08296ff3d48177f28d5..f6c17b2c05828ef4a3f95170240c089ef618b16d 100644 --- 
a/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp +++ b/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp @@ -198,9 +198,9 @@ int tutorialApiCpp5() const op::WrapperStructOutput wrapperStructOutput{ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json, - FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video, - FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam, - FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, FLAGS_write_images_format, + FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, + FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp b/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp index e749cf7ac8aa7d0b276d708daeb34e5b839dab67..44ad32686c45c9f4217d45f2efdfe8f9f1bedde7 100644 --- a/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp +++ b/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp @@ -163,9 +163,9 @@ int tutorialApiCpp6() const op::WrapperStructOutput wrapperStructOutput{ op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, - FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, - FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, - FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + 
FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, + FLAGS_write_images_format, FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, + FLAGS_write_heatmaps_format, FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp b/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp index b575d3626707e51faeae943d342f420963d17747..05eb5722bee56df36102d9fcf046f24fec80c7a4 100644 --- a/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp +++ b/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp @@ -163,7 +163,7 @@ int tutorialApiCpp7() op::WrapperT> opWrapperT; // Initializing the user custom classes - // Frames producer (e.g. video, webcam, ...) + // Frames producer (e.g., video, webcam, ...) 
auto wUserInput = std::make_shared(FLAGS_image_dir); // Add custom processing const auto workerInputOnNewThread = true; @@ -196,9 +196,9 @@ int tutorialApiCpp7() const op::WrapperStructOutput wrapperStructOutput{ op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, - FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, - FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, - FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, + FLAGS_write_images_format, FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, + FLAGS_write_heatmaps_format, FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp b/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp index 36828436cc082ab51f55dc5a24be3777c6c3256a..50bc51da51ca44d9e82527e54f480e657deb22cc 100644 --- a/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp +++ b/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp @@ -213,9 +213,9 @@ int tutorialApiCpp8() const op::WrapperStructOutput wrapperStructOutput{ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json, - FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video, - FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam, - FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + 
FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, FLAGS_write_images_format, + FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, + FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp b/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp index fafe5280117e6bd3913a8942b99ba6b0a676f8f2..65fc12f9dff674f512acf868b2f1aae3883d7baa 100644 --- a/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp +++ b/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp @@ -255,7 +255,7 @@ int tutorialApiCpp9() const bool enableGoogleLogging = true; // Initializing the user custom classes - // Frames producer (e.g. video, webcam, ...) + // Frames producer (e.g., video, webcam, ...) auto wUserInput = std::make_shared(FLAGS_image_dir); // Processing auto wUserPostProcessing = std::make_shared(); @@ -305,9 +305,9 @@ int tutorialApiCpp9() const op::WrapperStructOutput wrapperStructOutput{ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json, - FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video, - FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam, - FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; + FLAGS_write_coco_foot_json, FLAGS_write_coco_json_variant, FLAGS_write_images, FLAGS_write_images_format, + FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, + FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port}; opWrapperT.configure(wrapperStructOutput); // Set to single-thread (for sequential processing and/or debugging and/or 
reducing latency) if (FLAGS_disable_multi_thread) diff --git a/examples/tutorial_developer/pose_1_extract_from_image.cpp b/examples/tutorial_developer/pose_1_extract_from_image.cpp index 62d67eff8d2248e3aaa1e439b3241cd8b04eed2c..a76ad6942664a952c1a26bd22d14ee05f17f2697 100644 --- a/examples/tutorial_developer/pose_1_extract_from_image.cpp +++ b/examples/tutorial_developer/pose_1_extract_from_image.cpp @@ -22,9 +22,9 @@ #include #include -// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help` +// See all the available parameter options with the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for" @@ -32,15 +32,15 @@ DEFINE_int32(logging_level, 3, "The logging level. Inte // Producer DEFINE_string(image_path, "examples/media/COCO_val2014_000000000192.jpg", "Process the desired image."); // OpenPose -DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), " +DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g., `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), " "`MPI_4_layers` (15 keypoints, even faster but less accurate)."); DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located."); DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. 
If it is" " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the" " closest aspect ratio possible to the images or videos to be processed. Using `-1` in" " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's" - " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions," - " e.g. full HD (1980x1080) and HD (1280x720) resolutions."); + " input value. E.g., the default `-1x368` is equivalent to `656x368` in 16:9 resolutions," + " e.g., full HD (1980x1080) and HD (1280x720) resolutions."); DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the" " input image resolution."); DEFINE_int32(num_gpu_start, 0, "GPU device start number."); diff --git a/examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp b/examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp index dde4e00bddca20ba891a8421195c9a5a3abf1df5..d75558cd45099b3c62551e284a6ccacb27a571cf 100644 --- a/examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp +++ b/examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp @@ -22,9 +22,9 @@ #include #include -// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help` +// See all the available parameter options withe the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. 
Current OpenPose library messages are in the range 0-4: 1 for" @@ -32,15 +32,15 @@ DEFINE_int32(logging_level, 3, "The logging level. Inte // Producer DEFINE_string(image_path, "examples/media/COCO_val2014_000000000192.jpg", "Process the desired image."); // OpenPose -DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), " +DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g., `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), " "`MPI_4_layers` (15 keypoints, even faster but less accurate)."); DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located."); DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is" " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the" " closest aspect ratio possible to the images or videos to be processed. Using `-1` in" " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's" - " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions," - " e.g. full HD (1980x1080) and HD (1280x720) resolutions."); + " input value. E.g., the default `-1x368` is equivalent to `656x368` in 16:9 resolutions," + " e.g., full HD (1980x1080) and HD (1280x720) resolutions."); DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). 
Use \"-1x-1\" to force the program to use the" " input image resolution."); DEFINE_int32(num_gpu_start, 0, "GPU device start number."); diff --git a/examples/tutorial_developer/thread_1_openpose_read_and_display.cpp b/examples/tutorial_developer/thread_1_openpose_read_and_display.cpp index 16b236049c7791013f7b9e115077c352c4a4566b..86c634ba8bffb5cdb2114e35df230dd5544bc063 100644 --- a/examples/tutorial_developer/thread_1_openpose_read_and_display.cpp +++ b/examples/tutorial_developer/thread_1_openpose_read_and_display.cpp @@ -21,9 +21,9 @@ #include #include -// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help` +// See all the available parameter options withe the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for" @@ -45,7 +45,7 @@ DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to " camera index to run, where 0 corresponds to the detected flir camera with the lowest" " serial number, and `n` to the `n`-th lowest serial number camera."); DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP."); -DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is" +DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g., for video). If the processing time is" " too long, it will skip frames. 
If it is too fast, it will slow it down."); DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located."); DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the" @@ -104,7 +104,7 @@ int tutorialDeveloperThread1() typedef std::shared_ptr TypedefDatums; op::ThreadManager threadManager; // Step 5 - Initializing the worker classes - // Frames producer (e.g. video, webcam, ...) + // Frames producer (e.g., video, webcam, ...) auto DatumProducer = std::make_shared>(producerSharedPtr); auto wDatumProducer = std::make_shared>(DatumProducer); // GUI (Display) diff --git a/examples/tutorial_developer/thread_2_user_processing_function.cpp b/examples/tutorial_developer/thread_2_user_processing_function.cpp index c2fe50eaacb94eee80a6e15d02c870341f489717..6e62644d7be57baa26a9b924020c116302574e56 100644 --- a/examples/tutorial_developer/thread_2_user_processing_function.cpp +++ b/examples/tutorial_developer/thread_2_user_processing_function.cpp @@ -22,9 +22,9 @@ #include #include -// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help` +// See all the available parameter options withe the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. 
Current OpenPose library messages are in the range 0-4: 1 for" @@ -46,7 +46,7 @@ DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to " camera index to run, where 0 corresponds to the detected flir camera with the lowest" " serial number, and `n` to the `n`-th lowest serial number camera."); DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP."); -DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is" +DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g., for video). If the processing time is" " too long, it will skip frames. If it is too fast, it will slow it down."); DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located."); DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the" @@ -138,7 +138,7 @@ int tutorialDeveloperThread2() typedef std::shared_ptr TypedefDatums; op::ThreadManager threadManager; // Step 5 - Initializing the worker classes - // Frames producer (e.g. video, webcam, ...) + // Frames producer (e.g., video, webcam, ...) auto DatumProducer = std::make_shared>(producerSharedPtr); auto wDatumProducer = std::make_shared>(DatumProducer); // Specific WUserClass diff --git a/examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp b/examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp index d576898775d559dbd634aa67541e272af99a329b..9137ba674325b64db6cf49bbccb9a07d2d000a39 100644 --- a/examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp +++ b/examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp @@ -27,9 +27,9 @@ // #include // #include -// See all the available parameter options withe the `--help` flag. E.g. 
`build/examples/openpose/openpose.bin --help` +// See all the available parameter options withe the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for" @@ -188,7 +188,7 @@ int openPoseTutorialThread3() typedef std::shared_ptr> TypedefWorker; op::ThreadManager threadManager; // Step 3 - Initializing the worker classes - // Frames producer (e.g. video, webcam, ...) + // Frames producer (e.g., video, webcam, ...) TypedefWorker wUserInput = std::make_shared(FLAGS_image_dir); // Processing TypedefWorker wUserProcessing = std::make_shared(); diff --git a/examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp b/examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp index 71ba2e200fdf13922db15bf6d3700059fc00610d..2e337ced58bcb3c159efdb92e3bab4e0b0715608 100644 --- a/examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp +++ b/examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp @@ -27,9 +27,9 @@ // #include // #include -// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help` +// See all the available parameter options withe the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. 
for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for" @@ -201,7 +201,7 @@ int openPoseTutorialThread4() typedef std::shared_ptr> TypedefWorker; op::ThreadManager threadManager; // Step 3 - Initializing the worker classes - // Frames producer (e.g. video, webcam, ...) + // Frames producer (e.g., video, webcam, ...) TypedefWorker wUserInput = std::make_shared(FLAGS_image_dir); // Processing TypedefWorker wUserProcessing = std::make_shared(); diff --git a/include/openpose/core/array.hpp b/include/openpose/core/array.hpp index 4cb0e97053aead2a90b62898bd294a4faf88e285..25c623a1388b4bc25fd22eb477d1a9ea9051d30f 100644 --- a/include/openpose/core/array.hpp +++ b/include/openpose/core/array.hpp @@ -407,7 +407,7 @@ namespace op /** * It returns a string with the whole array data. Useful for debugging. - * The format is: values separated by a space, and a enter for each dimension. E.g.: + * The format is: values separated by a space, and a enter for each dimension. E.g., * For the Array{2, 2, 3}, it will print: * Array::toString(): * x1 x2 x3 diff --git a/include/openpose/core/datum.hpp b/include/openpose/core/datum.hpp index d1bc3622798af26f5674d51f87c271d38245529e..e989484a92d922e2b7e1067d6c9ec81ecf55451d 100644 --- a/include/openpose/core/datum.hpp +++ b/include/openpose/core/datum.hpp @@ -25,7 +25,7 @@ namespace op unsigned long long subIdMax; /**< Datum maximum sub-ID. Used to sort the Datums if multi-threading is used. */ /** - * Name used when saving the data to disk (e.g. `write_images` or `write_keypoint` flags in the demo). 
+ * Name used when saving the data to disk (e.g., `write_images` or `write_keypoint` flags in the demo). */ std::string name; @@ -57,14 +57,14 @@ namespace op /** * Rendered image in Array format. * It consists of a blending of the cvInputData and the pose/body part(s) heatmap/PAF(s). - * If rendering is disabled (e.g. `no_render_pose` flag in the demo), outputData will be empty. + * If rendering is disabled (e.g., `no_render_pose` flag in the demo), outputData will be empty. * Size: 3 x output_net_height x output_net_width */ Array outputData; /** * Rendered image in cv::Mat uchar format. - * It has been resized to the desired output resolution (e.g. `resolution` flag in the demo). + * It has been resized to the desired output resolution (e.g., `resolution` flag in the demo). * If outputData is empty, cvOutputData will also be empty. * Size: (output_height x output_width) x 3 channels */ @@ -73,8 +73,8 @@ namespace op // ------------------------------ Resulting Array data parameters ------------------------------ // /** * Body pose (x,y,score) locations for each person in the image. - * It has been resized to the desired output resolution (e.g. `resolution` flag in the demo). - * Size: #people x #body parts (e.g. 18 for COCO or 15 for MPI) x 3 ((x,y) coordinates + score) + * It has been resized to the desired output resolution (e.g., `resolution` flag in the demo). + * Size: #people x #body parts (e.g., 18 for COCO or 15 for MPI) x 3 ((x,y) coordinates + score) */ Array poseKeypoints; @@ -91,7 +91,7 @@ namespace op * Body pose global confidence/score for each person in the image. * It does not only consider the score of each body keypoint, but also the score of each PAF association. * Optimized for COCO evaluation metric. - * It will highly penalyze people with missing body parts (e.g. cropped people on the borders of the image). + * It will highly penalyze people with missing body parts (e.g., cropped people on the borders of the image). 
* If poseKeypoints is empty, poseScores will also be empty. * Size: #people */ @@ -168,7 +168,7 @@ namespace op // ---------------------------------------- 3-D Reconstruction parameters ---------------------------------------- // /** * Body pose (x,y,z,score) locations for each person in the image. - * Size: #people x #body parts (e.g. 18 for COCO or 15 for MPI) x 4 ((x,y,z) coordinates + score) + * Size: #people x #body parts (e.g., 18 for COCO or 15 for MPI) x 4 ((x,y,z) coordinates + score) */ Array poseKeypoints3D; diff --git a/include/openpose/face/faceExtractorCaffe.hpp b/include/openpose/face/faceExtractorCaffe.hpp index 3f43740b6b16c00881783d58fbd71c491815df80..9948b1a14a598c33e92beb5df6c32c7e603fba41 100644 --- a/include/openpose/face/faceExtractorCaffe.hpp +++ b/include/openpose/face/faceExtractorCaffe.hpp @@ -38,7 +38,7 @@ namespace op * @param faceRectangles location of the faces in the image. It is a length-variable std::vector, where * each index corresponds to a different person in the image. Internally, a op::Rectangle * (similar to cv::Rect for floating values) with the position of that face (or 0,0,0,0 if - * some face is missing, e.g. if a specific person has only half of the body inside the image). + * some face is missing, e.g., if a specific person has only half of the body inside the image). * @param cvInputData Original image in cv::Mat format and BGR format. */ void forwardPass(const std::vector>& faceRectangles, const cv::Mat& cvInputData); diff --git a/include/openpose/face/faceExtractorNet.hpp b/include/openpose/face/faceExtractorNet.hpp index a717fdba1398ac05ac74e517b534c94a1cc402b4..7376aaa7d062fa565290a3f8df5d9ab1c785fa58 100644 --- a/include/openpose/face/faceExtractorNet.hpp +++ b/include/openpose/face/faceExtractorNet.hpp @@ -40,7 +40,7 @@ namespace op * @param faceRectangles location of the faces in the image. It is a length-variable std::vector, where * each index corresponds to a different person in the image. 
Internally, a op::Rectangle * (similar to cv::Rect for floating values) with the position of that face (or 0,0,0,0 if - * some face is missing, e.g. if a specific person has only half of the body inside the image). + * some face is missing, e.g., if a specific person has only half of the body inside the image). * @param cvInputData Original image in cv::Mat format and BGR format. */ virtual void forwardPass(const std::vector>& faceRectangles, const cv::Mat& cvInputData) = 0; diff --git a/include/openpose/filestream/cocoJsonSaver.hpp b/include/openpose/filestream/cocoJsonSaver.hpp index 0fb5e6a6ee1bb075b06b1142009744c2a437eb5a..2a7494128492c4d8db08af4e7bdeb754b4b2ded2 100644 --- a/include/openpose/filestream/cocoJsonSaver.hpp +++ b/include/openpose/filestream/cocoJsonSaver.hpp @@ -21,7 +21,8 @@ namespace op * will be saved. */ explicit CocoJsonSaver(const std::string& filePathToSave, const bool humanReadable = true, - const CocoJsonFormat cocoJsonFormat = CocoJsonFormat::Body); + const CocoJsonFormat cocoJsonFormat = CocoJsonFormat::Body, + const int mCocoJsonVariant = 0); virtual ~CocoJsonSaver(); @@ -29,6 +30,7 @@ namespace op private: const CocoJsonFormat mCocoJsonFormat; + const int mCocoJsonVariant; JsonOfstream mJsonOfstream; bool mFirstElementAdded; diff --git a/include/openpose/filestream/enumClasses.hpp b/include/openpose/filestream/enumClasses.hpp index d9315d0689031646b8d054aceaf79df1df0f57ca..878a27e259ee01a91a9b3e919b67419a384bfd29 100644 --- a/include/openpose/filestream/enumClasses.hpp +++ b/include/openpose/filestream/enumClasses.hpp @@ -10,6 +10,7 @@ namespace op Yaml, Yml, }; + enum class CocoJsonFormat : unsigned char { Body, diff --git a/include/openpose/flags.hpp b/include/openpose/flags.hpp index e70a94f26ba2deeaf5db3f1d35ed3ac50244a635..13e2db000c6ca6b3ee968dfdf7d9a9da35da530c 100644 --- a/include/openpose/flags.hpp +++ b/include/openpose/flags.hpp @@ -12,15 +12,15 @@ namespace gflags = google; #endif -// See all the available parameter 
options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help` +// See all the available parameter options with the `--help` flag. E.g., `build/examples/openpose/openpose.bin --help` // Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose -// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. +// executable. E.g., for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`. // Debugging/Other DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while" " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for" " low priority messages and 4 for important ones."); DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful" - " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with" + " for 1) Cases where it is needed a low latency (e.g., webcam in real-time scenarios with" " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the" " error."); DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some" @@ -49,12 +49,12 @@ DEFINE_string(ip_camera, "", "String with the IP came DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0."); DEFINE_uint64(frame_step, 1, "Step or gap between processed frames. E.g., `--frame_step 5` would read and process frames" " 0, 5, 10, etc.."); -DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. 
Indexes are 0-based, e.g., if set to" " 10, it will process 11 frames (0-10)."); -DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations)."); +DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g., for real time webcam demonstrations)."); DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270."); DEFINE_bool(frames_repeat, false, "Repeat frames when finished."); -DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is" +DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g., for video). If the processing time is" " too long, it will skip frames. If it is too fast, it will slow it down."); DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located."); DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the" @@ -85,14 +85,14 @@ DEFINE_int32(number_people_max, -1, "This parameter will lim // OpenPose Body Pose DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face" " keypoint detection."); -DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), " +DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g., `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), " "`MPI_4_layers` (15 keypoints, even faster but less accurate)."); DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is" " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the" " closest aspect ratio possible to the images or videos to be processed. 
Using `-1` in" " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's" - " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions," - " e.g. full HD (1980x1080) and HD (1280x720) resolutions."); + " input value. E.g., the default `-1x368` is equivalent to `656x368` in 16:9 resolutions," + " e.g., full HD (1980x1080) and HD (1280x720) resolutions."); DEFINE_int32(scale_number, 1, "Number of scales to average."); DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1." " If you want to change the initial scale, you actually want to multiply the" @@ -134,7 +134,7 @@ DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squ DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results" " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4."); DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range" - " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if" + " between smallest and biggest scale. The scales will be centered in ratio 1. E.g., if" " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2."); DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate" " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it" @@ -173,7 +173,7 @@ DEFINE_double(render_threshold, 0.05, "Only estimated keypoint " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also" " more false positives (i.e. wrong detections)."); DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering" - " (slower but greater functionality, e.g. `alpha_X` flags). 
If -1, it will pick CPU if" + " (slower but greater functionality, e.g., `alpha_X` flags). If -1, it will pick CPU if" " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render" " both `outputData` and `cvOutputData` with the original image and desired body part to be" " shown (i.e. keypoints, heat maps or PAFs)."); @@ -196,7 +196,7 @@ DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_hea #ifndef OPENPOSE_FLAGS_DISABLE_DISPLAY // Display DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle)."); -DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It" +DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g., number of current frame and people). It" " does not affect the pose rendering."); DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server" " and/or to slightly speed up the processing if visual output is not required); 2 for 2-D" @@ -204,7 +204,7 @@ DEFINE_int32(display, -1, "Display mode: -1 for au #endif // OPENPOSE_FLAGS_DISABLE_DISPLAY // Result Saving DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format."); -DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV" +DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g., png, jpg or bmp. Check the OpenCV" " function cv::imwrite for all compatible extensions."); DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the" " final path does not finish in `.avi`. It internally uses cv::VideoWriter. 
Flag" @@ -213,6 +213,8 @@ DEFINE_string(write_json, "", "Directory to write Open " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled)."); DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format."); DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format."); +DEFINE_int32(write_coco_json_variant, 0, "Currently, this option is experimental and only makes effect on car JSON generation. It" + " selects the COCO variant for cocoJsonSaver."); DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag" " must be enabled."); DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`." @@ -223,9 +225,9 @@ DEFINE_string(write_keypoint, "", "(Deprecated, use `write DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml," " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead."); // Result Saving - Extra Algorithms -DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`" +DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g., `~/Desktop/adamResult.avi`. Flag `camera_fps`" " controls FPS."); -DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`."); +DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g., `~/Desktop/mocapResult.bvh`."); // UDP communication DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`."); DEFINE_string(udp_port, "8051", "Experimental, not available yet. 
Port number for UDP communication."); diff --git a/include/openpose/gui/frameDisplayer.hpp b/include/openpose/gui/frameDisplayer.hpp index d303c3c5229c95b14af7c71344cfadf54ddf638e..7c853c970c30aa6194fdef95c2a471a95c252ee4 100644 --- a/include/openpose/gui/frameDisplayer.hpp +++ b/include/openpose/gui/frameDisplayer.hpp @@ -30,13 +30,13 @@ namespace op void initializationOnThread(); /** - * This function set the new FrameDisplayer::FullScreenMode (e.g. full screen). + * This function set the new FrameDisplayer::FullScreenMode (e.g., full screen). * @param fullScreenMode New FrameDisplayer::FullScreenMode state. */ void setFullScreenMode(const FullScreenMode fullScreenMode); /** - * This function switch between full screen and windowed modes (e.g. when double-click on video players or + * This function switch between full screen and windowed modes (e.g., when double-click on video players or * Ctrt+Enter are presed). */ void switchFullScreenMode(); diff --git a/include/openpose/hand/handExtractorCaffe.hpp b/include/openpose/hand/handExtractorCaffe.hpp index bc7bb1a86b73f5b98cdbbcab0dae32c1806cb12c..1362a37d6719c57ea106d3068374b2899e3acd32 100644 --- a/include/openpose/hand/handExtractorCaffe.hpp +++ b/include/openpose/hand/handExtractorCaffe.hpp @@ -49,7 +49,7 @@ namespace op * each index corresponds to a different person in the image. Internally the std::vector, a std::array of 2 * elements: index 0 and 1 for left and right hand respectively. Inside each array element, a * op::Rectangle (similar to cv::Rect for floating values) with the position of that hand (or 0,0,0,0 if - * some hand is missing, e.g. if a specific person has only half of the body inside the image). + * some hand is missing, e.g., if a specific person has only half of the body inside the image). * @param cvInputData Original image in cv::Mat format and BGR format. 
*/ void forwardPass(const std::vector, 2>> handRectangles, const cv::Mat& cvInputData); diff --git a/include/openpose/hand/handExtractorNet.hpp b/include/openpose/hand/handExtractorNet.hpp index 10ef59c9c05b82ec896150952feca68bed815eed..627911edb44a3364097fa35eec8eb6d54ef1b307 100644 --- a/include/openpose/hand/handExtractorNet.hpp +++ b/include/openpose/hand/handExtractorNet.hpp @@ -45,7 +45,7 @@ namespace op * each index corresponds to a different person in the image. Internally the std::vector, a std::array of 2 * elements: index 0 and 1 for left and right hand respectively. Inside each array element, a * op::Rectangle (similar to cv::Rect for floating values) with the position of that hand (or 0,0,0,0 if - * some hand is missing, e.g. if a specific person has only half of the body inside the image). + * some hand is missing, e.g., if a specific person has only half of the body inside the image). * @param cvInputData Original image in cv::Mat format and BGR format. */ virtual void forwardPass(const std::vector, 2>> handRectangles, diff --git a/include/openpose/producer/producer.hpp b/include/openpose/producer/producer.hpp index 341eec013850698bda44b062353cd4fbf3c53acc..e3926dcf5c8af4ebbb07a4039c1865e92985d3bd 100644 --- a/include/openpose/producer/producer.hpp +++ b/include/openpose/producer/producer.hpp @@ -10,7 +10,7 @@ namespace op { /** * Producer is an abstract class to extract frames from a source (image directory, video file, - * webcam stream, etc.). It has the basic and common functions (e.g. getFrame, release & isOpened). + * webcam stream, etc.). It has the basic and common functions (e.g., getFrame, release & isOpened). */ class OP_API Producer { @@ -59,7 +59,7 @@ namespace op virtual std::vector getCameraIntrinsics() = 0; /** - * This function returns a unique frame name (e.g. 
the frame number for video, the + * This function returns a unique frame name (e.g., the frame number for video, the * frame counter for webcam, the image name for image directory reader, etc.). * @return std::string with an unique frame name. */ diff --git a/include/openpose/producer/videoCaptureReader.hpp b/include/openpose/producer/videoCaptureReader.hpp index f22518657b1b7acd374fe4ac6e5451d9fdcd48ac..23dbf0dc6068ce687f728e6a96031336d1c30d85 100644 --- a/include/openpose/producer/videoCaptureReader.hpp +++ b/include/openpose/producer/videoCaptureReader.hpp @@ -10,7 +10,7 @@ namespace op { /** * VideoCaptureReader is an abstract class to extract frames from a cv::VideoCapture source (video file, - * webcam stream, etc.). It has the basic and common functions of the cv::VideoCapture class (e.g. get, set, etc.). + * webcam stream, etc.). It has the basic and common functions of the cv::VideoCapture class (e.g., get, set, etc.). */ class OP_API VideoCaptureReader : public Producer { diff --git a/include/openpose/producer/videoReader.hpp b/include/openpose/producer/videoReader.hpp index 35403bff35290d10053951f360cd2c774e4a0b2b..05190b59382ef55623f8380e4838b98e4ac55078 100644 --- a/include/openpose/producer/videoReader.hpp +++ b/include/openpose/producer/videoReader.hpp @@ -8,7 +8,7 @@ namespace op { /** - * VideoReader is a wrapper of the cv::VideoCapture class for video. It allows controlling a video (e.g. extracting + * VideoReader is a wrapper of the cv::VideoCapture class for video. It allows controlling a video (e.g., extracting * frames, setting resolution & fps, etc). 
*/ class OP_API VideoReader : public VideoCaptureReader diff --git a/include/openpose/thread/worker.hpp b/include/openpose/thread/worker.hpp index 10c2506d086de54ead9e8c0d3b26c138273fd6b3..ff3620e0f96c9631e134082e8bce3d06cfeefa52 100644 --- a/include/openpose/thread/worker.hpp +++ b/include/openpose/thread/worker.hpp @@ -27,7 +27,8 @@ namespace op mIsRunning = false; } - // Virtual in case some function needs spetial stopping (e.g. buffers might not stop inmediately and need a few iterations) + // Virtual in case some function needs spetial stopping (e.g., buffers might not stop inmediately and need a + // few iterations) inline virtual void tryStop() { stop(); diff --git a/include/openpose/utilities/fastMath.hpp b/include/openpose/utilities/fastMath.hpp index 9db6dea616647654a157128fe9a0601147a49b44..a09bc7d6b9e649573269c50c2e2865a185785852 100644 --- a/include/openpose/utilities/fastMath.hpp +++ b/include/openpose/utilities/fastMath.hpp @@ -5,7 +5,7 @@ namespace op { // Use op::round/max/min for basic types (int, char, long, float, double, etc). Never with classes! // `std::` alternatives uses 'const T&' instead of 'const T' as argument. - // E.g. std::round is really slow (~300 ms vs ~10 ms when I individually apply it to each element of a whole + // E.g., std::round is really slow (~300 ms vs ~10 ms when I individually apply it to each element of a whole // image array // Round functions diff --git a/include/openpose/wrapper/wrapperAuxiliary.hpp b/include/openpose/wrapper/wrapperAuxiliary.hpp index 848ede2e8be10bf7fc2ba73a7f3f383d7998c437..2042f316ab25f86571379205ec2515bea5d0683f 100644 --- a/include/openpose/wrapper/wrapperAuxiliary.hpp +++ b/include/openpose/wrapper/wrapperAuxiliary.hpp @@ -527,7 +527,7 @@ namespace op poseTriangulation)}; } } - // Itermediate workers (e.g. OpenPose format to cv::Mat, json & frames recorder, ...) + // Itermediate workers (e.g., OpenPose format to cv::Mat, json & frames recorder, ...) 
postProcessingWs.clear(); // // Person ID identification (when no multi-thread and no dependency on tracking) // if (wrapperStructExtra.identification) @@ -617,7 +617,8 @@ namespace op wrapperStructOutput.writeCocoJson, humanFormat, (wrapperStructPose.poseModel != PoseModel::CAR_22 && wrapperStructPose.poseModel != PoseModel::CAR_12 - ? CocoJsonFormat::Body : CocoJsonFormat::Car)); + ? CocoJsonFormat::Body : CocoJsonFormat::Car), + wrapperStructOutput.writeCocoJsonVariant); outputWs.emplace_back(std::make_shared>(cocoJsonSaver)); } // Write people foot pose data on disk (COCO validation json format for foot data) @@ -646,10 +647,10 @@ namespace op { if (!oPProducer) error("Video file can only be recorded inside `wrapper/wrapper.hpp` if the producer" - " is one of the default ones (e.g. video, webcam, ...).", + " is one of the default ones (e.g., video, webcam, ...).", __LINE__, __FUNCTION__, __FILE__); if (finalOutputSize.x <= 0 || finalOutputSize.y <= 0) - error("Video can only be recorded if outputSize is fixed (e.g. video, webcam, IP camera)," + error("Video can only be recorded if outputSize is fixed (e.g., video, webcam, IP camera)," "but not for a image directory.", __LINE__, __FUNCTION__, __FILE__); const auto videoSaver = std::make_shared( wrapperStructOutput.writeVideo, CV_FOURCC('M','J','P','G'), originalVideoFps, finalOutputSize @@ -836,7 +837,7 @@ namespace op { if (poseExtractorsWs.size() > 1) log("Multi-threading disabled, only 1 thread running. All GPUs have been disabled but the" - " first one, which is defined by gpuNumberStart (e.g. 
in the OpenPose demo, it is set" + " first one, which is defined by gpuNumberStart (e.g., in the OpenPose demo, it is set" " with the `--num_gpu_start` flag).", Priority::High); log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__); threadManager.add(threadId, poseExtractorsWs.at(0), queueIn++, queueOut++); diff --git a/include/openpose/wrapper/wrapperStructHand.hpp b/include/openpose/wrapper/wrapperStructHand.hpp index 48505be670922c614cbc321addea25dd2f019e73..8d8064574615c1c7d13d064876e06aaa8c4e509f 100644 --- a/include/openpose/wrapper/wrapperStructHand.hpp +++ b/include/openpose/wrapper/wrapperStructHand.hpp @@ -35,7 +35,7 @@ namespace op int scalesNumber; /** - * Total range between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if + * Total range between smallest and biggest scale. The scales will be centered in ratio 1. E.g., if * scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2. */ float scaleRange; diff --git a/include/openpose/wrapper/wrapperStructInput.hpp b/include/openpose/wrapper/wrapperStructInput.hpp index b112be3b8536d8dd88d3a80491dd7bb1779508c8..fc1737d3ba649657ac15d8bffba9d528f11c938f 100644 --- a/include/openpose/wrapper/wrapperStructInput.hpp +++ b/include/openpose/wrapper/wrapperStructInput.hpp @@ -62,7 +62,7 @@ namespace op int frameRotate; /** - * Whether to re-open the producer if it reaches the end (e.g. video or image directory after the last frame). + * Whether to re-open the producer if it reaches the end (e.g., video or image directory after the last frame). 
*/ bool framesRepeat; diff --git a/include/openpose/wrapper/wrapperStructOutput.hpp b/include/openpose/wrapper/wrapperStructOutput.hpp index f21e0aa50175ecf42a85285cdab263f6dfad4a9a..119cf6919c42c14dfa9a82f41bc17de6445ecedb 100644 --- a/include/openpose/wrapper/wrapperStructOutput.hpp +++ b/include/openpose/wrapper/wrapperStructOutput.hpp @@ -71,6 +71,12 @@ namespace op */ std::string writeCocoFootJson; + /** + * Experimental option (only makes effect on car JSON generation). + * It selects the COCO variant for cocoJsonSaver. + */ + int writeCocoJsonVariant; + /** * Rendered image saving folder. * If it is empty (default), it is disabled. @@ -80,7 +86,7 @@ namespace op /** * Rendered image saving folder format. * Check your OpenCV version documentation for a list of compatible formats. - * E.g. png, jpg, etc. + * E.g., png, jpg, etc. * If writeImages is empty (default), it makes no effect. */ std::string writeImagesFormat; @@ -143,12 +149,12 @@ namespace op const bool fullScreen = false, const std::string& writeKeypoint = "", const DataFormat writeKeypointFormat = DataFormat::Xml, const std::string& writeJson = "", const std::string& writeCocoJson = "", - const std::string& writeCocoFootJson = "", const std::string& writeImages = "", - const std::string& writeImagesFormat = "", const std::string& writeVideo = "", - const double writeVideoFps = 30., const std::string& writeHeatMaps = "", - const std::string& writeHeatMapsFormat = "", const std::string& writeVideoAdam = "", - const std::string& writeBvh = "", const std::string& udpHost = "", - const std::string& udpPort = ""); + const std::string& writeCocoFootJson = "", const int writeCocoJsonVariant = 1, + const std::string& writeImages = "", const std::string& writeImagesFormat = "", + const std::string& writeVideo = "", const double writeVideoFps = 30., + const std::string& writeHeatMaps = "", const std::string& writeHeatMapsFormat = "", + const std::string& writeVideoAdam = "", const std::string& writeBvh = "", 
+ const std::string& udpHost = "", const std::string& udpPort = ""); }; } diff --git a/src/openpose/face/faceDetector.cpp b/src/openpose/face/faceDetector.cpp index 01d981e8699f38acadc9c9cd7ffaff04bb7a2f9f..38d19a2c59109499534ea3e547f22cb8b079f6d9 100644 --- a/src/openpose/face/faceDetector.cpp +++ b/src/openpose/face/faceDetector.cpp @@ -39,7 +39,7 @@ namespace op const auto rEyeScoreAbove = (posePtr[rEye*3+2] > threshold); auto counter = 0; - // Face and neck given (e.g. MPI) + // Face and neck given (e.g., MPI) if (headNose == lEar && lEar == rEar) { if (neckScoreAbove && headNoseScoreAbove) @@ -49,7 +49,7 @@ namespace op faceSize = 1.33f * getDistance(poseKeypoints, personIndex, neck, headNose); } } - // Face as average between different body keypoints (e.g. COCO) + // Face as average between different body keypoints (e.g., COCO) else { // factor * dist(neck, headNose) diff --git a/src/openpose/filestream/cocoJsonSaver.cpp b/src/openpose/filestream/cocoJsonSaver.cpp index f115e3a024e8ce652b9b766b6a3ef45f0f3b0287..e0aabb8e9ef4899f45cb997c2034e3a05f75c64c 100644 --- a/src/openpose/filestream/cocoJsonSaver.cpp +++ b/src/openpose/filestream/cocoJsonSaver.cpp @@ -4,8 +4,9 @@ namespace op { CocoJsonSaver::CocoJsonSaver(const std::string& filePathToSave, const bool humanReadable, - const CocoJsonFormat cocoJsonFormat) : + const CocoJsonFormat cocoJsonFormat, const int cocoJsonVariant) : mCocoJsonFormat{cocoJsonFormat}, + mCocoJsonVariant{cocoJsonVariant}, mJsonOfstream{filePathToSave, humanReadable}, mFirstElementAdded{false} { @@ -94,11 +95,23 @@ namespace op // Car else if (mCocoJsonFormat == CocoJsonFormat::Car) { + // Car12 if (numberBodyParts == 12) indexesInCocoOrder = std::vector{0,1,2,3, 4,5,6,7, 8, 8,9,10,11, 11}; + // Car22 else if (numberBodyParts == 22) - for (auto i = 0 ; i < 22 ; i++) - indexesInCocoOrder.emplace_back(i); + { + // Dataset 1 + if (mCocoJsonVariant == 0) + indexesInCocoOrder = std::vector{0,1,2,3, 6,7, 12,13,14,15, 16,17}; + // Dataset 2 + 
else if (mCocoJsonVariant == 1) + indexesInCocoOrder = std::vector{0,1,2,3, 6,7, 12,13,14,15, 20,21}; + // Dataset 3 + else if (mCocoJsonVariant == 2) + for (auto i = 0 ; i < 20 ; i++) + indexesInCocoOrder.emplace_back(i); + } } // Sanity check if (indexesInCocoOrder.empty()) diff --git a/src/openpose/net/bodyPartConnectorBase.cpp b/src/openpose/net/bodyPartConnectorBase.cpp index c3d42bb6bcfe20ed95efb1f78298cd06cfca913c..9954d747d88a326ea96f2d98ffacb5d5c29158b9 100644 --- a/src/openpose/net/bodyPartConnectorBase.cpp +++ b/src/openpose/net/bodyPartConnectorBase.cpp @@ -77,7 +77,7 @@ namespace op const auto vectorSize = numberBodyParts+1; const auto peaksOffset = 3*(maxPeaks+1); const auto heatMapOffset = heatMapSize.area(); - // Iterate over it PAF connection, e.g. neck-nose, neck-Lshoulder, etc. + // Iterate over it PAF connection, e.g., neck-nose, neck-Lshoulder, etc. for (auto pairIndex = 0u; pairIndex < numberBodyPartPairs; pairIndex++) { const auto bodyPartA = bodyPartPairs[2*pairIndex]; @@ -87,11 +87,11 @@ namespace op const auto numberPeaksA = intRound(candidateAPtr[0]); const auto numberPeaksB = intRound(candidateBPtr[0]); - // E.g. neck-nose connection. If one of them is empty (e.g. no noses detected) + // E.g., neck-nose connection. If one of them is empty (e.g., no noses detected) // Add the non-empty elements into the peopleVector if (numberPeaksA == 0 || numberPeaksB == 0) { - // E.g. neck-nose connection. If no necks, add all noses + // E.g., neck-nose connection. If no necks, add all noses // Change w.r.t. other if (numberPeaksA == 0) // numberPeaksB == 0 or not { @@ -140,7 +140,7 @@ namespace op } } } - // E.g. neck-nose connection. If no noses, add all necks + // E.g., neck-nose connection. If no noses, add all necks else // if (numberPeaksA != 0 && numberPeaksB == 0) { // Non-MPI @@ -189,7 +189,7 @@ namespace op } } } - // E.g. neck-nose connection. If necks and noses, look for maximums + // E.g., neck-nose connection. 
If necks and noses, look for maximums else // if (numberPeaksA != 0 && numberPeaksB != 0) { // (score, indexA, indexB). Inverted order for easy std::sort @@ -202,17 +202,17 @@ namespace op + (numberBodyPartsAndBkg + mapIdx[2*pairIndex]) * heatMapOffset; const auto* mapY = heatMapPtr + (numberBodyPartsAndBkg + mapIdx[2*pairIndex+1]) * heatMapOffset; - // E.g. neck-nose connection. For each neck + // E.g., neck-nose connection. For each neck for (auto i = 1; i <= numberPeaksA; i++) { - // E.g. neck-nose connection. For each nose + // E.g., neck-nose connection. For each nose for (auto j = 1; j <= numberPeaksB; j++) { // Initial PAF auto scoreAB = getScoreAB(i, j, candidateAPtr, candidateBPtr, mapX, mapY, heatMapSize, interThreshold, interMinAboveThreshold); - // E.g. neck-nose connection. If possible PAF between neck i, nose j --> add + // E.g., neck-nose connection. If possible PAF between neck i, nose j --> add // parts score + connection score if (scoreAB > 1e-6) allABConnections.emplace_back(std::make_tuple(scoreAB, i, j)); @@ -222,16 +222,16 @@ namespace op else if (!pairScores.empty()) { const auto firstIndex = (int)pairIndex*pairScores.getSize(1)*pairScores.getSize(2); - // E.g. neck-nose connection. For each neck + // E.g., neck-nose connection. For each neck for (auto i = 0; i < numberPeaksA; i++) { const auto iIndex = firstIndex + i*pairScores.getSize(2); - // E.g. neck-nose connection. For each nose + // E.g., neck-nose connection. For each nose for (auto j = 0; j < numberPeaksB; j++) { const auto scoreAB = pairScores[iIndex + j]; - // E.g. neck-nose connection. If possible PAF between neck i, nose j --> add + // E.g., neck-nose connection. 
If possible PAF between neck i, nose j --> add // parts score + connection score if (scoreAB > 1e-6) // +1 because peaksPtr starts with counter @@ -396,16 +396,16 @@ namespace op const auto numberPeaksA = intRound(candidateAPtr[0]); const auto numberPeaksB = intRound(candidateBPtr[0]); const auto firstIndex = (int)pairIndex*pairScores.getSize(1)*pairScores.getSize(2); - // E.g. neck-nose connection. For each neck + // E.g., neck-nose connection. For each neck for (auto indexA = 0; indexA < numberPeaksA; indexA++) { const auto iIndex = firstIndex + indexA*pairScores.getSize(2); - // E.g. neck-nose connection. For each nose + // E.g., neck-nose connection. For each nose for (auto indexB = 0; indexB < numberPeaksB; indexB++) { const auto scoreAB = pairScores[iIndex + indexB]; - // E.g. neck-nose connection. If possible PAF between neck indexA, nose indexB --> add + // E.g., neck-nose connection. If possible PAF between neck indexA, nose indexB --> add // parts score + connection score if (scoreAB > 1e-6) { diff --git a/src/openpose/net/resizeAndMergeCaffe.cpp b/src/openpose/net/resizeAndMergeCaffe.cpp index 282d8684983ef4c819c68587f27182691ece8569..3ab4f5eafb28cf26db5fd8c698705e51aa09dde9 100644 --- a/src/openpose/net/resizeAndMergeCaffe.cpp +++ b/src/openpose/net/resizeAndMergeCaffe.cpp @@ -78,8 +78,8 @@ namespace op auto topShape = bottomBlob->shape(); topShape[0] = (mergeFirstDimension ? 1 : bottomBlob->shape(0)); // -1 and later +1 to take into account that we are using 0-based index - // E.g. 100x100 image --> 200x200 --> 0-99 to 0-199 --> scale = 199/99 (not 2!) - // E.g. 101x101 image --> 201x201 --> scale = 2 + // E.g., 100x100 image --> 200x200 --> 0-99 to 0-199 --> scale = 199/99 (not 2!) 
+ // E.g., 101x101 image --> 201x201 --> scale = 2 // Test: pixel 0 --> 0, pixel 99 (ex 1) --> 199, pixel 100 (ex 2) --> 200 topShape[2] = intRound((topShape[2]*netFactor - 1.f) * scaleFactor) + 1; topShape[3] = intRound((topShape[3]*netFactor - 1.f) * scaleFactor) + 1; diff --git a/src/openpose/pose/poseExtractorCaffe.cpp b/src/openpose/pose/poseExtractorCaffe.cpp index 03cf98477989db418dd9258db5e718e7d83a2acb..990ca43fe088fd77380de85f41ad040af08e14a9 100644 --- a/src/openpose/pose/poseExtractorCaffe.cpp +++ b/src/openpose/pose/poseExtractorCaffe.cpp @@ -230,7 +230,7 @@ namespace op // Reshape blobs if required // Note: In order to resize to input size to have same results as Matlab, uncomment the commented // lines - // Note: For dynamic sizes (e.g. a folder with images of different aspect ratio) + // Note: For dynamic sizes (e.g., a folder with images of different aspect ratio) if (!vectorsAreEqual(upImpl->mNetInput4DSizes.at(i), inputNetData[i].getSize())) // || !vectorsAreEqual(upImpl->mScaleInputToNetInputs, scaleInputToNetInputs)) { diff --git a/src/openpose/producer/producer.cpp b/src/openpose/producer/producer.cpp index f29536544aa9e6d1dff63dfa85a810cffe9a5b90..06b4e77382feb1896f2a16fc4b274f42850881a2 100644 --- a/src/openpose/producer/producer.cpp +++ b/src/openpose/producer/producer.cpp @@ -57,7 +57,7 @@ namespace op if (isOpened()) { // If ProducerFpsMode::OriginalFps, then force producer to keep the frame rate of the frames producer - // sources (e.g. a video) + // sources (e.g., a video) keepDesiredFrameRate(); // Get frame frames = getRawFrames(); diff --git a/src/openpose/producer/spinnakerWrapper.cpp b/src/openpose/producer/spinnakerWrapper.cpp index 1b84212d83ef8c9edeac4f2f8df287e4ab84972e..b04334cacbb3cd590c70e34c0dc0a72044b5642a 100644 --- a/src/openpose/producer/spinnakerWrapper.cpp +++ b/src/openpose/producer/spinnakerWrapper.cpp @@ -658,7 +658,7 @@ namespace op #ifdef USE_FLIR_CAMERA try { - // Clean previous unclosed builds (e.g. 
if core dumped in the previous code using the cameras) + // Clean previous unclosed builds (e.g., if core dumped in the previous code using the cameras) release(); upImpl->mInitialized = true; diff --git a/src/openpose/utilities/flagsToOpenPose.cpp b/src/openpose/utilities/flagsToOpenPose.cpp index a71340a72c76535954dde821c39f0605be17e862..ca8ebac25bd455f314bbdabaf6ccfecc801d5142 100644 --- a/src/openpose/utilities/flagsToOpenPose.cpp +++ b/src/openpose/utilities/flagsToOpenPose.cpp @@ -115,7 +115,7 @@ namespace op try { log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__); - // Avoid duplicates (e.g. selecting at the time camera & video) + // Avoid duplicates (e.g., selecting at the time camera & video) if (int(!imageDirectory.empty()) + int(!videoPath.empty()) + int(webcamIndex > 0) + int(flirCamera) + int(!ipCameraPath.empty()) > 1) error("Selected simultaneously" @@ -275,7 +275,7 @@ namespace op { Point point; const auto nRead = sscanf(pointString.c_str(), "%dx%d", &point.x, &point.y); - checkE(nRead, 2, "Invalid resolution format: `" + pointString + "`, it should be e.g. `" + pointExample + checkE(nRead, 2, "Invalid resolution format: `" + pointString + "`, it should be e.g., `" + pointExample + "`.", __LINE__, __FUNCTION__, __FILE__); return point; } diff --git a/src/openpose/wrapper/wrapperAuxiliary.cpp b/src/openpose/wrapper/wrapperAuxiliary.cpp index 3fefac5d3a3524d3b0c88ef306097f07284e47e4..c68c6626a5176e36b3bd5998a6abf97238fe21a9 100644 --- a/src/openpose/wrapper/wrapperAuxiliary.cpp +++ b/src/openpose/wrapper/wrapperAuxiliary.cpp @@ -91,7 +91,7 @@ namespace op // Warnings if (guiEnabled && wrapperStructOutput.guiVerbose && !renderOutput) { - const auto message = "No render is enabled (e.g. `--render_pose 0`), so you might also want to" + const auto message = "No render is enabled (e.g., `--render_pose 0`), so you might also want to" " remove the display (set `--display 0` or `--no_gui_verbose`). 
If you" " simply want to use OpenPose to record video/images without keypoints, you" " only need to set `--num_gpu 0`." + additionalMessage; diff --git a/src/openpose/wrapper/wrapperStructOutput.cpp b/src/openpose/wrapper/wrapperStructOutput.cpp index 8dad6789b5f3e5558c96088eacdcdf6c7f8a130d..e07fbcf94e9224347d719e863dc11f36766ac8cf 100644 --- a/src/openpose/wrapper/wrapperStructOutput.cpp +++ b/src/openpose/wrapper/wrapperStructOutput.cpp @@ -6,9 +6,9 @@ namespace op const bool fullScreen_, const std::string& writeKeypoint_, const DataFormat writeKeypointFormat_, const std::string& writeJson_, const std::string& writeCocoJson_, const std::string& writeCocoFootJson_, - const std::string& writeImages_, const std::string& writeImagesFormat_, - const std::string& writeVideo_, const double writeVideoFps_, - const std::string& writeHeatMaps_, + const int writeCocoJsonVariant_, const std::string& writeImages_, + const std::string& writeImagesFormat_, const std::string& writeVideo_, + const double writeVideoFps_, const std::string& writeHeatMaps_, const std::string& writeHeatMapsFormat_, const std::string& writeVideoAdam_, const std::string& writeBvh_, const std::string& udpHost_, const std::string& udpPort_) : @@ -20,6 +20,7 @@ namespace op writeJson{writeJson_}, writeCocoJson{writeCocoJson_}, writeCocoFootJson{writeCocoFootJson_}, + writeCocoJsonVariant{writeCocoJsonVariant_}, writeImages{writeImages_}, writeImagesFormat{writeImagesFormat_}, writeVideo{writeVideo_},