From b343aaa08256d7ee9507710d6830ec0a8d670c5c Mon Sep 17 00:00:00 2001
From: "Eric.L"
Date: Fri, 15 Oct 2021 13:35:44 +0800
Subject: [PATCH] add cc.cpp

---
 Azure_Kinect_Lab/src/main_cc.cpp | 811 +++++++++++++++++++++++++++++++
 1 file changed, 811 insertions(+)
 create mode 100644 Azure_Kinect_Lab/src/main_cc.cpp

diff --git a/Azure_Kinect_Lab/src/main_cc.cpp b/Azure_Kinect_Lab/src/main_cc.cpp
new file mode 100644
index 0000000..a525763
--- /dev/null
+++ b/Azure_Kinect_Lab/src/main_cc.cpp
@@ -0,0 +1,811 @@
+#pragma comment(lib, "k4a.lib")
+#include <k4a/k4a.h>
+#include <opencv2/opencv.hpp>
+#include <opencv2/core/core.hpp>
+#include <opencv2/core/types_c.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/imgproc/types_c.h>
+
+using namespace cv;
+
+#include <windows.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <vector>
+using namespace std;
+//************** Symbolic Constant Macros (defines) *************
+#define STREAM_RUN_TIME_SEC 4
+#define ERROR_START_STREAM_TIME 10000
+#define K4A_DEPTH_MODE_NFOV_2X2BINNED_EXPECTED_SIZE 184320  // 368640
+#define K4A_DEPTH_MODE_NFOV_UNBINNED_EXPECTED_SIZE 737280   // 1474560
+#define K4A_DEPTH_MODE_WFOV_2X2BINNED_EXPECTED_SIZE 524288  // 1048576
+#define K4A_DEPTH_MODE_WFOV_UNBINNED_EXPECTED_SIZE 2097152  // 4194304
+#define K4A_DEPTH_MODE_PASSIVE_IR_EXPECTED_SIZE 2097152     // 4194304
+#define DEPTH_MODE_EXPECTED_FPS_30 30
+#define DEPTH_MODE_EXPECTED_FPS_15 15
+#define DEPTH_MODE_EXPECTED_FPS_5 5
+#define MAX_BUFFER_SIZE 256
+#define SERIAL_NUMBER_SIZE 12
+
+static void clean_up(k4a_device_t device)
+{
+    if (device != NULL)
+    {
+        k4a_device_close(device);
+    }
+}
+/********* Build a 2.5D depth rendering ********/
+static bool get_depth_image(cv::Mat depth, cv::Mat depth_show, k4a_calibration_t calibration)
+{
+    float fx_d = calibration.depth_camera_calibration.intrinsics.parameters.param.fx;
+    float fy_d = calibration.depth_camera_calibration.intrinsics.parameters.param.fy;
+    float cx_d = calibration.depth_camera_calibration.intrinsics.parameters.param.cx;
+    float cy_d = calibration.depth_camera_calibration.intrinsics.parameters.param.cy;
+    for (int row = 0; row < depth.rows; ++row)
+    {
+        for (int col = 0; col < depth.cols; ++col)
+        {
+            CvPoint3D32f v;
+            float z = depth.at<uint16_t>(row, col);
+            if (z > 0)
+            {
+                v.z = (float)(z) / 1000;
+                v.x = (col - cx_d) * v.z / fx_d;
+                v.y = (row - cy_d) * v.z / fy_d;
+                //v.w() = 1;
+                //printf("%f,%f,%f", v.x, v.y, v.z);
+                float d_c = 0.75;
+                if (v.z <= d_c)
+                {
+                    depth_show.at<uchar>(row, col) = (unsigned char)(v.z * 256 * 1. / d_c);
+                }
+                else
+                {
+                    depth_show.at<uchar>(row, col) = (unsigned char)(0);
+                }
+            }
+            else {
+                depth_show.at<uchar>(row, col) = (unsigned char)(0);
+            }
+        }
+    }
+    return true;
+}
+
+/********* Build an 8-bit IR rendering ********/
+static bool get_ir_image(cv::Mat ir_show, cv::Mat ir_show_8u, float dd_thr)
+{
+    for (int row = 0; row < ir_show.rows; ++row)
+    {
+        for (int col = 0; col < ir_show.cols; ++col)
+        {
+            if (ir_show.at<uint16_t>(row, col) > dd_thr)
+            {
+                ir_show_8u.at<uchar>(row, col) = 255;
+            }
+            else
+            {
+                ir_show_8u.at<uchar>(row, col) = (unsigned char)(ir_show.at<uint16_t>(row, col) / dd_thr * 255);
+            }
+        }
+    }
+    return true;
+}
+static bool point_cloud_color_to_depth(k4a_transformation_t transformation_handle,
+                                       const k4a_image_t depth_image,
+                                       const k4a_image_t color_image,
+                                       cv::Mat depth,
+                                       cv::Mat color_show,
+                                       cv::Mat ir_show,
+                                       k4a_calibration_t calibration,
+                                       int ir_thr,
+                                       uint64_t id)
+{
+    int depth_image_width_pixels = k4a_image_get_width_pixels(depth_image);
+    int depth_image_height_pixels = k4a_image_get_height_pixels(depth_image);
+    k4a_image_t transformed_color_image = NULL;
+    if (K4A_RESULT_SUCCEEDED != k4a_image_create(K4A_IMAGE_FORMAT_COLOR_BGRA32,
+                                                 depth_image_width_pixels,
+                                                 depth_image_height_pixels,
+                                                 depth_image_width_pixels * 4 * (int)sizeof(uint8_t),
+                                                 &transformed_color_image))
+    {
+        printf("Failed to create transformed color image\n");
+        return false;
+    }
+    else {
+        printf("create transformed color image \n");
+    }
+
+    k4a_image_t point_cloud_image = NULL;
+    if (K4A_RESULT_SUCCEEDED != k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
+                                                 depth_image_width_pixels,
+                                                 depth_image_height_pixels,
+                                                 depth_image_width_pixels * 3 * (int)sizeof(int16_t),
+                                                 &point_cloud_image))
+    {
+        printf("Failed to create point cloud image\n");
+        return false;
+    }
+    else {
+        printf("create point cloud image \n");
+    }
+
+    if (K4A_RESULT_SUCCEEDED != k4a_transformation_color_image_to_depth_camera(transformation_handle,
+                                                                               depth_image,
+                                                                               color_image,
+                                                                               transformed_color_image))
+    {
+        printf("Failed to compute transformed color image\n");
+        return false;
+    }
+    else {
+        printf("compute transformed color image \n");
+    }
+
+    if (K4A_RESULT_SUCCEEDED != k4a_transformation_depth_image_to_point_cloud(transformation_handle,
+                                                                              depth_image,
+                                                                              K4A_CALIBRATION_TYPE_DEPTH,
+                                                                              point_cloud_image))
+    {
+        printf("Failed to compute point cloud\n");
+        return false;
+    }
+    else {
+        printf("compute point cloud \n");
+    }
+
+    //-------------------------
+    uint8_t* Ptr_c = (uint8_t*)k4a_image_get_buffer(transformed_color_image);
+    int _resolution_y = k4a_image_get_height_pixels(depth_image);
+    int _resolution_x = k4a_image_get_width_pixels(depth_image);
+    cv::Mat c_show(_resolution_y, _resolution_x, CV_8UC4);
+    memcpy(c_show.data, Ptr_c, sizeof(uint8_t) * _resolution_x * _resolution_y * 4);
+    //namedWindow("transformed_color_image", 0);
+    //imshow("transformed_color_image", c_show);
+    //-----------------------
+    float fx_d = calibration.depth_camera_calibration.intrinsics.parameters.param.fx;
+    float fy_d = calibration.depth_camera_calibration.intrinsics.parameters.param.fy;
+    float cx_d = calibration.depth_camera_calibration.intrinsics.parameters.param.cx;
+    float cy_d = calibration.depth_camera_calibration.intrinsics.parameters.param.cy;
+    float k1 = calibration.depth_camera_calibration.intrinsics.parameters.param.k1;
+    float k2 = calibration.depth_camera_calibration.intrinsics.parameters.param.k2;
+    float k3 = calibration.depth_camera_calibration.intrinsics.parameters.param.k3;
+    float p1 = calibration.depth_camera_calibration.intrinsics.parameters.param.p1;
+    float p2 = calibration.depth_camera_calibration.intrinsics.parameters.param.p2;
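+    // fx/fy are focal lengths in pixels and cx/cy the principal point; k1-k3 are radial and
+    // p1/p2 tangential (Brown-Conrady) distortion coefficients from the factory calibration.
+    // The color-camera parameters read below follow the same convention.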
+    //
+    float fx_c = calibration.color_camera_calibration.intrinsics.parameters.param.fx;
+    float fy_c = calibration.color_camera_calibration.intrinsics.parameters.param.fy;
+    float cx_c = calibration.color_camera_calibration.intrinsics.parameters.param.cx;
+    float cy_c = calibration.color_camera_calibration.intrinsics.parameters.param.cy;
+    float k1c = calibration.color_camera_calibration.intrinsics.parameters.param.k1;
+    float k2c = calibration.color_camera_calibration.intrinsics.parameters.param.k2;
+    float k3c = calibration.color_camera_calibration.intrinsics.parameters.param.k3;
+    float p1c = calibration.color_camera_calibration.intrinsics.parameters.param.p1;
+    float p2c = calibration.color_camera_calibration.intrinsics.parameters.param.p2;
+    //
+
+    cv::Mat depth_color(_resolution_y, _resolution_x, CV_8UC3);
+    vector<CvPoint3D32f> pts_3d;
+    pts_3d.clear();
+    for (int row = 0; row < depth.rows; ++row)
+    {
+        for (int col = 0; col < depth.cols; ++col)
+        {
+            CvPoint3D32f v;
+            float z = depth.at<uint16_t>(row, col);
+            if (z > 0)
+            {
+                v.z = (float)(z) / 1000; // convert to meters
+                v.x = (col - cx_d) * v.z / fx_d;
+                v.y = (row - cy_d) * v.z / fy_d;
+                //v.w() = 1;
+                //printf("%f,%f,%f", v.x, v.y, v.z);
+                float d_c = 0.6;
+                if (v.z <= d_c)
+                {
+                    pts_3d.push_back(v);
+                    if (ir_show.at<uchar>(row, col) > ir_thr)
+                    {
+                        depth_color.at<cv::Vec3b>(row, col)[0] = c_show.at<cv::Vec4b>(row, col)[0];
+                        depth_color.at<cv::Vec3b>(row, col)[1] = c_show.at<cv::Vec4b>(row, col)[1];
+                        depth_color.at<cv::Vec3b>(row, col)[2] = c_show.at<cv::Vec4b>(row, col)[2];
+                    }
+                    else
+                    {
+                        depth_color.at<cv::Vec3b>(row, col)[0] = 0;
+                        depth_color.at<cv::Vec3b>(row, col)[1] = 0;
+                        depth_color.at<cv::Vec3b>(row, col)[2] = 0;
+                    }
+                }
+                else
+                {
+                    depth_color.at<cv::Vec3b>(row, col)[0] = 0;
+                    depth_color.at<cv::Vec3b>(row, col)[1] = 0;
+                    depth_color.at<cv::Vec3b>(row, col)[2] = 0;
+                }
+            }
+        }
+    }
+
+    //namedWindow("depth_color", 0);
+    //imshow("depth_color", depth_color);
+    char buf[256];
+    printf("img_id : %llu", id);
+    sprintf(buf, "E:/git_quick/azure_kinect_lab/Azure_Kinect_Lab/datasets/%llu.jpg", id);
+    imwrite(buf, depth_color);
+
+    // iterate over the collected 3D points
+    printf(" pts 3D size : %zu\n", pts_3d.size());
+    cv::Mat color_distortion(depth_color.rows, depth_color.cols, CV_8UC3);
+    resize(color_show, color_distortion, color_distortion.size(), 0, 0, INTER_LINEAR);
+    vector<CvPoint3D32f>::iterator it;
+    //
+    cv::Mat depth2color(3, 4, CV_32FC1);
+    depth2color.at<float>(0, 0) = calibration.color_camera_calibration.extrinsics.rotation[0];
+    depth2color.at<float>(0, 1) = calibration.color_camera_calibration.extrinsics.rotation[1];
+    depth2color.at<float>(0, 2) = calibration.color_camera_calibration.extrinsics.rotation[2];
+    depth2color.at<float>(0, 3) = calibration.color_camera_calibration.extrinsics.translation[0] / 1000;
+    //depth2color(0, 3) = calib.color_camera_calibration.extrinsics.translation[0];
+    depth2color.at<float>(1, 0) = calibration.color_camera_calibration.extrinsics.rotation[3];
+    depth2color.at<float>(1, 1) = calibration.color_camera_calibration.extrinsics.rotation[4];
+    depth2color.at<float>(1, 2) = calibration.color_camera_calibration.extrinsics.rotation[5];
+    depth2color.at<float>(1, 3) = calibration.color_camera_calibration.extrinsics.translation[1] / 1000;
+    //depth2color(1, 3) = calib.color_camera_calibration.extrinsics.translation[1];
+    depth2color.at<float>(2, 0) = calibration.color_camera_calibration.extrinsics.rotation[6];
+    depth2color.at<float>(2, 1) = calibration.color_camera_calibration.extrinsics.rotation[7];
+    depth2color.at<float>(2, 2) = calibration.color_camera_calibration.extrinsics.rotation[8];
+    depth2color.at<float>(2, 3) = calibration.color_camera_calibration.extrinsics.translation[2] / 1000;
+
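+    // For every retained 3D point the loop below does two things:
+    //  1) re-projects it into the depth image with the pinhole model
+    //     (x_depth = fx_d * X/Z + cx_d, y_depth = fy_d * Y/Z + cy_d) and marks it green;
+    //  2) transforms it into the color-camera frame with the 3x4 [R|t] matrix depth2color,
+    //     applies the color lens radial/tangential distortion, projects it with the color
+    //     intrinsics and marks the corresponding pixel in color_show blue.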
+    //
+    for (it = pts_3d.begin(); it != pts_3d.end(); it++)
+    {
+        CvPoint3D32f v = *it;
+        float Xd = v.x;
+        float Yd = v.y;
+        float Zd = v.z;
+
+        float x_p = Xd / Zd;
+        float y_p = Yd / Zd;
+        // project the point back into the 2D depth image
+        float x_depth = fx_d * x_p + cx_d;
+        float y_depth = fy_d * y_p + cy_d;
+        circle(depth_color, Point((int)x_depth, (int)y_depth), 0, Scalar(88, 255, 88), -1);
+        //-----------------------------------
+
+        cv::Mat pt_3D(4, 1, CV_32FC1);
+        pt_3D.at<float>(0, 0) = Xd;
+        pt_3D.at<float>(1, 0) = Yd;
+        pt_3D.at<float>(2, 0) = Zd;
+        pt_3D.at<float>(3, 0) = 1.;
+
+        cv::Mat pt3D_2_Color = depth2color * pt_3D; // transform from the depth camera frame to the RGB camera frame
+        // project the 3D point into the RGB camera
+        float Xdc = pt3D_2_Color.at<float>(0, 0);
+        float Ydc = pt3D_2_Color.at<float>(1, 0);
+        float Zdc = pt3D_2_Color.at<float>(2, 0);
+        // printf("%f, %f , %f", pt3D_2_Color.at<float>(0, 0), pt3D_2_Color.at<float>(1, 0), pt3D_2_Color.at<float>(2, 0));
+
+        float x_pc = Xdc / Zdc;
+        float y_pc = Ydc / Zdc;
+
+        //printf("(x,y,z) = (%.3f ,%.3f ,%.3f)\n", v.x, v.y, v.z);
+        float r2 = x_pc * x_pc + y_pc * y_pc;
+        float x_2pc = x_pc * (1. + k1c * r2 + k2c * r2 * r2 + k3c * r2 * r2 * r2) + 2. * p1c * x_pc * y_pc + p2c * (r2 + 2. * x_pc * x_pc);
+        float y_2pc = y_pc * (1. + k1c * r2 + k2c * r2 * r2 + k3c * r2 * r2 * r2) + 2. * p2c * x_pc * y_pc + p1c * (r2 + 2. * y_pc * y_pc);
+        //
+        float x_color = fx_c * x_2pc + cx_c;
+        float y_color = fy_c * y_2pc + cy_c;
+        circle(color_show, Point((int)x_color, (int)y_color), 0, Scalar(255, 88, 88), -1);
+    }
+    //namedWindow("pt3d_2_depth_color", 0);
+    //imshow("pt3d_2_depth_color", depth_color);
+    //namedWindow("color_distortion", 0);
+    //imshow("color_distortion", color_show);
+
+    //-------------------------------------------------
+
+    k4a_image_release(transformed_color_image);
+    k4a_image_release(point_cloud_image);
+
+    return true;
+}
+
+void detectSkin(cv::Mat src, cv::Mat& dst, float x0, float y0, float x1, float y1)
+{
+    static cv::Mat ycrcb_image;
+    int Cr = 1;
+    int Cb = 2;
+    cv::cvtColor(src, ycrcb_image, cv::COLOR_BGR2YCrCb);
+    for (int i = 0; i < src.rows; i++) {
+        for (int j = 0; j < src.cols; j++) {
+            int dx = j + x0;
+            int dy = i + y0;
+            if (dx >= dst.cols || dx < 0
+                || dy >= dst.rows || dy < 0) {
+                continue;
+            }
+            uchar* data = dst.ptr(dy, dx);
+            uchar* d_src = src.ptr(i, j);
+            uchar* p_src = ycrcb_image.ptr(i, j);
+            if (p_src[Cr] >= 133 && p_src[Cr] <= 173 && p_src[Cb] >= 77 && p_src[Cb] <= 127) {
+                data[0] = d_src[0];
+                data[1] = d_src[1];
+                data[2] = d_src[2];
+            }
+        }
+    }
+}
+/*
+    img_rgb : 640*480 source image
+    mask : mask predicted by the model, type: uint8
+    mask_output : output mask, type: uint8
+    bbox_x, bbox_y, bbox_w, bbox_h : phone bounding box
+*/
+void mask_pro(cv::Mat img_rgb, cv::Mat mask, cv::Mat mask_output, int bbox_x, int bbox_y, int bbox_w, int bbox_h)
+{
+    // restrict the valid hand region based on the phone bounding box
+    int h_bbox_x1 = bbox_x - 5;
+    int h_bbox_y1 = bbox_y - 5;
+    int h_bbox_x2 = bbox_x + bbox_w;
+    int h_bbox_y2 = bbox_y + bbox_h * 4 / 3;
+
+    static cv::Mat ycrcb_image;
+
+    int Cr = 1;
+    int Cb = 2;
+    cv::cvtColor(img_rgb, ycrcb_image, cv::COLOR_BGR2YCrCb);
+    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
+    resize(mask, mask, cv::Size(img_rgb.cols, img_rgb.rows));
+
+    //cv::imshow("dilated mask", mask);
+    for (int i = 0; i < mask.rows; i++)
+    {
+        for (int j = 0; j < mask.cols; j++)
+        {
+            if (j > h_bbox_x2 || j < h_bbox_x1
+                || i > h_bbox_y2 || i < h_bbox_y1) {
+                mask.at<uchar>(i, j) = 0;
+            }
+            else
+            {
+                if (int(mask.at<uchar>(i, j)) > 100)
+                    mask.at<uchar>(i, j) = 255;
+                else
+                    mask.at<uchar>(i, j) = 0;
+            }
+
+            if (i == 0 || i == (mask.rows - 1) || j == 0 || j == (mask.cols - 1)) // so the outer contour can always be found
+            {
+                mask.at<uchar>(i, j) = 0;
+            }
+        }
+    }
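+    // The mask is now binarized and clipped to the hand ROI. Keep only the largest external
+    // contour, grow its bounding box by ~10% in every direction, add any pixel inside that box
+    // whose Cr/Cb values fall in the skin range (133 <= Cr <= 173, 77 <= Cb <= 127), and run a
+    // second contour pass to produce the final filled hand mask in mask_output.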
+    vector<vector<cv::Point>> contours;
+    vector<cv::Vec4i> hierarchy;
+    // CHAIN_APPROX_NONE CHAIN_APPROX_SIMPLE
+    findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CHAIN_APPROX_NONE, cv::Point());
+
+    int idx = 0;
+    int max_area = 0;
+    int max_area_idx = -1;
+    cv::Rect max_rect;
+    for (; !contours.empty() && idx >= 0; idx = hierarchy[idx][0]) {
+        //Scalar color(rand() & 255, rand() & 255, rand() & 255);
+        if (max_area < contourArea(contours[idx]))
+        {
+            max_area = contourArea(contours[idx]);
+            max_area_idx = idx;
+            max_rect = cv::boundingRect(contours[idx]);
+            //printf("max_area : %d\n", max_area);
+        }
+    }
+    //
+    if (max_area_idx != -1)
+    {
+        Scalar color(rand() & 255, rand() & 255, rand() & 255);
+        drawContours(img_rgb, contours, max_area_idx, color, 3, LINE_8, hierarchy);
+        // expand the region and merge skin-colored pixels
+        int x_begin = max_rect.x - (max_rect.width / 10);
+        int x_end = max_rect.x + max_rect.width + (max_rect.width / 10);
+        int y_begin = max_rect.y - (max_rect.height / 10);
+        int y_end = max_rect.y + max_rect.height + (max_rect.height / 10);
+        cv::rectangle(img_rgb, cv::Rect(x_begin, y_begin, x_end - x_begin, y_end - y_begin), cv::Scalar(0, 255, 0), 2, LINE_8);
+        for (int i = y_begin; i < y_end; i++)
+        {
+            for (int j = x_begin; j < x_end; j++)
+            {
+                if (j >= img_rgb.cols || j < 0
+                    || i >= img_rgb.rows || i < 0) {
+                    continue;
+                }
+
+                if (j > h_bbox_x2 || j < h_bbox_x1
+                    || i > h_bbox_y2 || i < h_bbox_y1) {
+                    mask.at<uchar>(i, j) = 0;
+                }
+                else
+                {
+                    uchar* p_src = ycrcb_image.ptr(i, j);
+                    if (p_src[Cr] >= 133 && p_src[Cr] <= 173 && p_src[Cb] >= 77 && p_src[Cb] <= 127)
+                    {
+                        //printf("crcb\n");
+                        mask.at<uchar>(i, j) = 255;
+                    }
+                }
+
+                if (i == 0 || i == (mask.rows - 1) || j == 0 || j == (mask.cols - 1)) // so the outer contour can always be found
+                {
+                    mask.at<uchar>(i, j) = 0;
+                }
+            }
+        }
+    }
+    contours.clear();
+    hierarchy.clear();
+
+    idx = 0;
+    findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CHAIN_APPROX_NONE, cv::Point());
+    max_area = 0;
+    max_area_idx = -1;
+    for (; !contours.empty() && idx >= 0; idx = hierarchy[idx][0]) {
+        if (max_area < contourArea(contours[idx]))
+        {
+            max_area = contourArea(contours[idx]);
+            max_area_idx = idx;
+            max_rect = cv::boundingRect(contours[idx]);
+            printf("max_area : %d\n", max_area);
+        }
+    }
+
+    if (max_area_idx != -1)
+    {
+        Scalar color(rand() & 255, rand() & 255, rand() & 255);
+        drawContours(mask_output, contours, max_area_idx, 255, -1, LINE_8, hierarchy);
+        drawContours(img_rgb, contours, max_area_idx, color, 2, LINE_8, hierarchy);
+    }
+
+    cv::dilate(mask_output, mask_output, element);
+    cv::imshow("img_rgb", img_rgb);
+
+    //printf("mask size: %d,%d,%d", mask.size().width, mask.size().height, mask.channels());
+    /*namedWindow("mask", 0);
+    imshow("mask", mask);*/
+}
+
+uint64_t GetCurrentTimerMS(char* szTimer = NULL)
+{
+    uint64_t nTimer = 0;
+    SYSTEMTIME currentTime;
+    GetLocalTime(&currentTime);
+
+    tm temptm = { currentTime.wSecond,
+                  currentTime.wMinute,
+                  currentTime.wHour,
+                  currentTime.wDay,
+                  currentTime.wMonth - 1,
+                  currentTime.wYear - 1900
+    };
+    nTimer = mktime(&temptm) * 1000 + currentTime.wMilliseconds;
+    if (szTimer != NULL)
+        sprintf(szTimer, "%llu", nTimer);
+    return nTimer;
+}
+/************* main function ***********/
+int main()
+{
+    //struct tm* ptr;
+    /* time_t lt;
+    lt = time(NULL);
+    printf("The Calendar Time now is %d\n", lt);*/
+
+    uint64_t img_id = 0; // image frame ID
+    //clock_t img_id;
+
+    /* SYSTEMTIME tmp;
+    GetLocalTime(&tmp);*/
+    //
+
+    char szTimer[128];
+    GetCurrentTimerMS(szTimer);
+    printf("millisecond:%s,\t%llu\n\n", szTimer, GetCurrentTimerMS()); // milliseconds
+    //
+
+    //img_id = tmp.wMilliseconds;
+
+    printf("Hello World!\n");
+
+    /***************** OpenCV test ****************/
+    // load a test image
+    Mat img = imread("../sample/image.jpg");
+    // create a window named "image"
+    //namedWindow("image", 0);
+    // show the image in the window
+    //imshow("image", img);
+    //----------------------------------
+    /* cv::Mat img_rgb = imread("E:/yc_work_project/git_hand/UNet/onnx_test/inf_rgb.png");
+    cv::Mat mask = imread("E:/yc_work_project/git_hand/UNet/onnx_test/inf_mask.png", 0);
+    static cv::Mat mask_output = cv::Mat::zeros(img_rgb.rows, img_rgb.cols, CV_8UC1);
+    int bbox_x = 40;
+    int bbox_y = 0;
+    int bbox_w = 500;
+    int bbox_h = 480;
+    mask_pro(img_rgb, mask, mask_output, bbox_x, bbox_y, bbox_w, bbox_h);
+    cv::imshow("mask-255", mask_output);
+    waitKey(1);*/
+    //-----------------------------------
+    //vector<k4a_float3_t> points_3d = { { { 0.f, 0.f, 1000.f } },          // color camera center
+    //                                   { { -1000.f, -1000.f, 1000.f } },  // color camera top left
+    //                                   { { 1000.f, -1000.f, 1000.f } },   // color camera top right
+    //                                   { { 1000.f, 1000.f, 1000.f } },    // color camera bottom right
+    //                                   { { -1000.f, 1000.f, 1000.f } } }; // color camera bottom left
+    int timeout_ms;
+
+    uint32_t count = k4a_device_get_installed_count();
+    if (count == 0)
+    {
+        printf("No k4a devices attached!\n");
+        return 1;
+    }
+
+    // Open the first plugged in Kinect device
+    k4a_device_t device = NULL;
+    if (K4A_FAILED(k4a_device_open(K4A_DEVICE_DEFAULT, &device)))
+    {
+        printf("Failed to open k4a device!\n");
+        return 1;
+    }
+
+    // Get the size of the serial number
+    size_t serial_size = 0;
+    k4a_device_get_serialnum(device, NULL, &serial_size);
+
+    // Allocate memory for the serial, then acquire it
+    char* serial = (char*)(malloc(serial_size));
+    k4a_device_get_serialnum(device, serial, &serial_size);
+    printf("Opened device: %s\n", serial);
+    free(serial);
+
+    // Configure the streams: 720p BGRA color and NFOV unbinned depth at 15 frames per second
+    k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
+    config.camera_fps = K4A_FRAMES_PER_SECOND_15;        // frame rate
+    config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
+    config.color_resolution = K4A_COLOR_RESOLUTION_720P; // resolution
+    config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
+
+    //config.depth_mode = K4A_DEPTH_MODE_NFOV_2X2BINNED;
+    config.synchronized_images_only = true;
+    //
+    k4a_transformation_t transformation = NULL;
+    k4a_calibration_t calibration;
+    if (K4A_RESULT_SUCCEEDED !=
+        k4a_device_get_calibration(device, config.depth_mode, config.color_resolution, &calibration))
+    {
+        printf("Failed to get calibration\n");
+        return 1;
+    }
+    transformation = k4a_transformation_create(&calibration);
+    // Start the camera with the given configuration
+    if (K4A_FAILED(k4a_device_start_cameras(device, &config)))
+    {
+        printf("Failed to start cameras!\n");
+        k4a_device_close(device);
+        return 1;
+    }
+
+    // get camera intrinsics
+    //-----------------------
+    float fx_d = calibration.depth_camera_calibration.intrinsics.parameters.param.fx;
+    float fy_d = calibration.depth_camera_calibration.intrinsics.parameters.param.fy;
+    float cx_d = calibration.depth_camera_calibration.intrinsics.parameters.param.cx;
+    float cy_d = calibration.depth_camera_calibration.intrinsics.parameters.param.cy;
+    float k1 = calibration.depth_camera_calibration.intrinsics.parameters.param.k1;
+    float k2 = calibration.depth_camera_calibration.intrinsics.parameters.param.k2;
+    float k3 = calibration.depth_camera_calibration.intrinsics.parameters.param.k3;
+    float p1 = calibration.depth_camera_calibration.intrinsics.parameters.param.p1;
+    float p2 = calibration.depth_camera_calibration.intrinsics.parameters.param.p2;
+    //
+    float fx_c = calibration.color_camera_calibration.intrinsics.parameters.param.fx;
+    float fy_c = calibration.color_camera_calibration.intrinsics.parameters.param.fy;
+    float cx_c = calibration.color_camera_calibration.intrinsics.parameters.param.cx;
+    float cy_c = calibration.color_camera_calibration.intrinsics.parameters.param.cy;
+    float k1c = calibration.color_camera_calibration.intrinsics.parameters.param.k1;
+    float k2c = calibration.color_camera_calibration.intrinsics.parameters.param.k2;
+    float k3c = calibration.color_camera_calibration.intrinsics.parameters.param.k3;
+    float p1c = calibration.color_camera_calibration.intrinsics.parameters.param.p1;
+    float p2c = calibration.color_camera_calibration.intrinsics.parameters.param.p2;
+
+    // ---------------------- write the calibration parameters to a file
+    FILE* fpWrite = fopen("E:/git_quick/azure_kinect_lab/Azure_Kinect_Lab/camera_cc.txt", "w+");
+    if (fpWrite == NULL)
+    {
+        return 0;
+    }
+    // record the camera intrinsics (depth camera first, then color camera)
+    fprintf(fpWrite, "%f,%f,%f,%f,%f,%f,%f,%f,%f\n", fx_d, fy_d, cx_d, cy_d, k1, k2, k3, p1, p2);
+    fprintf(fpWrite, "%f,%f,%f,%f,%f,%f,%f,%f,%f\n", fx_c, fy_c, cx_c, cy_c, k1c, k2c, k3c, p1c, p2c);
+    fclose(fpWrite);
+
+    //----------------------- record the depth-to-color extrinsics
+    cv::Mat depth2color(3, 4, CV_32FC1);
+    depth2color.at<float>(0, 0) = calibration.color_camera_calibration.extrinsics.rotation[0];
+    depth2color.at<float>(0, 1) = calibration.color_camera_calibration.extrinsics.rotation[1];
+    depth2color.at<float>(0, 2) = calibration.color_camera_calibration.extrinsics.rotation[2];
+    depth2color.at<float>(0, 3) = calibration.color_camera_calibration.extrinsics.translation[0] / 1000;
+    //depth2color(0, 3) = calib.color_camera_calibration.extrinsics.translation[0];
+    depth2color.at<float>(1, 0) = calibration.color_camera_calibration.extrinsics.rotation[3];
+    depth2color.at<float>(1, 1) = calibration.color_camera_calibration.extrinsics.rotation[4];
+    depth2color.at<float>(1, 2) = calibration.color_camera_calibration.extrinsics.rotation[5];
+    depth2color.at<float>(1, 3) = calibration.color_camera_calibration.extrinsics.translation[1] / 1000;
+    //depth2color(1, 3) = calib.color_camera_calibration.extrinsics.translation[1];
+    depth2color.at<float>(2, 0) = calibration.color_camera_calibration.extrinsics.rotation[6];
+    depth2color.at<float>(2, 1) = calibration.color_camera_calibration.extrinsics.rotation[7];
+    depth2color.at<float>(2, 2) = calibration.color_camera_calibration.extrinsics.rotation[8];
+    depth2color.at<float>(2, 3) = calibration.color_camera_calibration.extrinsics.translation[2] / 1000;
+
+    FILE* fpWriteRT = fopen("E:/git_quick/azure_kinect_lab/Azure_Kinect_Lab/Depth2RGB_RT.txt", "w+");
+    if (fpWriteRT == NULL)
+    {
+        return 0;
+    }
+    // record the depth-to-RGB [R|t] matrix
+    fprintf(fpWriteRT, "%f,%f,%f,%f\n",
+            depth2color.at<float>(0, 0),
+            depth2color.at<float>(0, 1),
+            depth2color.at<float>(0, 2),
+            depth2color.at<float>(0, 3));
+    fprintf(fpWriteRT, "%f,%f,%f,%f\n",
+            depth2color.at<float>(1, 0),
+            depth2color.at<float>(1, 1),
+            depth2color.at<float>(1, 2),
+            depth2color.at<float>(1, 3));
+    fprintf(fpWriteRT, "%f,%f,%f,%f\n",
+            depth2color.at<float>(2, 0),
+            depth2color.at<float>(2, 1),
+            depth2color.at<float>(2, 2),
+            depth2color.at<float>(2, 3));
+    fclose(fpWriteRT);
+    // ----------------------
+    // Camera capture and application specific code would go here
+    while (true) {
+        k4a_capture_t capture;
+
+        timeout_ms = ERROR_START_STREAM_TIME;
+        // Access the depth16 image: retry until a capture is available
+        k4a_wait_result_t get_res;
+        while (K4A_WAIT_RESULT_SUCCEEDED != (get_res = k4a_device_get_capture(device, &capture, timeout_ms))) {
+            printf("get %d\n", get_res);
+        }
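+        // A k4a_capture_t bundles the synchronized depth and IR frames of one capture.
+        // The image handles obtained below must be released with k4a_image_release()
+        // (done at the bottom of this loop), and the capture itself with
+        // k4a_capture_release(), otherwise the SDK eventually runs out of buffers.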
+        k4a_image_t depth_image = NULL;
+        // k4a_image_t color_image = NULL;
+        k4a_image_t ir_image = NULL;
+
+        depth_image = k4a_capture_get_depth_image(capture); // depth image
+        //color_image = k4a_capture_get_color_image(capture); // RGB color image
+        ir_image = k4a_capture_get_ir_image(capture); // IR image
+
+        //img_id = clock();
+        //img_id++;
+        //GetLocalTime(&tmp);
+        GetCurrentTimerMS(szTimer);
+        img_id = GetCurrentTimerMS();
+        printf("millisecond:%s,\t%llu\n\n", szTimer, GetCurrentTimerMS()); // milliseconds
+        if (depth_image != NULL) {
+            printf(" | Depth16 res:%4dx%4d stride:%5d\n",
+                   k4a_image_get_height_pixels(depth_image),
+                   k4a_image_get_width_pixels(depth_image),
+                   k4a_image_get_stride_bytes(depth_image)); // Release the image
+        }
+
+        // k4a project function
+        /* vector<k4a_float2_t> k4a_points_2d(points_3d.size());
+        for (size_t i = 0; i < points_3d.size(); i++)
+        {
+            int valid = 0;
+            k4a_calibration_3d_to_2d(&calibration,
+                                     &points_3d[i],
+                                     K4A_CALIBRATION_TYPE_COLOR,
+                                     K4A_CALIBRATION_TYPE_DEPTH,
+                                     &k4a_points_2d[i],
+                                     &valid);
+        }*/
+
+        //if (color_image != NULL) {
+        //    printf(" | color res:%4dx%4d stride:%5d\n",
+        //           k4a_image_get_height_pixels(color_image),
+        //           k4a_image_get_width_pixels(color_image),
+        //           k4a_image_get_stride_bytes(color_image)); // Release the image
+        //}
+
+        if (ir_image != NULL) {
+            printf(" | IR res:%4dx%4d stride:%5d\n",
+                   k4a_image_get_height_pixels(ir_image),
+                   k4a_image_get_width_pixels(ir_image),
+                   k4a_image_get_stride_bytes(ir_image)); // Release the image
+        }
+
+        //---------------------------------------------------------------
+        //uint8_t* colorPtr = k4a_image_get_buffer(color_image);
+        uint16_t* depthPtr = (uint16_t*)k4a_image_get_buffer(depth_image);
+        uint16_t* irPtr = (uint16_t*)k4a_image_get_buffer(ir_image);
+        int _resolution_y = k4a_image_get_height_pixels(depth_image);
+        int _resolution_x = k4a_image_get_width_pixels(depth_image);
+        cv::Mat depth(_resolution_y, _resolution_x, CV_16UC1);
+        memcpy(depth.data, depthPtr, sizeof(uint16_t) * _resolution_x * _resolution_y);
+
+        cv::Mat depth_show(_resolution_y, _resolution_x, CV_8UC1);
+        get_depth_image(depth, depth_show, calibration); // build the 2.5D depth rendering
+
+        namedWindow("depth_render", 0); // show the 2.5D depth rendering
+        imshow("depth_render", depth_show);
+
+        char buf[256];
+        sprintf(buf, "E:/git_quick/azure_kinect_lab/Azure_Kinect_Lab/datasets/%llu-depth.png", img_id);
+        imwrite(buf, depth);
+
+        /* _resolution_y = k4a_image_get_height_pixels(color_image);
+        _resolution_x = k4a_image_get_width_pixels(color_image);
+        cv::Mat color_show(_resolution_y, _resolution_x, CV_8UC4);
+        memcpy(color_show.data, colorPtr, sizeof(uint8_t) * _resolution_x * _resolution_y * 4);*/
+        //namedWindow("color_show", 0);
+        //imshow("color_show", color_show);
+
+        //sprintf(buf, "E:/git_quick/azure_kinect_lab/Azure_Kinect_Lab/datasets/%llu-color.jpg", img_id);
+        //imwrite(buf, color_show);
+
+        _resolution_y = k4a_image_get_height_pixels(ir_image);
+        _resolution_x = k4a_image_get_width_pixels(ir_image);
+        cv::Mat ir_show(_resolution_y, _resolution_x, CV_16UC1);
+        cv::Mat ir_show_8u(_resolution_y, _resolution_x, CV_8UC1);
+        memcpy(ir_show.data, irPtr, sizeof(uint16_t) * _resolution_x * _resolution_y * 1);
+        //normalize(ir_show, ir_show_8u, 0, 256 * 256, NORM_MINMAX, CV_16UC1);
+
+        get_ir_image(ir_show, ir_show_8u, 9000);
+        namedWindow("ir_show", 0);
+        imshow("ir_show", ir_show_8u);
+
+        sprintf(buf, "E:/git_quick/azure_kinect_lab/Azure_Kinect_Lab/datasets/%llu-ir.jpg", img_id);
+        imwrite(buf, ir_show_8u);
+
+        int ir_thr = 25; // IR threshold used to assist 3D point cloud segmentation
+
+        if (waitKey(1) == 27) {
+            break;
+        }
+        //-------------------------------------------
+        // map the color image into the depth camera
+        //point_cloud_color_to_depth(transformation, depth_image, color_image, depth, color_show, ir_show_8u, calibration, ir_thr, img_id);
+
+        // release images
+        k4a_image_release(depth_image);
+        //k4a_image_release(color_image);
+        k4a_image_release(ir_image);
+        depth.release();
+        depth_show.release();
+        //color_show.release();
+        ir_show.release();
+        ir_show_8u.release();
+
+        // Release the capture
+        k4a_capture_release(capture);
+    }
+
+    // Shut down the camera when finished with application logic
+    k4a_device_stop_cameras(device);
+    k4a_device_close(device);
+
+    return 0;
+}
\ No newline at end of file
--
GitLab