Skip to content
This repository has been archived by the owner on Jan 7, 2023. It is now read-only.

When using two cameras (D435), there will be a difference between the color image and the depth image of the camera on one side. #179

Open
lopesnb opened this issue Apr 26, 2022 · 0 comments

Comments

@lopesnb
Copy link

lopesnb commented Apr 26, 2022

When using two D435 cameras simultaneously, the color image and the depth image are misaligned for one of the cameras. What do you think is the cause?
`
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include "example.hpp" // Include short list of convenience functions for rendering
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <map>
#include <string>
#include <vector>
// Forward declarations.

// Blacks out pixels of `other` whose aligned depth is invalid or beyond clipping_dist (in-place, frame buffer).
void remove_background(rs2::video_frame& other, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);

// Same clipping, but applied to an OpenCV matrix copy of the color image.
void remove_background2(cv::Mat& outP, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);

// Returns the depth unit (meters per raw depth value) of the device's depth sensor.
float get_depth_scale(rs2::device dev);

// Picks the stream type (preferring color) that depth should be aligned to.
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);
// Comparator ordering contour indices by descending area.
// Holds a non-owning pointer to the caller's area table; the table must
// outlive the comparator (e.g. for the duration of a std::sort call).
struct AreaCmp {
    explicit AreaCmp(const std::vector<float>& _areas) : areas(&_areas) {}
    // true when contour `a` has a strictly larger area than contour `b`.
    bool operator()(int a, int b) const { return (*areas)[a] > (*areas)[b]; }
    const std::vector<float>* areas;
};
// Requested resolution for both the color and depth streams of every camera.
constexpr int CAMERA_WIDTH = 640;
constexpr int CAMERA_HEIGHT = 480;
// Euclidean distance from the origin to point p.
// The original text lost the '*' between the two p.x factors; also compute in
// double to avoid int overflow when squaring large coordinates.
double absP(cv::Point p)
{
    return std::sqrt(static_cast<double>(p.x) * p.x + static_cast<double>(p.y) * p.y);
}
int main(int argc, char * argv[]) try
{
// Create a simple OpenGL window for rendering:
window app(CAMERA_WIDTH, CAMERA_HEIGHT, "CPP Multi-Camera Example");

rs2::context                          ctx;        // Create librealsense context for managing devices

std::map<std::string, rs2::colorizer> colorizers; // Declare map from device serial number to colorizer (utility class to convert depth data RGB colorspace)
std::map<std::string, std::string> windowName; // Declare map from device serial number to colorizer (utility class to convert depth data RGB colorspace)
std::map<std::string, rs2::align> pipealigns; // Declare map from device serial number to colorizer (utility class to convert depth data RGB colorspace)

std::vector<rs2::pipeline>            pipelines;

// Capture serial numbers before opening streaming
std::vector<std::string>              serials;
for (auto&& dev : ctx.query_devices())
    serials.push_back(dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER));

// Start a streaming pipe per each connected device
float depth_scale;
rs2_stream align_to = RS2_STREAM_ANY;
rs2::align aligin =rs2::align(align_to);
for (auto&& serial : serials)
{
    rs2::pipeline pipe(ctx);
    rs2::config cfg;
    cfg.enable_device(serial);
	cfg.enable_stream(RS2_STREAM_COLOR, CAMERA_WIDTH, CAMERA_HEIGHT);
	cfg.enable_stream(RS2_STREAM_DEPTH, CAMERA_WIDTH, CAMERA_HEIGHT);
	rs2::pipeline_profile profile=pipe.start(cfg);
	//rs2::pipeline_profile profile = pipe.start();
	depth_scale = get_depth_scale(profile.get_device());
	align_to = find_stream_to_align(profile.get_streams());
	aligin=rs2::align(align_to);
	//pipealigns[serial]= rs2::align(align_to);
    pipelines.emplace_back(pipe);
    // Map from each device's serial number to a different colorizer
    colorizers[serial] = rs2::colorizer();
	windowName[serial] = serial.substr(serial.size() - 5, 5);
}

// We'll keep track of the last frame of each stream available to make the presentation persistent
std::map<int, rs2::frame> render_frames;
bool sizeZero = false;
// Main app loop
while (app)
{
    // Collect the new frames from all the connected devices
    std::vector<rs2::frame> new_frames;
    for (auto &&pipe : pipelines)
    {
        rs2::frameset fs;
   //     if (pipe.poll_for_frames(&fs))
        if (pipe.try_wait_for_frames(&fs))
         {

			auto processed = aligin.process(fs);

			// Trying to get both other and aligned depth frames
			rs2::video_frame other_frame = processed.first(align_to);
			rs2::depth_frame aligned_depth_frame = processed.get_depth_frame();

			//If one of them is unavailable, continue iteration
			if (!aligned_depth_frame || !other_frame)
			{
				continue;
			}
			cv::Mat cameraRGB,cameraGray,cameraThresh;

			cv::Mat frame(cv::Size(app.width(), app.height()), CV_8UC3, (void*)other_frame.get_data(), cv::Mat::AUTO_STEP);
			cv::cvtColor(frame, cameraRGB, cv::COLOR_RGB2BGR);
			cv::Mat cameraMat = cameraRGB.clone();

			remove_background2(cameraMat, aligned_depth_frame, depth_scale, 0.6);

			cv::cvtColor(cameraMat, cameraGray, cv::COLOR_BGR2GRAY);
			cv::threshold(cameraGray, cameraThresh, 5, 255, cv::THRESH_BINARY);
			std::vector< std::vector< cv::Point > > contoursArea;
			std::vector< cv::Point >  contoursArea4;
			cv::findContours(cameraThresh, contoursArea, cv::RetrievalModes::RETR_LIST, cv::ContourApproximationModes::CHAIN_APPROX_NONE);
			cv::drawContours(cameraRGB, contoursArea, -1, cv::Scalar(0, 255, 0));

			std::vector<int> sortIdx(contoursArea.size());
			//std::vector<int> sortIdx(0);
			std::vector<float> areas(contoursArea.size());
			
			auto serial = rs2::sensor_from_frame(fs)->get_info(RS2_CAMERA_INFO_SERIAL_NUMBER);
			cv::imshow("color"+windowName[serial], cameraRGB);
			cv::imshow("depth" + windowName[serial], cameraMat);

        }
    }
}
return EXIT_SUCCESS;

}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
// Blacks out every pixel of `outP` whose aligned depth reading is invalid
// (<= 0) or farther than `clipping_dist` meters.
//
// outP          - 8-bit image aligned pixel-for-pixel with `depth_frame`;
//                 modified in place. Assumes same width/height as the depth
//                 frame — TODO(review) confirm at the call site.
// depth_frame   - aligned depth frame, one raw uint16 sample per pixel.
// depth_scale   - meters per raw depth unit (see get_depth_scale()).
// clipping_dist - clipping distance in meters.
void remove_background2(cv::Mat& outP, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
    const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());

    const int width = outP.cols;
    const int height = outP.rows;
    // Derive bytes-per-pixel from the Mat instead of hard-coding 3.
    const int other_bpp = outP.channels();

#pragma omp parallel for schedule(dynamic) // Using OpenMP to try to parallelise the loop
    for (int y = 0; y < height; y++)
    {
        auto depth_pixel_index = y * width;
        // Row pointer keeps the code correct even for non-continuous Mats
        // (raw outP.data indexing silently assumed a continuous buffer).
        uchar* row = outP.ptr<uchar>(y);
        for (int x = 0; x < width; x++, ++depth_pixel_index)
        {
            // Get the depth value of the current pixel
            auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];

            // Check if the depth value is invalid (<=0) or greater than the threshold
            if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
            {
                // Set the pixel to black
                std::memset(row + x * other_bpp, 0x0, other_bpp);
            }
        }
    }
}
// Returns the depth unit — meters represented by one raw depth value — of the
// first depth sensor found on `dev`.
// Throws std::runtime_error if the device has no depth sensor.
float get_depth_scale(rs2::device dev)
{
    // Go over the device's sensors
    for (rs2::sensor& sensor : dev.query_sensors())
    {
        // Check if the sensor is a depth sensor (the original text lost the
        // template brackets of as<>()).
        if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
        {
            return dpt.get_depth_scale();
        }
    }
    throw std::runtime_error("Device does not have a depth sensor");
}
// Given a vector of streams, finds a depth stream and another stream to align
// depth with.  Color streams are prioritized to make the view look better; if
// color is not available, any non-depth stream is taken.
// Throws std::runtime_error when depth, or a stream to align to, is missing.
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
    rs2_stream align_to = RS2_STREAM_ANY;
    bool depth_stream_found = false;
    bool color_stream_found = false;
    for (const rs2::stream_profile& sp : streams)
    {
        rs2_stream profile_stream = sp.stream_type();
        if (profile_stream != RS2_STREAM_DEPTH)
        {
            if (!color_stream_found) // Prefer color
                align_to = profile_stream;

            if (profile_stream == RS2_STREAM_COLOR)
            {
                color_stream_found = true;
            }
        }
        else
        {
            depth_stream_found = true;
        }
    }

    if (!depth_stream_found)
        throw std::runtime_error("No Depth stream available");

    if (align_to == RS2_STREAM_ANY)
        throw std::runtime_error("No stream found to align with Depth");

    return align_to;
}
// Paints black every pixel of `other_frame` whose corresponding depth sample
// is invalid (<= 0) or lies beyond `clipping_dist` meters. Both frames are
// assumed to be pixel-aligned (i.e. depth already aligned to the other stream).
void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
    const auto* depth_data = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
    auto* color_data = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));

    const int w = other_frame.get_width();
    const int h = other_frame.get_height();
    const int bpp = other_frame.get_bytes_per_pixel();

#pragma omp parallel for schedule(dynamic) // Using OpenMP to try to parallelise the loop
    for (int row = 0; row < h; row++)
    {
        for (int col = 0; col < w; col++)
        {
            const int idx = row * w + col;

            // Convert the raw depth sample to meters.
            const float dist = depth_scale * depth_data[idx];

            // Clip pixels whose depth is invalid (<= 0) or past the threshold.
            if (dist <= 0.f || dist > clipping_dist)
            {
                // Zero all bytes of this pixel in the other frame's buffer.
                std::memset(&color_data[idx * bpp], 0x0, bpp);
            }
        }
    }
}

Sign up for free to subscribe to this conversation on GitHub. Already have an account? Sign in.
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant