// Vendor extension-unit command packets ("GM" header 0x47,0x4d) sent via
// setXu(). NOTE(review): byte meanings are inferred from usage only —
// bytes [6..7] look like a command/register id and byte [12] a payload
// value; confirm against the vendor protocol documentation.
const uint8_t OB_EXT_CMD3[16] = { 0x47, 0x4d, 0x04, 0x00, 0x02, 0x00, 0x58, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
// CMD4/CMD5 list only 12 initializers; the remaining 4 bytes of the
// 16-byte packet are zero-filled by aggregate initialization.
const uint8_t OB_EXT_CMD4[16] = { 0x47, 0x4d, 0x02, 0x00, 0x03, 0x00, 0x60, 0x00, 0xed, 0x03, 0x00, 0x00 };
const uint8_t OB_EXT_CMD5[16] = { 0x47, 0x4d, 0x02, 0x00, 0x03, 0x00, 0x62, 0x00, 0xe9, 0x03, 0x00, 0x00 };
// Commands used for Gemini2 / Astra2 devices (depth-to-color align,
// depth stream setup) — see setProperty()/initDepthFrameProcessor().
const uint8_t OB_EXT_CMD6[16] = { 0x47, 0x4d, 0x04, 0x00, 0x02, 0x00, 0x7c, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
const uint8_t OB_EXT_CMD7[16] = { 0x47, 0x4d, 0x04, 0x00, 0x02, 0x00, 0xfe, 0x12, 0x55, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
const uint8_t OB_EXT_CMD8[16] = { 0x47, 0x4d, 0x04, 0x00, 0x02, 0x00, 0xfe, 0x13, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
const uint8_t OB_EXT_CMD9[16] = { 0x47, 0x4d, 0x04, 0x00, 0x02, 0x00, 0xfa, 0x13, 0x4b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
const uint8_t OB_EXT_CMD10[16] = { 0x47, 0x4d, 0x04, 0x00, 0x02, 0x00, 0xfa, 0x13, 0x3f, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00 };
#if defined(HAVE_OBSENSOR_V4L2)
#define fourCc2Int(a, b, c, d) \
{fourCc2Int('Y', 'U', 'Y', '2'), FRAME_FORMAT_YUYV},
{fourCc2Int('M', 'J', 'P', 'G'), FRAME_FORMAT_MJPG},
{fourCc2Int('Y', '1', '6', ' '), FRAME_FORMAT_Y16},
+ {fourCc2Int('Y', '1', '4', ' '), FRAME_FORMAT_Y14},
};
StreamType parseUvcDeviceNameToStreamType(const std::string& devName)
}
}
+
+DepthFrameUnpacker::DepthFrameUnpacker(){
+ outputDataBuf_ = new uint8_t[OUT_DATA_SIZE];
+}
+
+DepthFrameUnpacker::~DepthFrameUnpacker(){
+ delete[] outputDataBuf_;
+}
+
+
// Bit-extraction helpers used by DepthFrameUnpacker::process().
// Parameters are fully parenthesized so expression arguments (e.g.
// TAKE_BITS(a | b, n + 1, off)) expand correctly.
#define ON_BITS(count) ((1 << (count)) - 1)
#define CREATE_MASK(count, offset) (ON_BITS(count) << (offset))
#define TAKE_BITS(source, count, offset) (((source) & CREATE_MASK(count, offset)) >> (offset))
+void DepthFrameUnpacker::process(Frame *frame){
+ const uint8_t tarStep = 16;
+ const uint8_t srcStep = 28;
+ uint16_t *tar = (uint16_t *)outputDataBuf_;
+ uint8_t *src = frame->data;
+
+ uint32_t pixelSize = frame->width * frame->height;
+ for(uint32_t i = 0; i < pixelSize; i += tarStep) {
+ tar[0] = (TAKE_BITS(src[0], 8, 0) << 6) | TAKE_BITS(src[1], 6, 2);
+ tar[1] = (TAKE_BITS(src[1], 2, 0) << 12) | (TAKE_BITS(src[2], 8, 0) << 4) | TAKE_BITS(src[3], 4, 4);
+ tar[2] = (TAKE_BITS(src[3], 4, 0) << 10) | (TAKE_BITS(src[4], 8, 0) << 2) | TAKE_BITS(src[5], 2, 6);
+ tar[3] = (TAKE_BITS(src[5], 6, 0) << 8) | TAKE_BITS(src[6], 8, 0);
+
+ tar[4] = (TAKE_BITS(src[7], 8, 0) << 6) | TAKE_BITS(src[8], 6, 2);
+ tar[5] = (TAKE_BITS(src[8], 2, 0) << 12) | (TAKE_BITS(src[9], 8, 0) << 4) | TAKE_BITS(src[10], 4, 4);
+ tar[6] = (TAKE_BITS(src[10], 4, 0) << 10) | (TAKE_BITS(src[11], 8, 0) << 2) | TAKE_BITS(src[12], 2, 6);
+ tar[7] = (TAKE_BITS(src[12], 6, 0) << 8) | TAKE_BITS(src[13], 8, 0);
+
+ tar[8] = (TAKE_BITS(src[14], 8, 0) << 6) | TAKE_BITS(src[15], 6, 2);
+ tar[9] = (TAKE_BITS(src[15], 2, 0) << 12) | (TAKE_BITS(src[16], 8, 0) << 4) | TAKE_BITS(src[17], 4, 4);
+ tar[10] = (TAKE_BITS(src[17], 4, 0) << 10) | (TAKE_BITS(src[18], 8, 0) << 2) | TAKE_BITS(src[19], 2, 6);
+ tar[11] = (TAKE_BITS(src[19], 6, 0) << 8) | TAKE_BITS(src[20], 8, 0);
+
+ tar[12] = (TAKE_BITS(src[21], 8, 0) << 6) | TAKE_BITS(src[22], 6, 2);
+ tar[13] = (TAKE_BITS(src[22], 2, 0) << 12) | (TAKE_BITS(src[23], 8, 0) << 4) | TAKE_BITS(src[24], 4, 4);
+ tar[14] = (TAKE_BITS(src[24], 4, 0) << 10) | (TAKE_BITS(src[25], 8, 0) << 2) | TAKE_BITS(src[26], 2, 6);
+ tar[15] = (TAKE_BITS(src[26], 6, 0) << 8) | TAKE_BITS(src[27], 8, 0);
+
+ src += srcStep;
+ tar += tarStep;
+ }
+ frame->data = outputDataBuf_;
+ frame->format = FRAME_FORMAT_Y16;
+}
+
IUvcStreamChannel::IUvcStreamChannel(const UvcDeviceInfo& devInfo) :
devInfo_(devInfo),
streamType_(parseUvcDeviceNameToStreamType(devInfo_.name))
return streamType_;
}
+uint16_t IUvcStreamChannel::getPid() const {
+ return devInfo_.pid;
+};
+
bool IUvcStreamChannel::setProperty(int propId, const uint8_t* /*data*/, uint32_t /*dataSize*/)
{
uint8_t* rcvData;
switch (propId)
{
case DEPTH_TO_COLOR_ALIGN:
- // todo: value filling
- rst &= setXu(2, OB_EXT_CMD0, sizeof(OB_EXT_CMD0));
- rst &= getXu(2, &rcvData, &rcvLen);
- rst &= setXu(2, OB_EXT_CMD1, sizeof(OB_EXT_CMD1));
- rst &= getXu(2, &rcvData, &rcvLen);
- rst &= setXu(2, OB_EXT_CMD2, sizeof(OB_EXT_CMD2));
- rst &= getXu(2, &rcvData, &rcvLen);
- rst &= setXu(2, OB_EXT_CMD3, sizeof(OB_EXT_CMD3));
- rst &= getXu(2, &rcvData, &rcvLen);
+ if(OBSENSOR_GEMINI2_PID == devInfo_.pid ){
+ rst &= setXu(2, OB_EXT_CMD8, sizeof(OB_EXT_CMD8));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ rst &= setXu(2, OB_EXT_CMD6, sizeof(OB_EXT_CMD6));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ }
+ else if(OBSENSOR_ASTRA2_PID == devInfo_.pid ){
+ rst &= setXu(2, OB_EXT_CMD10, sizeof(OB_EXT_CMD8));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ rst &= setXu(2, OB_EXT_CMD6, sizeof(OB_EXT_CMD6));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ }
+ else{
+ rst &= setXu(2, OB_EXT_CMD0, sizeof(OB_EXT_CMD0));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ rst &= setXu(2, OB_EXT_CMD1, sizeof(OB_EXT_CMD1));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ rst &= setXu(2, OB_EXT_CMD2, sizeof(OB_EXT_CMD2));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ rst &= setXu(2, OB_EXT_CMD3, sizeof(OB_EXT_CMD3));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ }
break;
default:
rst = false;
switch (propId)
{
case CAMERA_PARAM:
- rst &= setXu(2, OB_EXT_CMD5, sizeof(OB_EXT_CMD5));
- rst &= getXu(2, &rcvData, &rcvLen);
- if (rst && OB_EXT_CMD5[6] == rcvData[6] && rcvData[8] == 0 && rcvData[9] == 0)
- {
- memcpy(recvData, rcvData + 10, rcvLen - 10);
- *recvDataSize = rcvLen - 10;
+ if(OBSENSOR_GEMINI2_PID == devInfo_.pid){
+ // return default param
+ CameraParam param;
+ param.p0[0] = 516.652f;
+ param.p0[1] = 516.692f;
+ param.p0[2] = 322.988f;
+ param.p0[3] = 235.787f;
+ param.p1[0] = 516.652f;
+ param.p1[1] = 516.692f;
+ param.p1[2] = 322.988f;
+ param.p1[3] = 235.787f;
+ param.p6[0] = 640;
+ param.p6[1] = 480;
+ param.p7[0] = 640;
+ param.p7[1] = 480;
+ *recvDataSize = sizeof(CameraParam);
+ memcpy(recvData, ¶m, *recvDataSize);
+ }
+ else if(OBSENSOR_ASTRA2_PID == devInfo_.pid){
+ // return default param
+ CameraParam param;
+ param.p0[0] = 558.151f;
+ param.p0[1] = 558.003f;
+ param.p0[2] = 312.546f;
+ param.p0[3] = 241.169f;
+ param.p1[0] = 558.151f;
+ param.p1[1] = 558.003f;
+ param.p1[2] = 312.546f;
+ param.p1[3] = 241.169f;
+ param.p6[0] = 640;
+ param.p6[1] = 480;
+ param.p7[0] = 640;
+ param.p7[1] = 480;
+ *recvDataSize = sizeof(CameraParam);
+ memcpy(recvData, ¶m, *recvDataSize);
+ }
+ else{
+ rst &= setXu(2, OB_EXT_CMD5, sizeof(OB_EXT_CMD5));
+ rst &= getXu(2, &rcvData, &rcvLen);
+ if (rst && OB_EXT_CMD5[6] == rcvData[6] && rcvData[8] == 0 && rcvData[9] == 0)
+ {
+ memcpy(recvData, rcvData + 10, rcvLen - 10);
+ *recvDataSize = rcvLen - 10;
+ }
}
break;
default:
bool IUvcStreamChannel::initDepthFrameProcessor()
{
- if (streamType_ == OBSENSOR_STREAM_DEPTH && setXu(2, OB_EXT_CMD4, sizeof(OB_EXT_CMD4)))
+ if(OBSENSOR_GEMINI2_PID == devInfo_.pid || OBSENSOR_ASTRA2_PID == devInfo_.pid){
+ uint8_t* rcvData;
+ uint32_t rcvLen;
+
+ setXu(2, OB_EXT_CMD7, sizeof(OB_EXT_CMD7));
+ getXu(2, &rcvData, &rcvLen);
+
+ setXu(2, OB_EXT_CMD9, sizeof(OB_EXT_CMD9));
+ getXu(2, &rcvData, &rcvLen);
+
+ depthFrameProcessor_ = makePtr<DepthFrameUnpacker>();
+ return true;
+ }
+ else if (streamType_ == OBSENSOR_STREAM_DEPTH && setXu(2, OB_EXT_CMD4, sizeof(OB_EXT_CMD4)))
{
uint8_t* rcvData;
uint32_t rcvLen;
{
static const obsensor::StreamProfile colorProfile = { 640, 480, 30, obsensor::FRAME_FORMAT_MJPG };
static const obsensor::StreamProfile depthProfile = {640, 480, 30, obsensor::FRAME_FORMAT_Y16};
+ static const obsensor::StreamProfile gemini2depthProfile = {1280, 800, 30, obsensor::FRAME_FORMAT_Y14};
+ static const obsensor::StreamProfile astra2depthProfile = {640, 480, 30, obsensor::FRAME_FORMAT_Y14};
streamChannelGroup_ = obsensor::getStreamChannelGroup(index);
if (!streamChannelGroup_.empty())
channel->start(colorProfile, [&](obsensor::Frame* frame) {
std::unique_lock<std::mutex> lk(frameMutex_);
colorFrame_ = Mat(1, frame->dataSize, CV_8UC1, frame->data).clone();
+ frameCv_.notify_all();
});
break;
case obsensor::OBSENSOR_STREAM_DEPTH:
{
uint8_t data = 1;
channel->setProperty(obsensor::DEPTH_TO_COLOR_ALIGN, &data, 1);
- channel->start(depthProfile, [&](obsensor::Frame* frame) {
+
+ obsensor::StreamProfile profile = depthProfile;
+ if(OBSENSOR_GEMINI2_PID == channel->getPid()){
+ profile = gemini2depthProfile;
+ }
+ else if(OBSENSOR_ASTRA2_PID == channel->getPid()){
+
+ profile = astra2depthProfile;
+ }
+
+ channel->start(profile, [&](obsensor::Frame* frame) {
std::unique_lock<std::mutex> lk(frameMutex_);
depthFrame_ = Mat(frame->height, frame->width, CV_16UC1, frame->data, frame->width * 2).clone();
+ frameCv_.notify_all();
});
uint32_t len;
}
}
+VideoCapture_obsensor::~VideoCapture_obsensor(){
+ for (auto& channel : streamChannelGroup_)
+ {
+ channel->stop();
+ }
+ streamChannelGroup_.clear();
+}
+
bool VideoCapture_obsensor::grabFrame()
{
std::unique_lock<std::mutex> lk(frameMutex_);
+ // Try waiting for 33 milliseconds to ensure that both depth and color frame have been received!
+ frameCv_.wait_for(lk, std::chrono::milliseconds(33), [&](){ return !depthFrame_.empty() && !colorFrame_.empty(); });
+
grabbedDepthFrame_ = depthFrame_;
grabbedColorFrame_ = colorFrame_;
case CAP_OBSENSOR_DEPTH_MAP:
if (!grabbedDepthFrame_.empty())
{
- grabbedDepthFrame_.copyTo(frame);
+ if(OBSENSOR_GEMINI2_PID == streamChannelGroup_.front()->getPid()){
+ grabbedDepthFrame_ = grabbedDepthFrame_*0.8;
+ Rect rect(320, 160, 640, 480);
+ grabbedDepthFrame_(rect).copyTo(frame);
+ }
+ else if(OBSENSOR_ASTRA2_PID == streamChannelGroup_.front()->getPid()){
+ grabbedDepthFrame_ = grabbedDepthFrame_*0.8;
+ grabbedDepthFrame_.copyTo(frame);
+ }
+ else{
+ grabbedDepthFrame_.copyTo(frame);
+ }
grabbedDepthFrame_.release();
return true;
}