The first commit

This commit is contained in:
qupengwei
2026-01-04 16:51:58 +08:00
parent 684a923cda
commit 9b2a6bf423
107 changed files with 35063 additions and 0 deletions

View File

@@ -0,0 +1,218 @@
#ifndef SAMPLE_COMMON_ISP_HPP_
#define SAMPLE_COMMON_ISP_HPP_
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
#include <algorithm>
#include "TyIsp.h"
/**
* Some cameras output RGB image data as a raw Bayer array.
* The APIs provided in this file convert that Bayer data into a BGR array.
* See the sample code SimpleView_FetchFrame for reference.
*/
static int __TYCompareFirmwareVersion(const TY_DEVICE_BASE_INFO &info, int major, int minor){
const TY_VERSION_INFO &v = info.firmwareVersion;
if (v.major < major){
return -1;
}
if (v.major == major && v.minor < minor){
return -1;
}
if (v.major == major && v.minor == minor){
return 0;
}
return 1;
}
static TY_STATUS __TYDetectOldVer21ColorCam(TY_DEV_HANDLE dev_handle,bool *is_v21_color_device){
TY_DEVICE_BASE_INFO info;
TY_STATUS res = TYGetDeviceInfo(dev_handle, &info);
if (res != TY_STATUS_OK){
LOGI("get device info failed");
return res;
}
*is_v21_color_device = false;
if (info.iface.type == TY_INTERFACE_USB){
*is_v21_color_device = true;
}
if ((info.iface.type == TY_INTERFACE_ETHERNET || info.iface.type == TY_INTERFACE_RAW) &&
__TYCompareFirmwareVersion(info, 2, 2) < 0){
*is_v21_color_device = true;
}
return TY_STATUS_OK;
}
static void __TYParseSizeFromImageMode(TY_IMAGE_MODE mode , int *image_size) {
const int mask = ((0x01 << 12) - 1);
int height = mode & mask;
int width = (mode >> 12) & mask;
image_size[0] = width;
image_size[1] = height;
}
/// Initialize color ISP settings for raw Bayer image processing.
static TY_STATUS ColorIspInitSetting(TY_ISP_HANDLE isp_handle, TY_DEV_HANDLE dev_handle){
bool is_v21_color_device ;
TY_STATUS res = __TYDetectOldVer21ColorCam(dev_handle, &is_v21_color_device);//old version device has different config
if (res != TY_STATUS_OK){
return res;
}
if (is_v21_color_device){
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_BLACK_LEVEL, 11));
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_BLACK_LEVEL_GAIN, 256.f / (256 - 11)));
}
else{
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_BLACK_LEVEL, 0));
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_BLACK_LEVEL_GAIN, 1.f));
bool b;
ASSERT_OK(TYHasFeature(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_ANALOG_GAIN, &b));
if (b){
TYSetInt(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_ANALOG_GAIN, 1);
}
}
TYISPSetFeature(isp_handle, TY_ISP_FEATURE_BAYER_PATTERN, TY_ISP_BAYER_AUTO);
float shading[9] = { 0.30890417098999026, 10.63355541229248, -6.433426856994629,
0.24413758516311646, 11.739893913269043, -8.148622512817383,
0.1255662441253662, 11.88359546661377, -7.865192413330078 };
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_SHADING, (uint8_t*)shading, sizeof(shading)));
int shading_center[2] = { 640, 480 };
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_SHADING_CENTER, (uint8_t*)shading_center, sizeof(shading_center)));
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_CCM_ENABLE, 0));//we are not using ccm by default
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_CAM_DEV_HANDLE, (uint8_t*)&dev_handle, sizeof(dev_handle)));
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_CAM_DEV_COMPONENT, int32_t(TY_COMPONENT_RGB_CAM)));
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_GAMMA, 1.f));
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_AUTOBRIGHT, 1));//enable auto bright control
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_ENABLE_AUTO_EXPOSURE_GAIN, 0));//disable ae by default
int default_image_size[2] = { 1280, 960 };// image size
int current_image_size[2] = { 1280, 960 };// image size for current parameters
TY_IMAGE_MODE img_mode;
#if 1
res = TYGetEnum(dev_handle, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, &img_mode);
if (res == TY_STATUS_OK) {
__TYParseSizeFromImageMode(img_mode, current_image_size);
}
TY_ENUM_ENTRY mode_entry[10];
uint32_t num;
res = TYGetEnumEntryInfo(dev_handle, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, mode_entry, 10, &num);
if (res == TY_STATUS_OK) {
__TYParseSizeFromImageMode(mode_entry[0].value, default_image_size);
}
#else
//some devices may not support the WIDTH & HEIGHT features; IMAGE_MODE is recommended
TYGetInt(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_WIDTH, &current_image_size[0]);
TYGetInt(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_HEIGHT, &current_image_size[1]);
#endif
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_IMAGE_SIZE, (uint8_t*)&default_image_size, sizeof(default_image_size)));//the original raw image size
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_INPUT_RESAMPLE_SCALE, default_image_size[0] / current_image_size[0]));//resampled input
#if 1
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_ENABLE_AUTO_WHITEBALANCE, 1)); //enable auto white balance
#else
//manual wb gain control
const float wb_rgb_gain[3] = { 2.0123140811920168, 1, 1.481866478919983 };
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_WHITEBALANCE_GAIN, (uint8_t*)wb_rgb_gain, sizeof(wb_rgb_gain)));
#endif
//try to load device-specific config from device storage
TY_COMPONENT_ID comp_all;
ASSERT_OK(TYGetComponentIDs(dev_handle, &comp_all));
if (!(comp_all & TY_COMPONENT_STORAGE)){
return TY_STATUS_OK;
}
bool has_isp_block = false;
ASSERT_OK(TYHasFeature(dev_handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_ISP_BLOCK, &has_isp_block));
if (!has_isp_block){
return TY_STATUS_OK;
}
uint32_t sz = 0;
ASSERT_OK(TYGetByteArraySize(dev_handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_ISP_BLOCK, &sz));
if (sz <= 0){
return TY_STATUS_OK;
}
std::vector<uint8_t> buff(sz);
ASSERT_OK(TYGetByteArray(dev_handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_ISP_BLOCK, &buff[0], buff.size()));
res = TYISPLoadConfig(isp_handle, &buff[0], buff.size());
if (res == TY_STATUS_OK){
LOGD("Load RGB ISP Config From Device");
}
return TY_STATUS_OK;
}
static TY_STATUS ColorIspInitAutoExposure(TY_ISP_HANDLE isp_handle, TY_DEV_HANDLE dev_handle){
bool is_v21_color_device;
TY_STATUS res = __TYDetectOldVer21ColorCam(dev_handle, &is_v21_color_device);//old version device has different config
if (res != TY_STATUS_OK){
return res;
}
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_ENABLE_AUTO_EXPOSURE_GAIN, 1));
// do not enable gain auto control by default
# if 1
int auto_gain_range[2] = { -1, -1 };
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_AUTO_GAIN_RANGE, (uint8_t*)&auto_gain_range, sizeof(auto_gain_range)));
#else
if(is_v21_color_device){
const int old_auto_gain_range[2] = { 33, 255 };
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_AUTO_GAIN_RANGE, (uint8_t*)&old_auto_gain_range, sizeof(old_auto_gain_range)));
}
else{
#define CHECK_GO_FAILED(a) {if((a)!=TY_STATUS_OK) break;}
do{
TY_FEATURE_ID_LIST feature_id = TY_INT_GAIN;
bool val;
CHECK_GO_FAILED(TYHasFeature(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_GAIN, &val));
if (val) {
feature_id = TY_INT_GAIN;
}
CHECK_GO_FAILED(TYHasFeature(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_R_GAIN, &val));
if (val) {
feature_id = TY_INT_R_GAIN;
}
int auto_gain_range[2] = { 15, 255 };
TY_INT_RANGE range;
CHECK_GO_FAILED(TYGetIntRange(dev_handle, TY_COMPONENT_RGB_CAM, feature_id, &range));
auto_gain_range[0] = std::min(range.min + 1, range.max);
auto_gain_range[1] = std::max(range.max - 1, range.min);
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_AUTO_GAIN_RANGE, (uint8_t*)&auto_gain_range, sizeof(auto_gain_range)));
} while(0);
#undef CHECK_GO_FAILED
}
#endif
//constraint exposure time
int auto_expo_range[2] = { 10, 100 };
TY_INT_RANGE range;
res = TYGetIntRange(dev_handle, TY_COMPONENT_RGB_CAM, TY_INT_EXPOSURE_TIME, &range);
if (res == TY_STATUS_OK) {
auto_expo_range[0] = std::min(range.min + 1, range.max);
auto_expo_range[1] = std::max(range.max - 1, range.min);
}
ASSERT_OK(TYISPSetFeature(isp_handle, TY_ISP_FEATURE_AUTO_EXPOSURE_RANGE, (uint8_t*)&auto_expo_range, sizeof(auto_expo_range)));
return TY_STATUS_OK;
}
static TY_STATUS ColorIspShowSupportedFeatures(TY_ISP_HANDLE handle){
int sz;
TY_STATUS res = TYISPGetFeatureInfoListSize(handle,&sz);
if (res != TY_STATUS_OK){
return res;
}
std::vector<TY_ISP_FEATURE_INFO> info;
info.resize(sz);
TYISPGetFeatureInfoList(handle, &info[0], info.size());
for (int idx = 0; idx < sz; idx++){
printf("feature name : %-50s type : %s \n", info[idx].name, info[idx].value_type);
}
return TY_STATUS_OK;
}
#endif
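A minimal usage sketch for the helpers above. It assumes hDevice is a device handle opened through the regular TYOpenDevice flow and isp_handle is an ISP handle created elsewhere via TyIsp.h; those steps and the include path are assumptions, not part of this commit.

#include "common_isp.hpp" // hypothetical path to the header above

// dev_handle / isp_handle are assumed to be valid handles obtained elsewhere.
static TY_STATUS SetupColorIsp(TY_DEV_HANDLE dev_handle, TY_ISP_HANDLE isp_handle)
{
    // black level, lens shading, white balance and raw image size
    TY_STATUS res = ColorIspInitSetting(isp_handle, dev_handle);
    if (res != TY_STATUS_OK) return res;
    // optional: software auto exposure with clamped gain/exposure ranges
    res = ColorIspInitAutoExposure(isp_handle, dev_handle);
    if (res != TY_STATUS_OK) return res;
    // print the ISP features supported by this SDK build
    return ColorIspShowSupportedFeatures(isp_handle);
}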

View File

@@ -0,0 +1,126 @@
#ifndef CMDLINE_FEATURE_HELPER__H_
#define CMDLINE_FEATURE_HELPER__H_
#include "CommandLineParser.hpp"
#include "TYApi.h"
#include "Utils.hpp"
/// @brief component id + feature id bound to a command line feature param
struct ty_fetaure_options
{
int component_id;
int feature_id;
ty_fetaure_options(int comp_id = 0, int _f_id = 0)
{
component_id = comp_id;
feature_id = _f_id;
}
};
/// @brief command line feature helper for set device feature by command line args
class CommandLineFeatureHelper
{
public:
TyCommandlineParser<ty_fetaure_options> cmd_parser; ///< command line parser
/// @brief add feature param to command line parser
/// @param param command line param name
/// @param comp_id component id, 0 if the param is not a device feature
/// @param feat_id feature id, 0 if the param is not a device feature
/// @param val default value
/// @param desc description
/// @param is_flag flag only, takes no value
void add_feature(const std::string &param, int comp_id, int feat_id, int val, const std::string &desc, bool is_flag = false)
{
cmd_parser.addItem(param, desc, is_flag, std::to_string(val), ty_fetaure_options(comp_id, feat_id));
}
/// @brief add feature param to command line parser
/// @param param command line param name
/// @param comp_id component id, 0 if the param is not a device feature
/// @param feat_id feature id, 0 if the param is not a device feature
/// @param val default value
/// @param desc description
/// @param is_flag flag only, takes no value
void add_feature(const std::string &param, int comp_id, int feat_id, std::string val, const std::string &desc, bool is_flag = false)
{
cmd_parser.addItem(param, desc, is_flag, val, ty_fetaure_options(comp_id, feat_id));
}
/// @brief get a previously added feature param from the command line parser
/// @param name command line param name
/// @return command line item, or nullptr if not found
const TyCommandlineItem<ty_fetaure_options> *get_feature(const std::string &name) const
{
auto res = cmd_parser.get(name);
return res;
}
/// @brief get the command line usage description
/// @return usage string
std::string usage_describe() const
{
return cmd_parser.getUsage();
}
/// @brief parse command line args
void parse_argv(int argc, char *argv[])
{
cmd_parser.parse(argc, argv);
}
/// @brief set command line param to device
/// @param hDevice device handle
void set_device_feature(TY_DEV_HANDLE hDevice)
{
// loop for all command line argv items and set to device
for (auto &kv : cmd_parser.cmd_items)
{
auto &p = kv.second;
int res = TY_STATUS_OK;
if (!p.has_set)
{
continue;
}
int feature_id = p.ctx.feature_id;
int comp_id = p.ctx.component_id;
if (comp_id == 0 && feature_id == 0)
{
// param is not a feature setting
continue;
}
// set feature by type
int type = feature_id & 0xf000;
if (type == TY_FEATURE_INT)
{
int val = p.get_int_val();
LOGD("set feature %s (compId 0x%x featId 0x%x) to %d", p.name.c_str(), comp_id, feature_id, val);
res = TYSetInt(hDevice, comp_id, feature_id, val);
}
else if (type == TY_FEATURE_BOOL)
{
bool val = p.get_bool_val();
LOGD("set feature %s (compId 0x%x featId 0x%x) to %d", p.name.c_str(), comp_id, feature_id, val);
res = TYSetBool(hDevice, comp_id, feature_id, val);
}
else if (type == TY_FEATURE_FLOAT)
{
float val = p.get_float_val();
LOGD("set feature %s (compId 0x%x featId 0x%x) to %f", p.name.c_str(), comp_id, feature_id, val);
res = TYSetFloat(hDevice, comp_id, feature_id, val);
}
else
{
LOGE("unknow feature type %d for %s", type, p.name.c_str());
continue;
}
if (res != TY_STATUS_OK)
{
LOGE("set feature %s (%s) FAILED with return status code %d", p.name.c_str(), p.describe.c_str(), res);
}
}
}
};
#endif // CMDLINE_FEATURE_HELPER__H_
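A sketch of how this helper might be wired into a sample's main(); hDevice is assumed to be an already opened device handle, the header file name is hypothetical, and the option name rgb_exp with default 1088 is purely illustrative.

#include <cstdio>
#include "CmdLineFeatureHelper.hpp" // hypothetical file name for the header above

// hDevice is assumed to be an already opened device handle.
void applyCmdlineFeatures(TY_DEV_HANDLE hDevice, int argc, char* argv[])
{
    CommandLineFeatureHelper feature_helper;
    // "-rgb_exp <value>" maps to the RGB camera exposure-time feature.
    feature_helper.add_feature("rgb_exp", TY_COMPONENT_RGB_CAM, TY_INT_EXPOSURE_TIME,
                               1088, "RGB camera exposure time");
    // A plain flag that is not a device feature (comp_id = 0, feat_id = 0).
    feature_helper.add_feature("list", 0, 0, 0, "list devices and exit", true);
    feature_helper.parse_argv(argc, argv);
    printf("%s\n", feature_helper.usage_describe().c_str());
    // Apply every feature option that was actually given on the command line.
    feature_helper.set_device_feature(hDevice);
}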

View File

@@ -0,0 +1,173 @@
#ifndef _TYP_COMMAND_LINE_PARSER_HPP
#define _TYP_COMMAND_LINE_PARSER_HPP
#include <string>
#include <vector>
#include <map>
#include <algorithm>
#include <cstdio>
/// @brief command line arg item
/// @tparam T context type
template <class T>
class TyCommandlineItem
{
public:
TyCommandlineItem(const std::string &name = "",
const std::string &describe = "",
bool is_flag = false,
const std::string &default_val = "")
{
this->name = name;
this->describe = describe;
this->default_val = default_val;
this->is_flag = is_flag;
has_set = false;
curr_val = default_val;
}
std::string name, describe; ///< name and description
std::string default_val; ///< default value
bool is_flag; ///< flag only, no value
T ctx; ///< context
bool has_set; ///< has set by command line
std::string curr_val; ///< current arg value
int get_int_val() const
{
return std::stoi(curr_val);
}
float get_float_val() const
{
return std::stof(curr_val);
}
double get_double_val() const
{
return std::stod(curr_val);
}
std::string get_str_val() const
{
return curr_val;
}
bool get_bool_val() const
{
return curr_val == "true" || curr_val == "1";
}
};
////--------------------
/// @brief command line parser
/// @tparam T context type
template <class T>
class TyCommandlineParser
{
public:
std::map<std::string, TyCommandlineItem<T>> cmd_items; ///< command line items
/// @brief add command line item
/// @param name item name
/// @param describe item description
/// @param is_flag is flag only
/// @param default_val default value
/// @param ctx context
void addItem(const std::string &name,
const std::string &describe,
bool is_flag = false,
const std::string &default_val = "0",
T ctx = T())
{
TyCommandlineItem<T> item(name, describe, is_flag, default_val);
item.ctx = ctx;
cmd_items.emplace(name, item);
}
/// @brief clear all items
void clear()
{
cmd_items.clear();
}
/// @brief parse command line
/// @param argc arg count
/// @param argv arg list
/// @return always 0 (unknown params are ignored with a warning)
int parse(int argc, char *argv[])
{
int idx = 1;
while (idx < argc)
{
std::string arg = argv[idx];
if (arg[0] != '-')
{
// not an option; skip it but still advance to avoid an infinite loop
idx++;
continue;
}
arg = arg.substr(1);
auto find_res = cmd_items.find(arg);
if (find_res == cmd_items.end()) {
printf("TyCommandlineParser: ignoring unknown param: %s\n", arg.c_str());
idx++;
continue;
}
auto& item = find_res->second;
item.has_set = true;
item.curr_val = item.default_val;
if (idx + 1 < argc && !item.is_flag)
{
item.curr_val = argv[idx + 1];
idx++;
}
idx++;
}
return 0;
}
/// @brief get command line item
/// @param name item name
/// @return item
const TyCommandlineItem<T> *get(const std::string &name) const
{
auto find_res = cmd_items.find(name);
if (find_res != cmd_items.end()) {
return &find_res->second;
}
LOGE("ERROR: not find command argv by name %s ", name.c_str());
return nullptr;
}
/// @brief get usage string
/// @return usage string
std::string getUsage() const
{
std::string usage = "Usage: \n";
size_t max_name_len = 1;
for (auto& kv : cmd_items) {
max_name_len = std::max(kv.first.size(), max_name_len);
}
for (auto& kv : cmd_items)
{
const auto &cmd = kv.second;
std::string name = cmd.name;
if (name.size() < max_name_len) {
name.append(max_name_len - name.size(), ' ');
}
usage += " -" + name + " ";
if (!cmd.is_flag)
{
usage += "<value> ";
}
else {
usage += " ";
}
usage += cmd.describe + " \n";
}
return usage;
}
};
#endif // _TYP_COMMAND_LINE_PARSER_HPP
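A self-contained sketch of the parser used on its own; the context template parameter is unused here, so int is passed, and the option names are illustrative.

#include <cstdio>
#include "CommandLineParser.hpp" // the header above, as included by CmdLineFeatureHelper

int main(int argc, char* argv[])
{
    TyCommandlineParser<int> parser; // context type is unused in this sketch
    parser.addItem("ip", "device IP address", false, "192.168.1.100");
    parser.addItem("verbose", "enable verbose log", true);
    parser.parse(argc, argv);
    printf("%s", parser.getUsage().c_str());
    const TyCommandlineItem<int>* ip = parser.get("ip");
    if (ip) {
        printf("ip = %s (given on command line: %d)\n",
               ip->get_str_val().c_str(), (int)ip->has_set);
    }
    return 0;
}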

View File

@@ -0,0 +1,647 @@
#include "DepthInpainter.hpp"
#include <stdint.h>
#ifdef OPENCV_DEPENDENCIES
#include <opencv2/opencv.hpp>
#ifndef CV_VERSION_EPOCH
#if defined (CV_MAJOR_VERSION) && (CV_VERSION_MAJOR == 4)
#include <opencv2/imgproc/types_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/photo/legacy/constants_c.h>
#include <opencv2/imgcodecs/legacy/constants_c.h>
#endif
#endif
using namespace cv;
#undef CV_MAT_ELEM_PTR_FAST
#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \
((mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col))
inline float
min4( float a, float b, float c, float d )
{
a = MIN(a,b);
c = MIN(c,d);
return MIN(a,c);
}
#define CV_MAT_3COLOR_ELEM(img,type,y,x,c) CV_MAT_ELEM(img,type,y,(x)*3+(c))
#define KNOWN 0 //known outside narrow band
#define BAND 1 //narrow band (known)
#define INSIDE 2 //unknown
#define CHANGE 3 //service
typedef struct CvHeapElem
{
float T;
int i,j;
struct CvHeapElem* prev;
struct CvHeapElem* next;
}
CvHeapElem;
class CvPriorityQueueFloat
{
protected:
CvHeapElem *mem,*empty,*head,*tail;
int num,in;
public:
bool Init( const CvMat* f )
{
int i,j;
for( i = num = 0; i < f->rows; i++ )
{
for( j = 0; j < f->cols; j++ )
num += CV_MAT_ELEM(*f,uchar,i,j)!=0;
}
if (num<=0) return false;
mem = (CvHeapElem*)cvAlloc((num+2)*sizeof(CvHeapElem));
if (mem==NULL) return false;
head = mem;
head->i = head->j = -1;
head->prev = NULL;
head->next = mem+1;
head->T = -FLT_MAX;
empty = mem+1;
for (i=1; i<=num; i++) {
mem[i].prev = mem+i-1;
mem[i].next = mem+i+1;
mem[i].i = -1;
mem[i].T = FLT_MAX;
}
tail = mem+i;
tail->i = tail->j = -1;
tail->prev = mem+i-1;
tail->next = NULL;
tail->T = FLT_MAX;
return true;
}
bool Add(const CvMat* f) {
int i,j;
for (i=0; i<f->rows; i++) {
for (j=0; j<f->cols; j++) {
if (CV_MAT_ELEM(*f,uchar,i,j)!=0) {
if (!Push(i,j,0)) return false;
}
}
}
return true;
}
bool Push(int i, int j, float T) {
CvHeapElem *tmp=empty,*add=empty;
if (empty==tail) return false;
while (tmp->prev->T>T) tmp = tmp->prev;
if (tmp!=empty) {
add->prev->next = add->next;
add->next->prev = add->prev;
empty = add->next;
add->prev = tmp->prev;
add->next = tmp;
add->prev->next = add;
add->next->prev = add;
} else {
empty = empty->next;
}
add->i = i;
add->j = j;
add->T = T;
in++;
// printf("push i %3d j %3d T %12.4e in %4d\n",i,j,T,in);
return true;
}
bool Pop(int *i, int *j) {
CvHeapElem *tmp=head->next;
if (empty==tmp) return false;
*i = tmp->i;
*j = tmp->j;
tmp->prev->next = tmp->next;
tmp->next->prev = tmp->prev;
tmp->prev = empty->prev;
tmp->next = empty;
tmp->prev->next = tmp;
tmp->next->prev = tmp;
empty = tmp;
in--;
// printf("pop i %3d j %3d T %12.4e in %4d\n",tmp->i,tmp->j,tmp->T,in);
return true;
}
bool Pop(int *i, int *j, float *T) {
CvHeapElem *tmp=head->next;
if (empty==tmp) return false;
*i = tmp->i;
*j = tmp->j;
*T = tmp->T;
tmp->prev->next = tmp->next;
tmp->next->prev = tmp->prev;
tmp->prev = empty->prev;
tmp->next = empty;
tmp->prev->next = tmp;
tmp->next->prev = tmp;
empty = tmp;
in--;
// printf("pop i %3d j %3d T %12.4e in %4d\n",tmp->i,tmp->j,tmp->T,in);
return true;
}
CvPriorityQueueFloat(void) {
num=in=0;
mem=empty=head=tail=NULL;
}
~CvPriorityQueueFloat(void)
{
cvFree( &mem );
}
};
inline float VectorScalMult(CvPoint2D32f v1,CvPoint2D32f v2) {
return v1.x*v2.x+v1.y*v2.y;
}
inline float VectorLength(CvPoint2D32f v1) {
return v1.x*v1.x+v1.y*v1.y;
}
///////////////////////////////////////////////////////////////////////////////////////////
//HEAP::iterator Heap_Iterator;
//HEAP Heap;
static float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
{
double sol, a11, a22, m12;
a11=CV_MAT_ELEM(*t,float,i1,j1);
a22=CV_MAT_ELEM(*t,float,i2,j2);
m12=MIN(a11,a22);
if( CV_MAT_ELEM(*f,uchar,i1,j1) != INSIDE )
if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
if( fabs(a11-a22) >= 1.0 )
sol = 1+m12;
else
sol = (a11+a22+sqrt((double)(2-(a11-a22)*(a11-a22))))*0.5;
else
sol = 1+a11;
else if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
sol = 1+a22;
else
sol = 1+m12;
return (float)sol;
}
/////////////////////////////////////////////////////////////////////////////////////
static void
icvCalcFMM(const CvMat *f, CvMat *t, CvPriorityQueueFloat *Heap, bool negate) {
int i, j, ii = 0, jj = 0, q;
float dist;
while (Heap->Pop(&ii,&jj)) {
unsigned known=(negate)?CHANGE:KNOWN;
CV_MAT_ELEM(*f,uchar,ii,jj) = (uchar)known;
for (q=0; q<4; q++) {
i=0; j=0;
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else {i=ii; j=jj+1;}
if ((i<=0)||(j<=0)||(i>f->rows)||(j>f->cols)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
Heap->Push(i,j,dist);
}
}
}
if (negate) {
for (i=0; i<f->rows; i++) {
for(j=0; j<f->cols; j++) {
if (CV_MAT_ELEM(*f,uchar,i,j) == CHANGE) {
CV_MAT_ELEM(*f,uchar,i,j) = KNOWN;
CV_MAT_ELEM(*t,float,i,j) = -CV_MAT_ELEM(*t,float,i,j);
}
}
}
}
}
static void
icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueueFloat *Heap ) {
int i = 0, j = 0, ii = 0, jj = 0, k, l, q, color = 0;
float dist;
if (CV_MAT_CN(out->type)==1) {
while (Heap->Pop(&ii,&jj)) {
CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN;
for(q=0; q<4; q++) {
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else if(q==3) {i=ii; j=jj+1;}
if ((i<=1)||(j<=1)||(i>t->rows-1)||(j>t->cols-1)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
for (color=0; color<=0; color++) {
CvPoint2D32f gradI,gradT,r;
float Ia=0,Jx=0,Jy=0,s=1.0e-20f,w,dst,lev,dir,sat;
if (CV_MAT_ELEM(*f,uchar,i,j+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) {
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j-1)))*0.5f;
} else {
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) {
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i,j-1)));
} else {
gradT.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,i+1,j)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) {
gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i-1,j)))*0.5f;
} else {
gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i,j)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) {
gradT.y=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i-1,j)));
} else {
gradT.y=0;
}
}
for (k=i-range; k<=i+range; k++) {
int km=k-1+(k==1),kp=k-1-(k==t->rows-2);
for (l=j-range; l<=j+range; l++) {
int lm=l-1+(l==1),lp=l-1-(l==t->cols-2);
if (k>0&&l>0&&k<t->rows-1&&l<t->cols-1) {
if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&&
((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) {
r.y = (float)(i-k);
r.x = (float)(j-l);
dst = (float)(1./(VectorLength(r)*sqrt(VectorLength(r))));
lev = (float)(1./(1+fabs(CV_MAT_ELEM(*t,float,k,l)-CV_MAT_ELEM(*t,float,i,j))));
dir=VectorScalMult(r,gradT);
if (fabs(dir)<=0.01) dir=0.000001f;
w = (float)fabs(dst*lev*dir);
if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
// gradI.x=(float)((CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm-1)))*2.0f;
gradI.x=(float)((CV_MAT_ELEM(*out,uint16_t,km,lp+1)-CV_MAT_ELEM(*out,uint16_t,km,lm-1)))*2.0f;
} else {
// gradI.x=(float)((CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm)));
gradI.x=(float)((CV_MAT_ELEM(*out,uint16_t,km,lp+1)-CV_MAT_ELEM(*out,uint16_t,km,lm)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
// gradI.x=(float)((CV_MAT_ELEM(*out,uchar,km,lp)-CV_MAT_ELEM(*out,uchar,km,lm-1)));
gradI.x=(float)((CV_MAT_ELEM(*out,uint16_t,km,lp)-CV_MAT_ELEM(*out,uint16_t,km,lm-1)));
} else {
gradI.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
// gradI.y=(float)((CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm)))*2.0f;
gradI.y=(float)((CV_MAT_ELEM(*out,uint16_t,kp+1,lm)-CV_MAT_ELEM(*out,uint16_t,km-1,lm)))*2.0f;
} else {
// gradI.y=(float)((CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,km,lm)));
gradI.y=(float)((CV_MAT_ELEM(*out,uint16_t,kp+1,lm)-CV_MAT_ELEM(*out,uint16_t,km,lm)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
// gradI.y=(float)((CV_MAT_ELEM(*out,uchar,kp,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm)));
gradI.y=(float)((CV_MAT_ELEM(*out,uint16_t,kp,lm)-CV_MAT_ELEM(*out,uint16_t,km-1,lm)));
} else {
gradI.y=0;
}
}
// Ia += (float)w * (float)(CV_MAT_ELEM(*out,uchar,km,lm));
Ia += (float)w * (float)(CV_MAT_ELEM(*out,uint16_t,km,lm));
Jx -= (float)w * (float)(gradI.x*r.x);
Jy -= (float)w * (float)(gradI.y*r.y);
s += w;
}
}
}
}
sat = (float)((Ia/s+(Jx+Jy)/(sqrt(Jx*Jx+Jy*Jy)+1.0e-20f)+0.5f));
{
// CV_MAT_ELEM(*out,uchar,i-1,j-1) = cv::saturate_cast<uchar>(sat);
CV_MAT_ELEM(*out,uint16_t,i-1,j-1) = cv::saturate_cast<uint16_t>(sat);
}
}
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
Heap->Push(i,j,dist);
}
}
}
}
}
static void
icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueueFloat *Heap) {
int i = 0, j = 0, ii = 0, jj = 0, k, l, q;
float dist;
if (CV_MAT_CN(out->type)==1) {
while (Heap->Pop(&ii,&jj)) {
CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN;
for(q=0; q<4; q++) {
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else if(q==3) {i=ii; j=jj+1;}
if ((i<=1)||(j<=1)||(i>t->rows-1)||(j>t->cols-1)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
{
CvPoint2D32f gradI,r;
float Ia=0,s=1.0e-20f,w,dst,dir;
for (k=i-range; k<=i+range; k++) {
int km=k-1+(k==1),kp=k-1-(k==t->rows-2);
for (l=j-range; l<=j+range; l++) {
int lm=l-1+(l==1),lp=l-1-(l==t->cols-2);
if (k>0&&l>0&&k<t->rows-1&&l<t->cols-1) {
if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&&
((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) {
r.y=(float)(i-k);
r.x=(float)(j-l);
dst = 1/(VectorLength(r)*VectorLength(r)+1);
if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
// gradI.x=(float)(abs(CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,kp,lm))+
// abs(CV_MAT_ELEM(*out,uchar,kp,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm)));
gradI.x=(float)(abs(CV_MAT_ELEM(*out,uint16_t,kp+1,lm)-CV_MAT_ELEM(*out,uint16_t,kp,lm))+
abs(CV_MAT_ELEM(*out,uint16_t,kp,lm)-CV_MAT_ELEM(*out,uint16_t,km-1,lm)));
} else {
// gradI.x=(float)(abs(CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,kp,lm)))*2.0f;
gradI.x=(float)(abs(CV_MAT_ELEM(*out,uint16_t,kp+1,lm)-CV_MAT_ELEM(*out,uint16_t,kp,lm)))*2.0f;
}
} else {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
// gradI.x=(float)(abs(CV_MAT_ELEM(*out,uchar,kp,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm)))*2.0f;
gradI.x=(float)(abs(CV_MAT_ELEM(*out,uint16_t,kp,lm)-CV_MAT_ELEM(*out,uint16_t,km-1,lm)))*2.0f;
} else {
gradI.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
// gradI.y=(float)(abs(CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm))+
// abs(CV_MAT_ELEM(*out,uchar,km,lm)-CV_MAT_ELEM(*out,uchar,km,lm-1)));
gradI.y=(float)(abs(CV_MAT_ELEM(*out,uint16_t,km,lp+1)-CV_MAT_ELEM(*out,uint16_t,km,lm))+
abs(CV_MAT_ELEM(*out,uint16_t,km,lm)-CV_MAT_ELEM(*out,uint16_t,km,lm-1)));
} else {
// gradI.y=(float)(abs(CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm)))*2.0f;
gradI.y=(float)(abs(CV_MAT_ELEM(*out,uint16_t,km,lp+1)-CV_MAT_ELEM(*out,uint16_t,km,lm)))*2.0f;
}
} else {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
// gradI.y=(float)(abs(CV_MAT_ELEM(*out,uchar,km,lm)-CV_MAT_ELEM(*out,uchar,km,lm-1)))*2.0f;
gradI.y=(float)(abs(CV_MAT_ELEM(*out,uint16_t,km,lm)-CV_MAT_ELEM(*out,uint16_t,km,lm-1)))*2.0f;
} else {
gradI.y=0;
}
}
gradI.x=-gradI.x;
dir=VectorScalMult(r,gradI);
if (fabs(dir)<=0.01) {
dir=0.000001f;
} else {
dir = (float)fabs(VectorScalMult(r,gradI)/sqrt(VectorLength(r)*VectorLength(gradI)));
}
w = dst*dir;
// Ia += (float)w * (float)(CV_MAT_ELEM(*out,uchar,km,lm));
Ia += (float)w * (float)(CV_MAT_ELEM(*out,uint16_t,km,lm));
s += w;
}
}
}
}
// CV_MAT_ELEM(*out,uchar,i-1,j-1) = cv::saturate_cast<uchar>((double)Ia/s);
CV_MAT_ELEM(*out,uint16_t,i-1,j-1) = cv::saturate_cast<uint16_t>((double)Ia/s);
}
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
Heap->Push(i,j,dist);
}
}
}
}
}
#define SET_BORDER1_C1(image,type,value) {\
int i,j;\
for(j=0; j<image->cols; j++) {\
CV_MAT_ELEM(*image,type,0,j) = value;\
}\
for (i=1; i<image->rows-1; i++) {\
CV_MAT_ELEM(*image,type,i,0) = CV_MAT_ELEM(*image,type,i,image->cols-1) = value;\
}\
for(j=0; j<image->cols; j++) {\
CV_MAT_ELEM(*image,type,erows-1,j) = value;\
}\
}
#define COPY_MASK_BORDER1_C1(src,dst,type) {\
int i,j;\
for (i=0; i<src->rows; i++) {\
for(j=0; j<src->cols; j++) {\
if (CV_MAT_ELEM(*src,type,i,j)!=0)\
CV_MAT_ELEM(*dst,type,i+1,j+1) = INSIDE;\
}\
}\
}
void
_cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_img,
double inpaintRange, int flags )
{
cv::Ptr<CvMat> mask, band, f, t, out;
cv::Ptr<CvPriorityQueueFloat> Heap, Out;
IplConvKernel *el_cross, *el_range;
CvMat input_hdr, mask_hdr, output_hdr;
CvMat* input_img, *inpaint_mask, *output_img;
int range=cvRound(inpaintRange);
int erows, ecols;
input_img = cvGetMat( _input_img, &input_hdr );
inpaint_mask = cvGetMat( _inpaint_mask, &mask_hdr );
output_img = cvGetMat( _output_img, &output_hdr );
if( !CV_ARE_SIZES_EQ(input_img,output_img) || !CV_ARE_SIZES_EQ(input_img,inpaint_mask))
CV_Error( CV_StsUnmatchedSizes, "All the input and output images must have the same size" );
if( (CV_MAT_TYPE(input_img->type) != CV_16UC1) ||
!CV_ARE_TYPES_EQ(input_img,output_img) )
CV_Error( CV_StsUnsupportedFormat,
"Only 16-bit 1-channel input/output images are supported" );
if( CV_MAT_TYPE(inpaint_mask->type) != CV_8UC1 )
CV_Error( CV_StsUnsupportedFormat, "The mask must be 8-bit 1-channel image" );
range = MAX(range,1);
range = MIN(range,100);
ecols = input_img->cols + 2;
erows = input_img->rows + 2;
f = cvCreateMat(erows, ecols, CV_8UC1);
t = cvCreateMat(erows, ecols, CV_32FC1);
band = cvCreateMat(erows, ecols, CV_8UC1);
mask = cvCreateMat(erows, ecols, CV_8UC1);
el_cross = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL);
cvCopy( input_img, output_img );
cvSet(mask,cvScalar(KNOWN,0,0,0));
COPY_MASK_BORDER1_C1(inpaint_mask,mask,uchar);
SET_BORDER1_C1(mask,uchar,0);
cvSet(f,cvScalar(KNOWN,0,0,0));
cvSet(t,cvScalar(1.0e6f,0,0,0));
cvDilate(mask,band,el_cross,1); // image with narrow band
cvReleaseStructuringElement(&el_cross);
Heap=new CvPriorityQueueFloat;
if (!Heap->Init(band))
return;
cvSub(band,mask,band,NULL);
SET_BORDER1_C1(band,uchar,0);
if (!Heap->Add(band))
return;
cvSet(f,cvScalar(BAND,0,0,0),band);
cvSet(f,cvScalar(INSIDE,0,0,0),mask);
cvSet(t,cvScalar(0,0,0,0),band);
if( flags == CV_INPAINT_TELEA )
{
out = cvCreateMat(erows, ecols, CV_8UC1);
el_range = cvCreateStructuringElementEx(2*range+1,2*range+1,
range,range,CV_SHAPE_RECT,NULL);
cvDilate(mask,out,el_range,1);
cvReleaseStructuringElement(&el_range);
cvSub(out,mask,out,NULL);
Out=new CvPriorityQueueFloat;
if (!Out->Init(out))
return;
if (!Out->Add(band))
return;
cvSub(out,band,out,NULL);
SET_BORDER1_C1(out,uchar,0);
icvCalcFMM(out,t,Out,true);
icvTeleaInpaintFMM(mask,t,output_img,range,Heap);
}
else if (flags == CV_INPAINT_NS) {
icvNSInpaintFMM(mask,t,output_img,range,Heap);
} else {
CV_Error( CV_StsBadArg, "The flags argument must be one of CV_INPAINT_TELEA or CV_INPAINT_NS" );
}
}
CvMat ToCvMat(const cv::Mat& m)
{
CV_DbgAssert(m.dims <= 2);
CvMat dst = cvMat(m.rows, m.dims == 1 ? 1 : m.cols, m.type(), m.data);
dst.step = (int)m.step[0];
dst.type = (dst.type & ~cv::Mat::CONTINUOUS_FLAG) | (m.flags & cv::Mat::CONTINUOUS_FLAG);
return dst;
}
void _inpaint( InputArray _src, InputArray _mask, OutputArray _dst,
double inpaintRange, int flags )
{
Mat src = _src.getMat(), mask = _mask.getMat();
_dst.create( src.size(), src.type() );
CvMat c_src = ToCvMat(src), c_mask = ToCvMat(mask), c_dst = ToCvMat(_dst.getMat());
_cvInpaint( &c_src, &c_mask, &c_dst, inpaintRange, flags );
}
//////////////////////////////////////////////////////////////////////////////////////
cv::Mat DepthInpainter::genValidMask(const cv::Mat& depth)
{
cv::Mat orgMask = (depth == 0);
// cv::Mat mask = orgMask.clone();
cv::Mat mask = orgMask;
cv::Mat kernel = cv::Mat::zeros(_kernelSize, _kernelSize, CV_8U);
cv::circle(kernel, cv::Point(kernel.cols/2, kernel.rows/2), kernel.rows/2, cv::Scalar(255), -1);
cv::erode(orgMask, mask, kernel);
cv::dilate(mask, mask, kernel);
gSpeckleFilter.Compute(mask, 0, _maxInternalHoleToBeFilled, 1);
// revert mask
mask = mask == 0;
return mask;
}
void DepthInpainter::inpaint(const cv::Mat& depth, cv::Mat& out, const cv::Mat& mask)
{
cv::Mat newDepth;
cv::Mat _mask = mask.empty() ? (depth == 0) : mask;
if(depth.type() == CV_8U || depth.type() == CV_8UC3){
cv::inpaint(depth, _mask, newDepth, _inpaintRadius, cv::INPAINT_TELEA);
} else if(depth.type() == CV_16U){
_inpaint(depth, _mask, newDepth, _inpaintRadius, cv::INPAINT_TELEA);
}
if(mask.empty() && !_fillAll){
// gen masked image
cv::Mat mask = genValidMask(depth);
out = cv::Mat::zeros(depth.size(), CV_16U);
newDepth.copyTo(out, mask);
} else {
out = newDepth;
}
}
#endif

View File

@@ -0,0 +1,36 @@
#ifndef XYZ_INPAINTER_HPP_
#define XYZ_INPAINTER_HPP_
#ifdef OPENCV_DEPENDENCIES
#include <opencv2/opencv.hpp>
#include "ImageSpeckleFilter.hpp"
//#warn("DepthInpainter this design no longer supported by new opencv version, using opencv inpaint api for alternative")
class DepthInpainter
{
public:
int _kernelSize;
int _maxInternalHoleToBeFilled;
double _inpaintRadius;
bool _fillAll;
DepthInpainter()
: _kernelSize(5)
, _maxInternalHoleToBeFilled(50)
, _inpaintRadius(1)
, _fillAll(true)
{
}
void inpaint(const cv::Mat& inputDepth, cv::Mat& out, const cv::Mat& mask);
private:
cv::Mat genValidMask(const cv::Mat& depth);
};
#endif
#endif
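A brief usage sketch for the class above, assuming OPENCV_DEPENDENCIES is defined and the input is a CV_16U depth map fetched elsewhere.

#include <opencv2/opencv.hpp>
#include "DepthInpainter.hpp"

// Fill small holes (depth == 0) in a CV_16U depth map; the input is an assumption here.
cv::Mat fillDepthHoles(const cv::Mat& depth)
{
    DepthInpainter inpainter;
    inpainter._kernelSize = 5;                   // erode/dilate kernel for the valid mask
    inpainter._maxInternalHoleToBeFilled = 50;   // only fill holes up to this size
    inpainter._fillAll = false;                  // keep large invalid regions at 0
    cv::Mat filled;
    inpainter.inpaint(depth, filled, cv::Mat()); // empty mask: inpaint where depth == 0
    return filled;
}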

View File

@@ -0,0 +1,249 @@
#ifndef PERCIPIO_SAMPLE_COMMON_DEPTH_RENDER_HPP_
#define PERCIPIO_SAMPLE_COMMON_DEPTH_RENDER_HPP_
#ifdef OPENCV_DEPENDENCIES
#include <opencv2/opencv.hpp>
#ifndef CV_VERSION_EPOCH
#if defined (CV_MAJOR_VERSION) && (CV_VERSION_MAJOR == 4)
#include <opencv2/imgproc/types_c.h>
#include <opencv2/imgcodecs/legacy/constants_c.h>
#endif
#endif
#include <map>
#include <vector>
class DepthRender {
public:
enum OutputColorType {
COLORTYPE_RAINBOW = 0,
COLORTYPE_BLUERED = 1,
COLORTYPE_GRAY = 2
};
enum ColorRangeMode {
COLOR_RANGE_ABS = 0,
COLOR_RANGE_DYNAMIC = 1
};
DepthRender() : needResetColorTable(true)
, color_type(COLORTYPE_BLUERED)
, range_mode(COLOR_RANGE_DYNAMIC)
, min_distance(0)
, max_distance(0)
, invalid_label(0)
{}
void SetColorType( OutputColorType ct = COLORTYPE_BLUERED ){
if(ct != color_type){
needResetColorTable = true;
color_type = ct;
}
}
void SetRangeMode( ColorRangeMode rm = COLOR_RANGE_DYNAMIC ){
if(range_mode != rm){
needResetColorTable = true;
range_mode = rm;
}
}
/// for abs mode
void SetColorRange(int minDis, int maxDis){
min_distance = minDis;
max_distance = maxDis;
}
/// input 16UC1 output 8UC3
void Compute(const cv::Mat &src, cv::Mat& dst ){
dst = Compute(src);
}
cv::Mat Compute(const cv::Mat &src){
cv::Mat src16U;
if(src.type() != CV_16U){
src.convertTo(src16U, CV_16U);
}else{
src16U = src;
}
if(needResetColorTable){
BuildColorTable();
needResetColorTable = false;
}
cv::Mat dst;
filtered_mask = (src16U == invalid_label);
clr_disp = src16U.clone();
if(COLOR_RANGE_ABS == range_mode) {
TruncValue(clr_disp, filtered_mask, min_distance, max_distance);
clr_disp -= min_distance;
clr_disp = clr_disp * 255 / (max_distance - min_distance);
clr_disp.convertTo(clr_disp, CV_8UC1);
} else {
unsigned short vmax, vmin;
HistAdjustRange(clr_disp, invalid_label, min_distance, vmin, vmax);
clr_disp = (clr_disp - vmin) * 255 / (vmax - vmin);
//clr_disp = 255 - clr_disp;
clr_disp.convertTo(clr_disp, CV_8UC1);
}
switch (color_type) {
case COLORTYPE_GRAY:
clr_disp = 255 - clr_disp;
cv::cvtColor(clr_disp, dst, cv::COLOR_GRAY2BGR);
break;
case COLORTYPE_BLUERED:
//temp = 255 - clr_disp;
CalcColorMap(clr_disp, dst);
//cv::applyColorMap(temp, color_img, cv::COLORMAP_COOL);
break;
case COLORTYPE_RAINBOW:
//cv::cvtColor(color_img, color_img, CV_GRAY2BGR);
cv::applyColorMap(clr_disp, dst, cv::COLORMAP_RAINBOW);
break;
}
ClearInvalidArea(dst, filtered_mask);
return dst;
}
private:
void CalcColorMap(const cv::Mat &src, cv::Mat &dst){
std::vector<cv::Scalar> &table = _color_lookup_table;
assert(table.size() == 256);
assert(!src.empty());
assert(src.type() == CV_8UC1);
dst.create(src.size(), CV_8UC3);
const unsigned char* sptr = src.ptr<unsigned char>();
unsigned char* dptr = dst.ptr<unsigned char>();
for (int i = src.size().area(); i != 0; i--) {
cv::Scalar &v = table[*sptr];
dptr[0] = (unsigned char)v.val[0];
dptr[1] = (unsigned char)v.val[1];
dptr[2] = (unsigned char)v.val[2];
dptr += 3;
sptr += 1;
}
}
void BuildColorTable(){
_color_lookup_table.resize(256);
cv::Scalar from(50, 0, 0xff), to(50, 200, 255);
for (int i = 0; i < 128; i++) {
float a = (float)i / 128;
cv::Scalar &v = _color_lookup_table[i];
for (int j = 0; j < 3; j++) {
v.val[j] = from.val[j] * (1 - a) + to.val[j] * a;
}
}
from = to;
to = cv::Scalar(255, 104, 0);
for (int i = 128; i < 256; i++) {
float a = (float)(i - 128) / 128;
cv::Scalar &v = _color_lookup_table[i];
for (int j = 0; j < 3; j++) {
v.val[j] = from.val[j] * (1 - a) + to.val[j] * a;
}
}
}
//clamp values into [min_val, max_val]; clamped pixels are marked invalid in the mask
void TruncValue(cv::Mat &img, cv::Mat &mask, unsigned short min_val, unsigned short max_val){
assert(max_val >= min_val);
assert(img.type() == CV_16UC1);
assert(mask.type() == CV_8UC1);
unsigned short* ptr = img.ptr<unsigned short>();
unsigned char* mask_ptr = mask.ptr<unsigned char>();
for (int i = img.size().area(); i != 0; i--) {
if (*ptr > max_val) {
*ptr = max_val;
*mask_ptr = 0xff;
} else if (*ptr < min_val) {
*ptr = min_val;
*mask_ptr = 0xff;
}
ptr++;
mask_ptr++;
}
}
void ClearInvalidArea(cv::Mat &clr_disp, cv::Mat &filtered_mask){
assert(clr_disp.type() == CV_8UC3);
assert(filtered_mask.type() == CV_8UC1);
assert(clr_disp.size().area() == filtered_mask.size().area());
unsigned char* filter_ptr = filtered_mask.ptr<unsigned char>();
unsigned char* ptr = clr_disp.ptr<unsigned char>();
int len = clr_disp.size().area();
for (int i = 0; i < len; i++) {
if (*filter_ptr != 0) {
ptr[0] = ptr[1] = ptr[2] = 0;
}
filter_ptr++;
ptr += 3;
}
}
void HistAdjustRange(const cv::Mat &dist, ushort invalid, int min_display_distance_range
, ushort &min_val, ushort &max_val) {
std::map<ushort, int> hist;
int sz = dist.size().area();
const ushort* ptr = dist.ptr < ushort>();
int total_num = 0;
for (int idx = sz; idx != 0; idx--, ptr++) {
if (invalid == *ptr) {
continue;
}
total_num++;
if (hist.find(*ptr) != hist.end()) {
hist[*ptr]++;
} else {
hist.insert(std::make_pair(*ptr, 1));
}
}
if (hist.empty()) {
min_val = 0;
max_val = 2000;
return;
}
const int delta = total_num * 0.01;
int sum = 0;
min_val = hist.begin()->first;
for (std::map<ushort, int>::iterator it = hist.begin(); it != hist.end();it++){
sum += it->second;
if (sum > delta) {
min_val = it->first;
break;
}
}
sum = 0;
max_val = hist.rbegin()->first;
for (std::map<ushort, int>::reverse_iterator s = hist.rbegin()
; s != hist.rend(); s++) {
sum += s->second;
if (sum > delta) {
max_val = s->first;
break;
}
}
const int min_display_dist = min_display_distance_range;
if (max_val - min_val < min_display_dist) {
int m = (max_val + min_val) / 2;
int lo = m - min_display_dist / 2;
if (lo < 0) {
lo = 0;
}
min_val = (ushort)lo;
max_val = (ushort)(m + min_display_dist / 2);
}
}
bool needResetColorTable;
OutputColorType color_type;
ColorRangeMode range_mode;
int min_distance;
int max_distance;
uint16_t invalid_label;
cv::Mat clr_disp ;
cv::Mat filtered_mask;
std::vector<cv::Scalar> _color_lookup_table;
};
#endif
#endif
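A short sketch of turning a 16-bit depth map into a displayable color image with the class above; depth16 stands for a CV_16U frame obtained elsewhere.

#include <opencv2/opencv.hpp>
#include "DepthRender.hpp"

// depth16 is assumed to be a CV_16U depth image from the camera pipeline.
cv::Mat renderDepthForDisplay(const cv::Mat& depth16)
{
    static DepthRender render;                             // keeps its color table between calls
    render.SetColorType(DepthRender::COLORTYPE_RAINBOW);
    render.SetRangeMode(DepthRender::COLOR_RANGE_DYNAMIC); // range taken from the depth histogram
    return render.Compute(depth16);                        // 8UC3, invalid pixels painted black
}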

View File

@@ -0,0 +1,120 @@
#include "ImageSpeckleFilter.hpp"
#include <stdio.h>
#include <stdexcept>
#ifdef WIN32
#include <stdint.h>
#endif
#ifdef OPENCV_DEPENDENCIES
struct Point2s {
Point2s(short _x, short _y) {
x = _x;
y = _y;
}
short x, y;
};
template <typename T>
void filterSpecklesImpl(cv::Mat& img, int newVal, int maxSpeckleSize, int maxDiff, std::vector<char> &_buf) {
int width = img.cols, height = img.rows;
int npixels = width * height;//number of pixels
size_t bufSize = npixels * (int)(sizeof(Point2s) + sizeof(int) + sizeof(uint8_t));//all pixel buffer
if (_buf.size() < bufSize) {
_buf.resize((int)bufSize);
}
uint8_t* buf = (uint8_t*)(&_buf[0]);
int i, j, dstep = img.cols;//(int)(img.step / sizeof(T));
int* labels = (int*)buf;
buf += npixels * sizeof(labels[0]);
Point2s* wbuf = (Point2s*)buf;
buf += npixels * sizeof(wbuf[0]);
uint8_t* rtype = (uint8_t*)buf;
int curlabel = 0;
// clear out label assignments
memset(labels, 0, npixels * sizeof(labels[0]));
for (i = 0; i < height; i++) {
T* ds = img.ptr<T>(i);
int* ls = labels + width * i;//label ptr for a row
for (j = 0; j < width; j++) {
if (ds[j] != newVal) { // not a bad disparity
if (ls[j]) { // has a label, check for bad label
if (rtype[ls[j]]) // small region, zero out disparity
ds[j] = (T)newVal;
}
// no label, assign and propagate
else {
Point2s* ws = wbuf; // initialize wavefront
Point2s p((short)j, (short)i); // current pixel
curlabel++; // next label
int count = 0; // current region size
ls[j] = curlabel;
// wavefront propagation
while (ws >= wbuf) { // wavefront not empty
count++;
// put neighbors onto wavefront
T* dpp = &img.ptr<T>(p.y)[p.x];
T dp = *dpp;
int* lpp = labels + width * p.y + p.x;
if (p.x < width - 1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff) {
lpp[+1] = curlabel;
*ws++ = Point2s(p.x + 1, p.y);
}
if (p.x > 0 && !lpp[-1] && dpp[-1] != newVal && std::abs(dp - dpp[-1]) <= maxDiff) {
lpp[-1] = curlabel;
*ws++ = Point2s(p.x - 1, p.y);
}
if (p.y < height - 1 && !lpp[+width] && dpp[+dstep] != newVal && std::abs(dp - dpp[+dstep]) <= maxDiff) {
lpp[+width] = curlabel;
*ws++ = Point2s(p.x, p.y + 1);
}
if (p.y > 0 && !lpp[-width] && dpp[-dstep] != newVal && std::abs(dp - dpp[-dstep]) <= maxDiff) {
lpp[-width] = curlabel;
*ws++ = Point2s(p.x, p.y - 1);
}
// pop most recent and propagate
// NB: could try least recent, maybe better convergence
p = *--ws;
}
// assign label type
if (count <= maxSpeckleSize) { // speckle region
rtype[ls[j]] = 1; // small region label
ds[j] = (T)newVal;
} else
rtype[ls[j]] = 0; // large region label
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////
ImageSpeckleFilter gSpeckleFilter;
void ImageSpeckleFilter::Compute(cv::Mat &image, int newVal, int maxSpeckleSize, int maxDiff)
{
if(image.type() == CV_8U){
filterSpecklesImpl<uint8_t>(image, newVal, maxSpeckleSize, maxDiff, _labelBuf);
} else if(image.type() == CV_16U){
filterSpecklesImpl<uint16_t>(image, newVal, maxSpeckleSize, maxDiff, _labelBuf);
} else {
char sz[10];
sprintf(sz, "%d", image.type());
throw std::runtime_error(std::string("ImageSpeckleFilter only supports 8u and 16u images, not type ") + sz);
}
}
#endif

View File

@@ -0,0 +1,22 @@
#ifndef XYZ_IMAGE_SPECKLE_FILTER_HPP_
#define XYZ_IMAGE_SPECKLE_FILTER_HPP_
#ifdef OPENCV_DEPENDENCIES
#include <vector>
#include <opencv2/opencv.hpp>
class ImageSpeckleFilter
{
public:
void Compute(cv::Mat &image, int newVal = 0, int maxSpeckleSize = 50, int maxDiff = 6);
private:
std::vector<char> _labelBuf;
};
extern ImageSpeckleFilter gSpeckleFilter;
#endif
#endif
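A one-call sketch of the filter above; the CV_16U input and the thresholds are illustrative assumptions.

#include <opencv2/opencv.hpp>
#include "ImageSpeckleFilter.hpp"

// Remove, in place, connected regions of at most 150 pixels whose neighboring
// values differ by no more than 10; removed pixels become 0 (invalid depth).
void removeDepthSpeckles(cv::Mat& depth16 /* CV_16U */)
{
    gSpeckleFilter.Compute(depth16, /*newVal=*/0, /*maxSpeckleSize=*/150, /*maxDiff=*/10);
}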

View File

@@ -0,0 +1,95 @@
#include <stdint.h>
#include <stdio.h>
#include "MatViewer.hpp"
#ifdef OPENCV_DEPENDENCIES
int GraphicItem::globalID = 0;
void OpencvViewer::_onMouseCallback(int event, int x, int y, int /*flags*/, void* ustc)
{
OpencvViewer* p = (OpencvViewer*)ustc;
// NOTE: This callback will be called very frequently while mouse moving,
// keep it simple
bool repaint = false;
p->onMouseCallback(p->_orgImg, event, cv::Point(x,y), repaint);
if(repaint){
p->showImage();
}
}
void OpencvViewer::showImage()
{
_showImg = _orgImg.clone();
for(std::map<int, GraphicItem*>::iterator it = _items.begin()
; it != _items.end(); it++){
it->second->draw(_showImg);
}
cv::imshow(_win.c_str(), _showImg);
cv::setMouseCallback(_win, _onMouseCallback, this);
}
///////////////////////////// DepthViewer ///////////////////////////////////////
DepthViewer::DepthViewer(const std::string& win)
: OpencvViewer(win)
, _centerDepthItem(std::string(), cv::Point(0,20), 0.5, cv::Scalar(0,255,0), 2)
, _pickedDepthItem(std::string(), cv::Point(0,40), 0.5, cv::Scalar(0,255,0), 2)
{
OpencvViewer::addGraphicItem(&_centerDepthItem);
OpencvViewer::addGraphicItem(&_pickedDepthItem);
depth_scale_unit = 1.f;
}
void DepthViewer::show(const cv::Mat& img)
{
if(img.type() != CV_16U || img.total() == 0){
return;
}
char str[128];
float val = img.at<uint16_t>(img.rows / 2, img.cols / 2)*depth_scale_unit;
sprintf(str, "Depth at center: %.1f", val);
_centerDepthItem.set(str);
val = img.at<uint16_t>(_fixLoc.y, _fixLoc.x)*depth_scale_unit;
sprintf(str, "Depth at (%d,%d): %.1f", _fixLoc.x, _fixLoc.y , val);
_pickedDepthItem.set(str);
_depth = img.clone();
_renderedDepth = _render.Compute(img);
OpencvViewer::show(_renderedDepth);
}
void DepthViewer::onMouseCallback(cv::Mat& img, int event, const cv::Point pnt
, bool& repaint)
{
repaint = false;
switch(event){
case cv::EVENT_LBUTTONDOWN: {
_fixLoc = pnt;
char str[64];
float val = _depth.at<uint16_t>(pnt.y, pnt.x)*depth_scale_unit;
sprintf(str, "Depth at (%d,%d): %.1f", pnt.x, pnt.y, val);
printf(">>>>>>>>>>>>>>>> depth(%.1f)\n", val);
_pickedDepthItem.set(str);
repaint = true;
break;
}
case cv::EVENT_MOUSEMOVE:
// uint16_t val = _img.at<uint16_t>(pnt.y, pnt.x);
// char str[32];
// sprintf(str, "Depth at mouse: %d", val);
// drawText(img, str, cv::Point(0,60), 0.5, cv::Scalar(0,255,0), 2);
break;
}
}
#endif

View File

@@ -0,0 +1,144 @@
#ifndef XYZ_MAT_VIEWER_HPP_
#define XYZ_MAT_VIEWER_HPP_
#ifdef OPENCV_DEPENDENCIES
#include <opencv2/opencv.hpp>
#include <string>
#include "DepthRender.hpp"
class GraphicItem
{
public:
GraphicItem(
const cv::Scalar& color = cv::Scalar(255,255,255)
)
: _id(++globalID), _color(color) {}
virtual ~GraphicItem() {}
int id() const { return _id; }
cv::Scalar color() const { return _color; }
void setColor(const cv::Scalar& color) { _color = color; }
virtual void draw(cv::Mat& img) = 0;
protected:
int _id;
cv::Scalar _color;
private:
static int globalID;
};
class GraphicRectangleItem : public GraphicItem
{
public:
cv::Rect _rect;
GraphicRectangleItem(
const cv::Scalar& color = cv::Scalar(255,255,255),
const cv::Rect& rect = cv::Rect()
)
: GraphicItem(color), _rect(rect) {}
virtual ~GraphicRectangleItem() {}
void set(const cv::Rect& rect) { _rect = rect; }
virtual void draw(cv::Mat& img){ cv::rectangle(img, _rect, color()); }
};
class GraphicStringItem : public GraphicItem
{
public:
std::string _str;
cv::Point _loc;
double _scale;
int _thick;
GraphicStringItem(
const std::string& str = std::string(),
const cv::Point loc = cv::Point(),
double scale = 0,
const cv::Scalar& color = cv::Scalar(),
int thick = 0
)
: GraphicItem(color), _str(str), _loc(loc), _scale(scale), _thick(thick) {}
virtual ~GraphicStringItem() {}
void set(const std::string& str) { _str = str; }
virtual void draw(cv::Mat& img){
cv::putText(img, _str, _loc, cv::FONT_HERSHEY_SIMPLEX, _scale, _color, _thick);
}
};
class OpencvViewer
{
public:
OpencvViewer(const std::string& win)
: _win(win)
{
_has_win = 0;
//cv::namedWindow(_win);
//cv::setMouseCallback(_win, _onMouseCallback, this);
}
~OpencvViewer()
{
if (_has_win)
{
//cv::setMouseCallback(_win, NULL, NULL);
cv::destroyWindow(_win);
}
}
const std::string& name() const {return _win;}
virtual void show(const cv::Mat& img)
{
_has_win = 1;
_orgImg = img.clone();
showImage();
}
virtual void onMouseCallback(cv::Mat& /*img*/, int /*event*/, const cv::Point /*pnt*/
, bool& repaint) {repaint = false;}
void addGraphicItem(GraphicItem* item) {
_items.insert(std::make_pair(item->id(), item));}
void delGraphicItem(GraphicItem* item) { _items.erase(item->id()); }
private:
static void _onMouseCallback(int event, int x, int y, int flags, void* ustc);
void showImage();
cv::Mat _orgImg;
cv::Mat _showImg;
int _has_win;
std::string _win;
std::map<int, GraphicItem*> _items;
};
//////////////////////////////////////////////////////////////////////////////////
class DepthViewer : public OpencvViewer
{
public:
DepthViewer(const std::string& win);
virtual void show(const cv::Mat& depthImage);
virtual void onMouseCallback(cv::Mat& img, int event, const cv::Point pnt
, bool& repaint);
float depth_scale_unit;
private:
cv::Mat _depth;
cv::Mat _renderedDepth;
DepthRender _render;
GraphicStringItem _centerDepthItem;
GraphicStringItem _pickedDepthItem;
cv::Point _fixLoc;
};
#endif
#endif
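A sketch of a display loop built on DepthViewer; fetchDepthFrame() is a hypothetical stand-in for the real frame source and does not exist in this commit.

#include <opencv2/opencv.hpp>
#include "MatViewer.hpp"

// Hypothetical frame source; replace with the real camera fetch code.
extern cv::Mat fetchDepthFrame();

void runDepthViewer()
{
    DepthViewer depthViewer("depth");
    depthViewer.depth_scale_unit = 1.0f;   // use the device's depth scale unit if it differs
    for (;;) {
        cv::Mat depth = fetchDepthFrame(); // assumed CV_16U frame
        depthViewer.show(depth);           // renders depth and prints center/clicked values
        if (cv::waitKey(1) == 'q') break;
    }
}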

View File

@@ -0,0 +1,198 @@
#include "ParametersParse.h"
#include "json11.hpp"
using namespace json11;
TY_STATUS write_int_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
if(value.is_number())
return TYSetInt(hDevice, comp, feat, static_cast<int>(value.number_value()));
else
return TY_STATUS_ERROR;
}
TY_STATUS write_float_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
if(value.is_number())
return TYSetFloat(hDevice, comp, feat, static_cast<float>(value.number_value()));
else
return TY_STATUS_ERROR;
}
TY_STATUS write_enum_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
if(value.is_number())
return TYSetEnum(hDevice, comp, feat, static_cast<uint32_t>(value.number_value()));
else
return TY_STATUS_ERROR;
}
TY_STATUS write_bool_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
if(value.is_bool())
return TYSetBool(hDevice, comp, feat, value.bool_value());
else
return TY_STATUS_ERROR;
}
bool json_parse_arrar(const Json& value, std::vector<char>& buff)
{
buff.clear();
if(value.is_array()) {
size_t size = value.array_items().size();
buff.resize(size);
for(size_t i = 0; i < size; i++)
buff[i] = static_cast<char>(value[i].number_value());
return true;
} else {
return false;
}
}
TY_STATUS write_string_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
std::vector<char> buff(0);
if(json_parse_arrar(value, buff)) {
buff.push_back(0);
return TYSetString(hDevice, comp, feat, &buff[0]);
} else {
return TY_STATUS_ERROR;
}
}
TY_STATUS write_bytearray_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
std::vector<char> buff(0);
if(json_parse_arrar(value, buff)) {
return TYSetByteArray(hDevice, comp, feat, (uint8_t*)(&buff[0]), buff.size());
} else {
return TY_STATUS_ERROR;
}
}
TY_STATUS write_struct_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
std::vector<char> buff(0);
if(json_parse_arrar(value, buff)) {
return TYSetStruct(hDevice, comp, feat, (void*)(&buff[0]), buff.size());
} else {
return TY_STATUS_ERROR;
}
}
TY_STATUS device_write_feature(const TY_DEV_HANDLE hDevice, TY_COMPONENT_ID comp, TY_FEATURE_ID feat, const Json& value)
{
TY_STATUS status = TY_STATUS_OK;
TY_FEATURE_TYPE type = TYFeatureType(feat);
switch (type)
{
case TY_FEATURE_INT:
status = write_int_feature(hDevice, comp, feat, value);
break;
case TY_FEATURE_FLOAT:
status = write_float_feature(hDevice, comp, feat, value);
break;
case TY_FEATURE_ENUM:
status = write_enum_feature(hDevice, comp, feat, value);
break;
case TY_FEATURE_BOOL:
status = write_bool_feature(hDevice, comp, feat, value);
break;
case TY_FEATURE_STRING:
status = write_string_feature(hDevice, comp, feat, value);
break;
case TY_FEATURE_BYTEARRAY:
status = write_bytearray_feature(hDevice, comp, feat, value);
break;
case TY_FEATURE_STRUCT:
status = write_struct_feature(hDevice, comp, feat, value);
break;
default:
status = TY_STATUS_INVALID_FEATURE;
break;
}
return status;
}
struct DevParam
{
TY_COMPONENT_ID compID;
TY_FEATURE_ID featID;
Json feat_value;
};
bool isValidJsonString(const char* code)
{
std::string err;
const auto json = Json::parse(code, err);
if(json.is_null()) return false;
return true;
}
bool json_parse(const TY_DEV_HANDLE hDevice, const char* jscode)
{
std::string err;
const auto json = Json::parse(jscode, err);
Json components = json["component"];
if(components.is_array()) {
std::vector<DevParam> param_list(0);
for (auto &k : components.array_items()) {
const Json& comp_id = k["id"];
const Json& comp_desc = k["desc"];
const Json& features = k["feature"];
if(!comp_id.is_string()) continue;
if(!comp_desc.is_string()) continue;
if(!features.is_array()) continue;
const char* comp_desc_str = comp_desc.string_value().c_str();
const char* comp_id_str = comp_id.string_value().c_str();
TY_COMPONENT_ID m_comp_id;
sscanf(comp_id_str,"%x",&m_comp_id);
for (auto &f : features.array_items()) {
const Json& feat_name = f["name"];
const Json& feat_id = f["id"];
const Json& feat_value = f["value"];
if(!feat_id.is_string()) continue;
if(!feat_name.is_string()) continue;
const char* feat_name_str = feat_name.string_value().c_str();
const char* feat_id_str = feat_id.string_value().c_str();
TY_FEATURE_ID m_feat_id;
sscanf(feat_id_str,"%x",&m_feat_id);
param_list.push_back({m_comp_id, m_feat_id, feat_value});
}
}
while(1)
{
size_t cnt = param_list.size();
for(auto it = param_list.begin(); it != param_list.end(); )
{
if(TY_STATUS_OK == device_write_feature(hDevice, it->compID, it->featID, it->feat_value))
{
it = param_list.erase(it);
} else {
++it;
}
}
if(param_list.size() == 0) {
return true;
}
if(param_list.size() == cnt) {
return false;
}
}
}
return false;
}

View File

@@ -0,0 +1,6 @@
#ifndef _PARAMETERS_PARSE_H_
#define _PARAMETERS_PARSE_H_
#include "TYApi.h"
bool isValidJsonString(const char* code);
bool json_parse(const TY_DEV_HANDLE hDevice, const char* jscode);
#endif
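A sketch of applying a JSON parameter file to an open device with the two functions above; hDevice and the config file are assumptions supplied by the caller.

#include <fstream>
#include <sstream>
#include <string>
#include "ParametersParse.h"

// Hypothetical helper: read a JSON config file and write its features to the device.
bool loadDeviceConfig(TY_DEV_HANDLE hDevice, const char* path)
{
    std::ifstream ifs(path);
    if (!ifs) return false;
    std::stringstream ss;
    ss << ifs.rdbuf();
    std::string js = ss.str();
    if (!isValidJsonString(js.c_str())) return false; // reject malformed JSON early
    return json_parse(hDevice, js.c_str());           // retries until no more progress is made
}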

View File

@@ -0,0 +1,83 @@
#include "TYThread.hpp"
#ifdef _WIN32
#include <windows.h>
class TYThreadImpl
{
public:
TYThreadImpl() : _thread(NULL) {}
int create(TYThread::Callback_t cb, void* arg) {
DWORD dwThreadId = 0;
_thread = CreateThread(
NULL, // default security attributes
0, // use default stack size
(LPTHREAD_START_ROUTINE)cb, // thread function name
arg, // argument to thread function
0, // use default creation flags
&dwThreadId); // returns the thread identifier
return 0;
}
int destroy() {
// TerminateThread(_thread, 0);
switch (WaitForSingleObject(_thread, INFINITE))
{
case WAIT_OBJECT_0:
if (CloseHandle(_thread)) {
_thread = 0;
return 0;
}
else {
return -1;
}
default:
return -2;
}
}
private:
HANDLE _thread;
};
#else // _WIN32
#include <pthread.h>
class TYThreadImpl
{
public:
TYThreadImpl() {}
int create(TYThread::Callback_t cb, void* arg) {
int ret = pthread_create(&_thread, NULL, cb, arg);
return ret;
}
int destroy() {
pthread_join(_thread, NULL);
return 0;
}
private:
pthread_t _thread;
};
#endif // _WIN32
////////////////////////////////////////////////////////////////////////////
TYThread::TYThread()
{
impl = new TYThreadImpl();
}
TYThread::~TYThread()
{
delete impl;
impl = NULL;
}
int TYThread::create(Callback_t cb, void* arg)
{
return impl->create(cb, arg);
}
int TYThread::destroy()
{
return impl->destroy();
}

View File

@@ -0,0 +1,25 @@
#ifndef XYZ_TYThread_HPP_
#define XYZ_TYThread_HPP_
class TYThreadImpl;
class TYThread
{
public:
typedef void* (*Callback_t)(void*);
TYThread();
~TYThread();
int create(Callback_t cb, void* arg);
int destroy();
private:
TYThreadImpl* impl;
};
#endif
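A minimal sketch of the thread wrapper above; the worker function is illustrative.

#include <cstdio>
#include "TYThread.hpp"

// Illustrative worker matching TYThread::Callback_t.
static void* worker(void* arg)
{
    printf("worker running, arg = %p\n", arg);
    return NULL;
}

int main()
{
    TYThread t;
    t.create(worker, NULL); // start the worker thread
    t.destroy();            // joins the thread (pthread_join / WaitForSingleObject)
    return 0;
}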

View File

@@ -0,0 +1,496 @@
#ifndef SAMPLE_COMMON_UTILS_HPP_
#define SAMPLE_COMMON_UTILS_HPP_
/**
* This file has no OpenCV dependency so that sample_raw can use it.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include <sstream>
#include <inttypes.h>
#include "TYApi.h"
#include "TYThread.hpp"
#include "crc32.h"
#include "ParametersParse.h"
#include "huffman.h"
#ifndef ASSERT
#define ASSERT(x) do{ \
if(!(x)) { \
LOGE("Assert failed at %s:%d", __FILE__, __LINE__); \
LOGE(" : " #x ); \
abort(); \
} \
}while(0)
#endif
#ifndef ASSERT_OK
#define ASSERT_OK(x) do{ \
int err = (x); \
if(err != TY_STATUS_OK) { \
LOGE("Assert failed: error %d(%s) at %s:%d", err, TYErrorString(err), __FILE__, __LINE__); \
LOGE(" : " #x ); \
abort(); \
} \
}while(0)
#endif
#ifndef CHECK_RET
#define CHECK_RET(x) do{ \
int err = (x); \
if(err != TY_STATUS_OK) { \
LOGD(#x " failed: error %d(%s)", err, TYErrorString(err)); \
LOGD("at %s:%d", __FILE__, __LINE__); \
} \
}while(0)
#endif
#ifdef _WIN32
# include <windows.h>
# include <time.h>
static inline char* getLocalTime()
{
static char local[26] = {0};
SYSTEMTIME wtm;
struct tm tm;
GetLocalTime(&wtm);
tm.tm_year = wtm.wYear - 1900;
tm.tm_mon = wtm.wMonth - 1;
tm.tm_mday = wtm.wDay;
tm.tm_hour = wtm.wHour;
tm.tm_min = wtm.wMinute;
tm.tm_sec = wtm.wSecond;
tm.tm_isdst = -1;
strftime(local, 26, "%Y-%m-%d %H:%M:%S", &tm);
return local;
}
static inline uint64_t getSystemTime()
{
SYSTEMTIME wtm;
struct tm tm;
GetLocalTime(&wtm);
tm.tm_year = wtm.wYear - 1900;
tm.tm_mon = wtm.wMonth - 1;
tm.tm_mday = wtm.wDay;
tm.tm_hour = wtm.wHour;
tm.tm_min = wtm.wMinute;
tm.tm_sec = wtm.wSecond;
tm.tm_isdst = -1;
return mktime(&tm) * 1000 + wtm.wMilliseconds;
}
static inline void MSleep(uint32_t ms)
{
Sleep(ms);
}
#else
# include <sys/time.h>
# include <unistd.h>
static inline char* getLocalTime()
{
static char local[26] = {0};
time_t time;
struct timeval tv;
gettimeofday(&tv, NULL);
time = tv.tv_sec;
struct tm* p_time = localtime(&time);
strftime(local, 26, "%Y-%m-%d %H:%M:%S", p_time);
return local;
}
static inline uint64_t getSystemTime()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (uint64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
static inline void MSleep(uint32_t ms)
{
usleep(ms * 1000);
}
#endif
#define LOGD(fmt,...) printf("%" PRIu64 " (%s) " fmt "\n", getSystemTime(), getLocalTime(), ##__VA_ARGS__)
#define LOGI(fmt,...) printf("%" PRIu64 " (%s) " fmt "\n", getSystemTime(), getLocalTime(), ##__VA_ARGS__)
#define LOGW(fmt,...) printf("%" PRIu64 " (%s) " fmt "\n", getSystemTime(), getLocalTime(), ##__VA_ARGS__)
#define LOGE(fmt,...) printf("%" PRIu64 " (%s) Error: " fmt "\n", getSystemTime(), getLocalTime(), ##__VA_ARGS__)
#define xLOGD(fmt,...)
#define xLOGI(fmt,...)
#define xLOGW(fmt,...)
#define xLOGE(fmt,...)
#ifdef _WIN32
# include <windows.h>
# define MSLEEP(x) Sleep(x)
// windows defined macro max/min
# ifdef max
# undef max
# endif
# ifdef min
# undef min
# endif
#else
# include <unistd.h>
# include <sys/time.h>
# define MSLEEP(x) usleep((x)*1000)
#endif
static inline const char* colorFormatName(TY_PIXEL_FORMAT fmt)
{
#define FORMAT_CASE(a) case (a): return #a
switch(fmt){
FORMAT_CASE(TY_PIXEL_FORMAT_UNDEFINED);
FORMAT_CASE(TY_PIXEL_FORMAT_MONO);
FORMAT_CASE(TY_PIXEL_FORMAT_RGB);
FORMAT_CASE(TY_PIXEL_FORMAT_YVYU);
FORMAT_CASE(TY_PIXEL_FORMAT_YUYV);
FORMAT_CASE(TY_PIXEL_FORMAT_DEPTH16);
FORMAT_CASE(TY_PIXEL_FORMAT_BAYER8GB);
FORMAT_CASE(TY_PIXEL_FORMAT_BAYER8BG);
FORMAT_CASE(TY_PIXEL_FORMAT_BAYER8GR);
FORMAT_CASE(TY_PIXEL_FORMAT_BAYER8RG);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_MONO10);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER10GBRG);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER10BGGR);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER10GRBG);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER10RGGB);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_MONO12);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER12GBRG);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER12BGGR);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER12GRBG);
FORMAT_CASE(TY_PIXEL_FORMAT_CSI_BAYER12RGGB);
FORMAT_CASE(TY_PIXEL_FORMAT_BGR);
FORMAT_CASE(TY_PIXEL_FORMAT_JPEG);
FORMAT_CASE(TY_PIXEL_FORMAT_MJPG);
default: return "UNKNOWN FORMAT";
}
#undef FORMAT_CASE
}
static inline const TY_IMAGE_DATA* TYImageInFrame(const TY_FRAME_DATA& frame
, const TY_COMPONENT_ID comp)
{
for(int i = 0; i < frame.validCount; i++){
if(frame.image[i].componentID == comp){
return &frame.image[i];
}
}
return NULL;
}
static void *updateThreadFunc(void *userdata)
{
TY_INTERFACE_HANDLE iface = (TY_INTERFACE_HANDLE)userdata;
TYUpdateDeviceList(iface);
return NULL;
}
static TY_STATUS updateDevicesParallel(std::vector<TY_INTERFACE_HANDLE> &ifaces,
uint64_t timeout=2000)
{
if(ifaces.size() != 0) {
TYThread *updateThreads = new TYThread[ifaces.size()];
for(size_t i = 0; i < ifaces.size(); i++) {
updateThreads[i].create(updateThreadFunc, ifaces[i]);
}
for(size_t i = 0; i < ifaces.size(); i++) {
updateThreads[i].destroy();
}
delete [] updateThreads;
updateThreads = NULL;
}
return TY_STATUS_OK;
}
static inline TY_STATUS selectDevice(TY_INTERFACE_TYPE iface
, const std::string& ID, const std::string& IP
, uint32_t deviceNum, std::vector<TY_DEVICE_BASE_INFO>& out)
{
LOGD("Update interface list");
ASSERT_OK( TYUpdateInterfaceList() );
uint32_t n = 0;
ASSERT_OK( TYGetInterfaceNumber(&n) );
LOGD("Got %u interface list", n);
if(n == 0){
LOGE("interface number incorrect");
return TY_STATUS_ERROR;
}
std::vector<TY_INTERFACE_INFO> ifaces(n);
ASSERT_OK( TYGetInterfaceList(&ifaces[0], n, &n) );
ASSERT( n == ifaces.size() );
for(uint32_t i = 0; i < n; i++){
LOGI("Found interface %u:", i);
LOGI(" name: %s", ifaces[i].name);
LOGI(" id: %s", ifaces[i].id);
LOGI(" type: 0x%x", ifaces[i].type);
if(TYIsNetworkInterface(ifaces[i].type)){
LOGI(" MAC: %s", ifaces[i].netInfo.mac);
LOGI(" ip: %s", ifaces[i].netInfo.ip);
LOGI(" netmask: %s", ifaces[i].netInfo.netmask);
LOGI(" gateway: %s", ifaces[i].netInfo.gateway);
LOGI(" broadcast: %s", ifaces[i].netInfo.broadcast);
}
}
out.clear();
std::vector<TY_INTERFACE_TYPE> ifaceTypeList;
std::vector<TY_INTERFACE_HANDLE> hIfaces;
ifaceTypeList.push_back(TY_INTERFACE_USB);
ifaceTypeList.push_back(TY_INTERFACE_ETHERNET);
ifaceTypeList.push_back(TY_INTERFACE_IEEE80211);
for(size_t t = 0; t < ifaceTypeList.size(); t++){
for(uint32_t i = 0; i < ifaces.size(); i++){
if(ifaces[i].type == ifaceTypeList[t] && (ifaces[i].type & iface) && deviceNum > out.size()){
TY_INTERFACE_HANDLE hIface;
ASSERT_OK( TYOpenInterface(ifaces[i].id, &hIface) );
hIfaces.push_back(hIface);
}
}
}
updateDevicesParallel(hIfaces);
for (uint32_t i = 0; i < hIfaces.size(); i++) {
TY_INTERFACE_HANDLE hIface = hIfaces[i];
uint32_t n = 0;
TYGetDeviceNumber(hIface, &n);
if(n > 0){
std::vector<TY_DEVICE_BASE_INFO> devs(n);
TYGetDeviceList(hIface, &devs[0], n, &n);
for(uint32_t j = 0; j < n; j++){
if(deviceNum > out.size() && ((ID.empty() && IP.empty())
|| (!ID.empty() && devs[j].id == ID)
|| (!IP.empty() && IP == devs[j].netInfo.ip)))
{
if (devs[j].iface.type == TY_INTERFACE_ETHERNET || devs[j].iface.type == TY_INTERFACE_IEEE80211) {
LOGI("*** Select %s on %s, ip %s", devs[j].id, devs[j].iface.id, devs[j].netInfo.ip);
} else {
LOGI("*** Select %s on %s", devs[j].id, devs[j].iface.id);
}
out.push_back(devs[j]);
}
}
}
TYCloseInterface(hIface);
}
if(out.size() == 0){
LOGE("not found any device");
return TY_STATUS_ERROR;
}
return TY_STATUS_OK;
}
static inline TY_STATUS get_feature_enum_list(TY_DEV_HANDLE handle,
TY_COMPONENT_ID compID,
TY_FEATURE_ID featID,
std::vector<TY_ENUM_ENTRY> &feature_info){
uint32_t n = 0;
ASSERT_OK(TYGetEnumEntryCount(handle, compID, featID, &n));
LOGD("=== %14s: entry count %d", "", n);
feature_info.clear();
if (n == 0){
return TY_STATUS_ERROR;
}
feature_info.resize(n);
ASSERT_OK(TYGetEnumEntryInfo(handle, compID, featID, &feature_info[0], n, &n));
return TY_STATUS_OK;
}
static inline TY_STATUS get_image_mode(TY_DEV_HANDLE handle
, TY_COMPONENT_ID compID
, TY_IMAGE_MODE &image_mode, int idx)
{
std::vector<TY_ENUM_ENTRY> image_mode_list;
ASSERT_OK(get_feature_enum_list(handle, compID, TY_ENUM_IMAGE_MODE, image_mode_list));
if (image_mode_list.size() == 0 || idx < 0
|| idx >= (int)image_mode_list.size()){
return TY_STATUS_ERROR;
}
image_mode = image_mode_list[idx].value;
return TY_STATUS_OK;
}
static inline TY_STATUS get_default_image_mode(TY_DEV_HANDLE handle
, TY_COMPONENT_ID compID
, TY_IMAGE_MODE &image_mode)
{
return get_image_mode(handle, compID, image_mode, 0);
}
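// --- Usage sketch (added example, not part of the original sample code) ---
// Shows how the enum helpers above are typically combined: query the default
// TY_ENUM_IMAGE_MODE entry of the RGB component and apply it with TYSetEnum
// (the generic enum setter from TYApi.h). hDevice is assumed to be an
// already opened device handle.
static inline TY_STATUS apply_default_rgb_image_mode(TY_DEV_HANDLE hDevice)
{
TY_IMAGE_MODE image_mode;
TY_STATUS res = get_default_image_mode(hDevice, TY_COMPONENT_RGB_CAM, image_mode);
if (res != TY_STATUS_OK){
LOGW("RGB component has no image mode list");
return res;
}
//entry 0 of the image mode list is used as the default here
return TYSetEnum(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, image_mode);
}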
enum EncodingType : uint32_t
{
HUFFMAN = 0,
};
//10MB
#define MAX_STORAGE_SIZE (10*1024*1024)
static inline TY_STATUS clear_storage(const TY_DEV_HANDLE handle)
{
uint32_t block_size;
ASSERT_OK( TYGetByteArraySize(handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_CUSTOM_BLOCK, &block_size) );
uint8_t* blocks = new uint8_t[block_size]();
ASSERT_OK( TYSetByteArray(handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_CUSTOM_BLOCK, blocks, block_size) );
delete []blocks;
return TY_STATUS_OK;
}
static inline TY_STATUS load_parameters_from_storage(const TY_DEV_HANDLE handle, std::string& js)
{
uint32_t block_size;
uint8_t* blocks = new uint8_t[MAX_STORAGE_SIZE] ();
ASSERT_OK( TYGetByteArraySize(handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_CUSTOM_BLOCK, &block_size) );
ASSERT_OK( TYGetByteArray(handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_CUSTOM_BLOCK, blocks, block_size) );
uint32_t crc_data = *(uint32_t*)blocks;
if(0 == crc_data || 0xffffffff == crc_data) {
LOGE("The CRC check code is empty.");
delete []blocks;
return TY_STATUS_ERROR;
}
uint32_t crc;
uint8_t* js_code = blocks + 4;
crc = crc32_bitwise(js_code, strlen((const char*)js_code));
if((crc != crc_data) || !isValidJsonString((const char*)js_code)) {
EncodingType type = *(EncodingType*)(blocks + 4);
ASSERT(type == HUFFMAN);
uint32_t huffman_size = *(uint32_t*)(blocks + 8);
uint8_t* huffman_ptr = (uint8_t*)(blocks + 12);
if(huffman_size > (MAX_STORAGE_SIZE - 12)) {
LOGE("Data length error.");
delete []blocks;
return TY_STATUS_ERROR;
}
crc = crc32_bitwise(huffman_ptr, huffman_size);
if(crc_data != crc) {
LOGE("The data in the storage area has a CRC check error.");
delete []blocks;
return TY_STATUS_ERROR;
}
std::string huffman_string(huffman_ptr, huffman_ptr + huffman_size);
if(!TextHuffmanDecompression(huffman_string, js)) {
LOGE("Huffman decoding error");
delete []blocks;
return TY_STATUS_ERROR;
}
} else {
js = std::string((const char*)js_code);
}
if(!json_parse(handle, (const char* )js.c_str())) {
LOGW("parameters load fail!");
delete []blocks;
return TY_STATUS_ERROR;
}
delete []blocks;
return TY_STATUS_OK;
}
static inline TY_STATUS write_parameters_to_storage(const TY_DEV_HANDLE handle, const std::string& json_file)
{
std::ifstream ifs(json_file);
if (!ifs.is_open()) {
LOGE("Unable to open file");
return TY_STATUS_ERROR;
}
std::stringstream buffer;
buffer << ifs.rdbuf();
ifs.close();
std::string huffman_string;
if(!TextHuffmanCompression(buffer.str(), huffman_string)) {
LOGE("Huffman compression error");
return TY_STATUS_ERROR;
}
const char* str = huffman_string.data();
uint32_t crc = crc32_bitwise(str, huffman_string.length());
uint32_t block_size;
ASSERT_OK( TYGetByteArraySize(handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_CUSTOM_BLOCK, &block_size) );
if(block_size < huffman_string.length() + 12) {
LOGE("The configuration file is too large, the maximum size should not exceed 4000 bytes");
return TY_STATUS_ERROR;
}
uint8_t* blocks = new uint8_t[block_size] ();
*(uint32_t*)blocks = crc;
*(uint32_t*)(blocks + 4) = HUFFMAN;
*(uint32_t*)(blocks + 8) = (uint32_t)huffman_string.length();
memcpy((char*)blocks + 12, str, huffman_string.length());
ASSERT_OK( TYSetByteArray(handle, TY_COMPONENT_STORAGE, TY_BYTEARRAY_CUSTOM_BLOCK, blocks, block_size) );
delete []blocks;
return TY_STATUS_OK;
}
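// --- Usage sketch (added example, not part of the original sample code) ---
// The custom storage block written above is laid out as:
//   [0..3]  CRC32 of the Huffman-compressed payload
//   [4..7]  EncodingType (currently always HUFFMAN)
//   [8..11] compressed payload length in bytes
//   [12.. ] Huffman-compressed JSON text
// A minimal round trip, assuming hDevice is an already opened device handle
// and "config.json" is a hypothetical parameter file on disk:
static inline void storage_parameters_example(const TY_DEV_HANDLE hDevice)
{
//store the JSON parameter file in the device's custom storage block
if (write_parameters_to_storage(hDevice, "config.json") != TY_STATUS_OK){
LOGW("write_parameters_to_storage failed");
return;
}
//read it back; load_parameters_from_storage also applies the parameters to the device
std::string js;
if (load_parameters_from_storage(hDevice, js) == TY_STATUS_OK){
LOGD("restored %u bytes of JSON parameters", (uint32_t)js.size());
}
}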
static inline void parse_firmware_errcode(TY_FW_ERRORCODE err_code) {
if (TY_FW_ERRORCODE_CAM0_NOT_DETECTED & err_code) {
LOGE("Left sensor Not Detected");
}
if (TY_FW_ERRORCODE_CAM1_NOT_DETECTED & err_code) {
LOGE("Right sensor Not Detected");
}
if (TY_FW_ERRORCODE_CAM2_NOT_DETECTED & err_code) {
LOGE("Color sensor Not Detected");
}
if (TY_FW_ERRORCODE_POE_NOT_INIT & err_code) {
LOGE("POE init error");
}
if (TY_FW_ERRORCODE_RECMAP_NOT_CORRECT & err_code) {
LOGE("RecMap error");
}
if (TY_FW_ERRORCODE_LOOKUPTABLE_NOT_CORRECT & err_code) {
LOGE("Disparity error");
}
if (TY_FW_ERRORCODE_DRV8899_NOT_INIT & err_code) {
LOGE("Motor init error");
}
if (TY_FW_ERRORCODE_FOC_START_ERR & err_code) {
LOGE("Motor start failed");
}
if (TY_FW_ERRORCODE_CONFIG_NOT_FOUND & err_code) {
LOGE("Config file not exist");
}
if (TY_FW_ERRORCODE_CONFIG_NOT_CORRECT & err_code) {
LOGE("Broken Config file");
}
if (TY_FW_ERRORCODE_XML_NOT_FOUND & err_code) {
LOGE("XML file not exist");
}
if (TY_FW_ERRORCODE_XML_NOT_CORRECT & err_code) {
LOGE("XML Parse err");
}
if (TY_FW_ERRORCODE_XML_OVERRIDE_FAILED & err_code) {
LOGE("Illegal XML file overrided, Only Used in Debug Mode!");
}
if (TY_FW_ERRORCODE_CAM_INIT_FAILED & err_code) {
LOGE("Init default cam feature failed!");
}
if (TY_FW_ERRORCODE_LASER_INIT_FAILED & err_code) {
LOGE("Init default laser feature failed!");
}
}
#endif

View File

@@ -0,0 +1,539 @@
#ifndef SAMPLE_COMMON_COMMON_HPP_
#define SAMPLE_COMMON_COMMON_HPP_
#include "Utils.hpp"
#include <fstream>
#include <iterator>
#include <memory>
#include <iostream>
#include <typeinfo>
#ifdef OPENCV_DEPENDENCIES
#include <opencv2/opencv.hpp>
#include "DepthRender.hpp"
#include "MatViewer.hpp"
#include "DepthInpainter.hpp"
#endif
#include "TYThread.hpp"
#include "TyIsp.h"
#include "BayerISP.hpp"
#include "CommandLineParser.hpp"
#include "CommandLineFeatureHelper.hpp"
static inline int decodeCsiRaw10(unsigned char* src, unsigned short* dst, int width, int height)
{
if(width & 0x3) {
return -1;
}
int raw10_line_size = 5 * width / 4;
for(size_t i = 0, j = 0; i < raw10_line_size * height; i+=5, j+=4)
{
//[A2 - A9] | [B2 - B9] | [C2 - C9] | [D2 - D9] | [A0A1-B0B1-C0C1-D0D1]
dst[j + 0] = ((uint16_t)src[i + 0] << 2) | ((src[i + 4] & 0x3) >> 0);
dst[j + 1] = ((uint16_t)src[i + 1] << 2) | ((src[i + 4] & 0xc) >> 2);
dst[j + 2] = ((uint16_t)src[i + 2] << 2) | ((src[i + 4] & 0x30) >> 4);
dst[j + 3] = ((uint16_t)src[i + 3] << 2) | ((src[i + 4] & 0xc0) >> 6);
}
return 0;
}
static inline int decodeCsiRaw12(unsigned char* src, unsigned short* dst, int width, int height)
{
if(width & 0x1) {
return -1;
}
int raw12_line_size = 3 * width / 2;
for(size_t i = 0, j = 0; i < raw12_line_size * height; i+=3, j+=2)
{
//[A4 - A11] | [B4 - B11] | [A0A1A2A3-B0B1B2B3]
dst[j + 0] = ((uint16_t)src[i + 0] << 4) | ((src[i + 2] & 0x0f) >> 0);
dst[j + 1] = ((uint16_t)src[i + 1] << 4) | ((src[i + 2] & 0xf0) >> 4);
}
return 0;
}
static inline int decodeCsiRaw14(unsigned char* src, unsigned short* dst, int width, int height)
{
if(width & 0x3) {
return -1;
}
int raw14_line_size = 7 * width / 4;
for(size_t i = 0, j = 0; i < raw14_line_size * height; i+=7, j+=4)
{
//[A6 - A13] | [B6 - B13] | [C6 - C13] | [D6 - D13] | [A0A1A2A3A4A5-B0B1] | [B2B3B4B5-C0C1C2C3] | [C4C5-D0D1D2D3D4D5]
dst[j + 0] = ((uint16_t)src[i + 0] << 6) | ((src[i + 4] & 0x3f) >> 0);
dst[j + 1] = ((uint16_t)src[i + 1] << 6) | ((src[i + 4] & 0xc0) >> 6) | ((src[i + 5] & 0x0f) << 2);
dst[j + 2] = ((uint16_t)src[i + 2] << 6) | ((src[i + 5] & 0xf0) >> 4) | ((src[i + 6] & 0x03) << 4);
dst[j + 3] = ((uint16_t)src[i + 3] << 6) | ((src[i + 6] & 0xfc) >> 2);
}
return 0;
}
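// --- Usage sketch (added example, not part of the original sample code) ---
// CSI RAW10 packs 4 pixels into 5 bytes, so a width x height RAW10 image
// occupies width*height*5/4 bytes while the decoded output needs one
// uint16_t per pixel. A minimal wrapper, assuming img points to a
// TY_PIXEL_FORMAT_CSI_MONO10 image delivered by the SDK:
static inline int decodeCsiMono10Image(const TY_IMAGE_DATA* img, std::vector<unsigned short>& pixels)
{
if (img->pixelFormat != TY_PIXEL_FORMAT_CSI_MONO10){
return -1;
}
pixels.resize((size_t)img->width * img->height);
//decodeCsiRaw10 returns -1 if the width is not a multiple of 4
return decodeCsiRaw10((unsigned char*)img->buffer, &pixels[0], img->width, img->height);
}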
#ifdef OPENCV_DEPENDENCIES
static inline int parseCsiRaw10(unsigned char* src, cv::Mat &dst, int width, int height)
{
cv::Mat m(height, width, CV_16U);
decodeCsiRaw10(src, (ushort*)m.data, width, height);
//scale the valid 10-bit value into the full 16-bit range: d = s * 64 (left shift by 6)
dst = m * 64;
return 0;
}
static inline int parseCsiRaw12(unsigned char* src, cv::Mat &dst, int width, int height)
{
cv::Mat m(height, width, CV_16U);
decodeCsiRaw12(src, (ushort*)m.data, width, height);
//scale the valid 12-bit value into the full 16-bit range: d = s * 16 (left shift by 4)
dst = m * 16;
return 0;
}
static inline int parseIrFrame(const TY_IMAGE_DATA* img, cv::Mat* pIR)
{
if (img->pixelFormat == TY_PIXEL_FORMAT_MONO16 || img->pixelFormat==TY_PIXEL_FORMAT_TOF_IR_MONO16){
*pIR = cv::Mat(img->height, img->width, CV_16U, img->buffer).clone();
} else if(img->pixelFormat == TY_PIXEL_FORMAT_CSI_MONO10) {
*pIR = cv::Mat(img->height, img->width, CV_16U);
parseCsiRaw10((uchar*)img->buffer, (*pIR), img->width, img->height);
} else if(img->pixelFormat == TY_PIXEL_FORMAT_MONO) {
*pIR = cv::Mat(img->height, img->width, CV_8U, img->buffer).clone();
} else if(img->pixelFormat == TY_PIXEL_FORMAT_CSI_MONO12) {
*pIR = cv::Mat(img->height, img->width, CV_16U);
parseCsiRaw12((uchar*)img->buffer, (*pIR), img->width, img->height);
}
else {
return -1;
}
return 0;
}
static inline int parseBayer8Frame(const TY_IMAGE_DATA* img, cv::Mat* pColor, TY_ISP_HANDLE color_isp_handle = NULL)
{
int code = cv::COLOR_BayerGB2BGR;
switch (img->pixelFormat)
{
case TY_PIXEL_FORMAT_BAYER8GBRG:
code = cv::COLOR_BayerGR2BGR;
break;
case TY_PIXEL_FORMAT_BAYER8BGGR:
code = cv::COLOR_BayerRG2BGR;
break;
case TY_PIXEL_FORMAT_BAYER8GRBG:
code = cv::COLOR_BayerGB2BGR;
break;
case TY_PIXEL_FORMAT_BAYER8RGGB:
code = cv::COLOR_BayerBG2BGR;
break;
default:
LOGE("Invalid bayer8 fmt!");
return -1;
}
if (!color_isp_handle){
cv::Mat raw(img->height, img->width, CV_8U, img->buffer);
cv::cvtColor(raw, *pColor, code);
}
else{
cv::Mat raw(img->height, img->width, CV_8U, img->buffer);
pColor->create(img->height, img->width, CV_8UC3);
int sz = img->height* img->width * 3;
TY_IMAGE_DATA out_buff = TYInitImageData(sz, pColor->data, img->width, img->height);
out_buff.pixelFormat = TY_PIXEL_FORMAT_BGR;
int res = TYISPProcessImage(color_isp_handle, img, &out_buff);
if (res != TY_STATUS_OK){
//fall back to using opencv api
cv::Mat raw(img->height, img->width, CV_8U, img->buffer);
cv::cvtColor(raw, *pColor, code);
}
}
return 0;
}
static inline int parseBayer10Frame(const TY_IMAGE_DATA* img, cv::Mat* pColor)
{
int code = cv::COLOR_BayerGB2BGR;
switch (img->pixelFormat)
{
case TY_PIXEL_FORMAT_CSI_BAYER10GBRG:
code = cv::COLOR_BayerGR2BGR;
break;
case TY_PIXEL_FORMAT_CSI_BAYER10BGGR:
code = cv::COLOR_BayerRG2BGR;
break;
case TY_PIXEL_FORMAT_CSI_BAYER10GRBG:
code = cv::COLOR_BayerGB2BGR;
break;
case TY_PIXEL_FORMAT_CSI_BAYER10RGGB:
code = cv::COLOR_BayerBG2BGR;
break;
default:
LOGE("Invalid bayer10 fmt!");
return -1;
}
cv::Mat raw16(img->height, img->width, CV_16U);
parseCsiRaw10((uchar*)img->buffer, raw16, img->width, img->height);
cv::cvtColor(raw16, *pColor, code);
return 0;
}
static inline int parseBayer12Frame(const TY_IMAGE_DATA* img, cv::Mat* pColor)
{
int code = cv::COLOR_BayerGB2BGR;
switch (img->pixelFormat)
{
case TY_PIXEL_FORMAT_CSI_BAYER12GBRG:
code = cv::COLOR_BayerGR2BGR;
break;
case TY_PIXEL_FORMAT_CSI_BAYER12BGGR:
code = cv::COLOR_BayerRG2BGR;
break;
case TY_PIXEL_FORMAT_CSI_BAYER12GRBG:
code = cv::COLOR_BayerGB2BGR;
break;
case TY_PIXEL_FORMAT_CSI_BAYER12RGGB:
code = cv::COLOR_BayerBG2BGR;
break;
default:
LOGE("Invalid bayer12 fmt!");
return -1;
}
cv::Mat raw16(img->height, img->width, CV_16U);
parseCsiRaw12((uchar*)img->buffer, raw16, img->width, img->height);
cv::cvtColor(raw16, *pColor, code);
return 0;
}
static inline int parseColorFrame(const TY_IMAGE_DATA* img, cv::Mat* pColor, TY_ISP_HANDLE color_isp_handle = NULL)
{
int ret = 0;
if (img->pixelFormat == TY_PIXEL_FORMAT_JPEG){
std::vector<uchar> _v((uchar*)img->buffer, (uchar*)img->buffer + img->size);
*pColor = cv::imdecode(_v, cv::IMREAD_COLOR);
ASSERT(img->width == pColor->cols && img->height == pColor->rows);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_YVYU){
cv::Mat yuv(img->height, img->width, CV_8UC2, img->buffer);
cv::cvtColor(yuv, *pColor, cv::COLOR_YUV2BGR_YVYU);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_YUYV){
cv::Mat yuv(img->height, img->width, CV_8UC2, img->buffer);
cv::cvtColor(yuv, *pColor, cv::COLOR_YUV2BGR_YUYV);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_RGB){
cv::Mat rgb(img->height, img->width, CV_8UC3, img->buffer);
cv::cvtColor(rgb, *pColor, cv::COLOR_RGB2BGR);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_BGR){
*pColor = cv::Mat(img->height, img->width, CV_8UC3, img->buffer).clone();
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_BAYER8GBRG ||
img->pixelFormat == TY_PIXEL_FORMAT_BAYER8BGGR ||
img->pixelFormat == TY_PIXEL_FORMAT_BAYER8GRBG ||
img->pixelFormat == TY_PIXEL_FORMAT_BAYER8RGGB)
{
ret = parseBayer8Frame(img, pColor, color_isp_handle);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10GBRG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10BGGR ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10GRBG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10RGGB)
{
ret = parseBayer10Frame(img, pColor);
}
else if(img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12GBRG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12BGGR ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12GRBG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12RGGB)
{
ret = parseBayer12Frame(img, pColor);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_MONO){
cv::Mat gray(img->height, img->width, CV_8U, img->buffer);
cv::cvtColor(gray, *pColor, cv::COLOR_GRAY2BGR);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_CSI_MONO10){
cv::Mat gray16(img->height, img->width, CV_16U);
parseCsiRaw10((uchar*)img->buffer, gray16, img->width, img->height);
*pColor = gray16.clone();
}
return ret;
}
static inline int parseImage(const TY_IMAGE_DATA* img, cv::Mat* image, TY_ISP_HANDLE color_isp_handle = NULL)
{
int ret = 0;
if (img->pixelFormat == TY_PIXEL_FORMAT_JPEG){
std::vector<uchar> _v((uchar*)img->buffer, (uchar*)img->buffer + img->size);
*image = cv::imdecode(_v, cv::IMREAD_COLOR);
ASSERT(img->width == image->cols && img->height == image->rows);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_YVYU){
cv::Mat yuv(img->height, img->width, CV_8UC2, img->buffer);
cv::cvtColor(yuv, *image, cv::COLOR_YUV2BGR_YVYU);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_YUYV){
cv::Mat yuv(img->height, img->width, CV_8UC2, img->buffer);
cv::cvtColor(yuv, *image, cv::COLOR_YUV2BGR_YUYV);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_RGB){
cv::Mat rgb(img->height, img->width, CV_8UC3, img->buffer);
cv::cvtColor(rgb, *image, cv::COLOR_RGB2BGR);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_BGR){
*image = cv::Mat(img->height, img->width, CV_8UC3, img->buffer).clone();
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_BAYER8GBRG ||
img->pixelFormat == TY_PIXEL_FORMAT_BAYER8BGGR ||
img->pixelFormat == TY_PIXEL_FORMAT_BAYER8GRBG ||
img->pixelFormat == TY_PIXEL_FORMAT_BAYER8RGGB)
{
ret = parseBayer8Frame(img, image, color_isp_handle);
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10GBRG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10BGGR ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10GRBG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER10RGGB)
{
ret = parseBayer10Frame(img, image);
}
else if(img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12GBRG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12BGGR ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12GRBG ||
img->pixelFormat == TY_PIXEL_FORMAT_CSI_BAYER12RGGB)
{
ret = parseBayer12Frame(img, image);
}
else if(img->pixelFormat == TY_PIXEL_FORMAT_MONO) {
*image = cv::Mat(img->height, img->width, CV_8U, img->buffer).clone();
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_CSI_MONO10){
cv::Mat gray16(img->height, img->width, CV_16U);
ret = parseCsiRaw10((uchar*)img->buffer, gray16, img->width, img->height);
*image = gray16.clone();
}
else if(img->pixelFormat == TY_PIXEL_FORMAT_CSI_MONO12) {
cv::Mat gray16(img->height, img->width, CV_16U);
ret = parseCsiRaw12((uchar*)img->buffer, gray16, img->width, img->height);
*image = gray16.clone();
}
else if (img->pixelFormat == TY_PIXEL_FORMAT_MONO16 || img->pixelFormat==TY_PIXEL_FORMAT_TOF_IR_MONO16){
*image = cv::Mat(img->height, img->width, CV_16U, img->buffer).clone();
}
else {
return -1;
}
return ret;
}
static inline int parseFrame(const TY_FRAME_DATA& frame, cv::Mat* pDepth
, cv::Mat* pLeftIR, cv::Mat* pRightIR
, cv::Mat* pColor, TY_ISP_HANDLE color_isp_handle = NULL)
{
for (int i = 0; i < frame.validCount; i++){
if (frame.image[i].status != TY_STATUS_OK) continue;
// get depth image
if (pDepth && frame.image[i].componentID == TY_COMPONENT_DEPTH_CAM){
if (frame.image[i].pixelFormat == TY_PIXEL_FORMAT_XYZ48) {
*pDepth = cv::Mat(frame.image[i].height, frame.image[i].width
, CV_16SC3, frame.image[i].buffer).clone();
}
else {
*pDepth = cv::Mat(frame.image[i].height, frame.image[i].width
, CV_16U, frame.image[i].buffer).clone();
}
}
// get left ir image
if (pLeftIR && frame.image[i].componentID == TY_COMPONENT_IR_CAM_LEFT){
parseIrFrame(&frame.image[i], pLeftIR);
}
// get right ir image
if (pRightIR && frame.image[i].componentID == TY_COMPONENT_IR_CAM_RIGHT){
parseIrFrame(&frame.image[i], pRightIR);
}
// get BGR
if (pColor && frame.image[i].componentID == TY_COMPONENT_RGB_CAM){
parseColorFrame(&frame.image[i], pColor, color_isp_handle);
}
}
return 0;
}
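// --- Usage sketch (added example, not part of the original sample code) ---
// Typical fetch-and-parse step once capture has been started: fetch a frame,
// split it into depth/IR/color cv::Mat with parseFrame, then hand the buffer
// back to the SDK with TYEnqueueBuffer so it can be refilled. hDevice is
// assumed to be a started device and isp_handle an optional color ISP handle
// (may be NULL).
static inline void fetch_and_parse_once(TY_DEV_HANDLE hDevice, TY_ISP_HANDLE isp_handle)
{
TY_FRAME_DATA frame;
if (TYFetchFrame(hDevice, &frame, 2000) != TY_STATUS_OK){
LOGW("fetch frame timeout");
return;
}
cv::Mat depth, leftIR, rightIR, color;
parseFrame(frame, &depth, &leftIR, &rightIR, &color, isp_handle);
if (!depth.empty()){ LOGD("depth image %dx%d", depth.cols, depth.rows); }
if (!color.empty()){ LOGD("color image %dx%d", color.cols, color.rows); }
//return the buffer so the SDK can reuse it for the next frame
ASSERT_OK( TYEnqueueBuffer(hDevice, frame.userBuffer, frame.bufferSize) );
}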
enum{
PC_FILE_FORMAT_XYZ = 0,
};
static void writePC_XYZ(const cv::Point3f* pnts, const cv::Vec3b *color, size_t n, FILE* fp)
{
if (color){
for (size_t i = 0; i < n; i++){
if (!std::isnan(pnts[i].x)){
fprintf(fp, "%f %f %f %d %d %d\n", pnts[i].x, pnts[i].y, pnts[i].z, color[i][0], color[i][1], color[i][2]);
}
}
}
else{
for (size_t i = 0; i < n; i++){
if (!std::isnan(pnts[i].x)){
fprintf(fp, "%f %f %f 0 0 0\n", pnts[i].x, pnts[i].y, pnts[i].z);
}
}
}
}
static void writePointCloud(const cv::Point3f* pnts, const cv::Vec3b *color, size_t n, const char* file, int format)
{
FILE* fp = fopen(file, "w");
if (!fp){
return;
}
switch (format){
case PC_FILE_FORMAT_XYZ:
writePC_XYZ(pnts, color, n, fp);
break;
default:
break;
}
fclose(fp);
}
#else
#endif
class CallbackWrapper
{
public:
typedef void(*TY_FRAME_CALLBACK) (TY_FRAME_DATA*, void* userdata);
CallbackWrapper(){
_hDevice = NULL;
_cb = NULL;
_userdata = NULL;
_exit = true;
}
TY_STATUS TYRegisterCallback(TY_DEV_HANDLE hDevice, TY_FRAME_CALLBACK v, void* userdata)
{
_hDevice = hDevice;
_cb = v;
_userdata = userdata;
_exit = false;
_cbThread.create(&workerThread, this);
return TY_STATUS_OK;
}
void TYUnregisterCallback()
{
if (!_exit) {
_exit = true;
_cbThread.destroy();
}
}
private:
static void* workerThread(void* userdata)
{
CallbackWrapper* pWrapper = (CallbackWrapper*)userdata;
TY_FRAME_DATA frame;
while (!pWrapper->_exit)
{
int err = TYFetchFrame(pWrapper->_hDevice, &frame, 100);
if (!err) {
pWrapper->_cb(&frame, pWrapper->_userdata);
}
}
LOGI("frameCallback exit!");
return NULL;
}
TY_DEV_HANDLE _hDevice;
TY_FRAME_CALLBACK _cb;
void* _userdata;
bool _exit;
TYThread _cbThread;
};
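// --- Usage sketch (added example, not part of the original sample code) ---
// CallbackWrapper turns the polling TYFetchFrame API into a callback style:
// a worker thread fetches frames and forwards them to the user callback.
// Here the device handle is passed as userdata so the callback can return
// the buffer with TYEnqueueBuffer; hDevice is assumed to be a started device.
static inline void exampleFrameHandler(TY_FRAME_DATA* frame, void* userdata)
{
TY_DEV_HANDLE hDevice = (TY_DEV_HANDLE)userdata;
LOGD("got frame with %d valid image(s)", frame->validCount);
//hand the buffer back so the SDK can keep streaming
TYEnqueueBuffer(hDevice, frame->userBuffer, frame->bufferSize);
}
static inline void callback_wrapper_example(TY_DEV_HANDLE hDevice)
{
CallbackWrapper wrapper;
wrapper.TYRegisterCallback(hDevice, exampleFrameHandler, (void*)hDevice);
MSLEEP(1000); //let the worker thread run for a while
wrapper.TYUnregisterCallback(); //stops and joins the worker thread
}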
#ifdef _WIN32
static int get_fps() {
static int fps_counter = 0;
static clock_t fps_tm = 0;
const int kMaxCounter = 250;
fps_counter++;
if (fps_counter < kMaxCounter) {
return -1;
}
int elapse = (clock() - fps_tm);
int v = (int)(((float)fps_counter) / elapse * CLOCKS_PER_SEC);
fps_tm = clock();
fps_counter = 0;
return v;
}
#else
static int get_fps() {
static int fps_counter = 0;
static clock_t fps_tm = 0;
const int kMaxCounter = 200;
struct timeval start;
fps_counter++;
if (fps_counter < kMaxCounter) {
return -1;
}
gettimeofday(&start, NULL);
int elapse = start.tv_sec * 1000 + start.tv_usec / 1000 - fps_tm;
int v = (int)(((float)fps_counter) / elapse * 1000);
gettimeofday(&start, NULL);
fps_tm = start.tv_sec * 1000 + start.tv_usec / 1000;
fps_counter = 0;
return v;
}
#endif
static std::vector<uint8_t> TYReadBinaryFile(const char* filename)
{
// open the file:
std::ifstream file(filename, std::ios::binary);
if (!file.is_open()){
return std::vector<uint8_t>();
}
// Stop eating new lines in binary mode!!!
file.unsetf(std::ios::skipws);
// get its size:
std::streampos fileSize;
file.seekg(0, std::ios::end);
fileSize = file.tellg();
file.seekg(0, std::ios::beg);
// reserve capacity
std::vector<uint8_t> vec;
vec.reserve(fileSize);
// read the data:
vec.insert(vec.begin(),
std::istream_iterator<uint8_t>(file),
std::istream_iterator<uint8_t>());
return vec;
}
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,69 @@
// //////////////////////////////////////////////////////////
// Crc32.h
// Copyright (c) 2011-2019 Stephan Brumme. All rights reserved.
// Slicing-by-16 contributed by Bulat Ziganshin
// Tableless bytewise CRC contributed by Hagai Gold
// see http://create.stephan-brumme.com/disclaimer.html
//
// if running on an embedded system, you might consider shrinking the
// big Crc32Lookup table by undefining these lines:
#define CRC32_USE_LOOKUP_TABLE_BYTE
#define CRC32_USE_LOOKUP_TABLE_SLICING_BY_4
#define CRC32_USE_LOOKUP_TABLE_SLICING_BY_8
#define CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
// - crc32_bitwise doesn't need it at all
// - crc32_halfbyte has its own small lookup table
// - crc32_1byte_tableless and crc32_1byte_tableless2 don't need it at all
// - crc32_1byte needs only Crc32Lookup[0]
// - crc32_4bytes needs only Crc32Lookup[0..3]
// - crc32_8bytes needs only Crc32Lookup[0..7]
// - crc32_4x8bytes needs only Crc32Lookup[0..7]
// - crc32_16bytes needs all of Crc32Lookup
// using the aforementioned #defines the table is automatically fitted to your needs
// uint8_t, uint32_t, int32_t
#include <stdint.h>
// size_t
#include <cstddef>
// crc32_fast selects the fastest algorithm depending on flags (CRC32_USE_LOOKUP_...)
/// compute CRC32 using the fastest algorithm for large datasets on modern CPUs
uint32_t crc32_fast (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// merge two CRC32 such that result = crc32(dataB, lengthB, crc32(dataA, lengthA))
uint32_t crc32_combine (uint32_t crcA, uint32_t crcB, size_t lengthB);
/// compute CRC32 (bitwise algorithm)
uint32_t crc32_bitwise (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (half-byte algorithm)
uint32_t crc32_halfbyte(const void* data, size_t length, uint32_t previousCrc32 = 0);
#ifdef CRC32_USE_LOOKUP_TABLE_BYTE
/// compute CRC32 (standard algorithm)
uint32_t crc32_1byte (const void* data, size_t length, uint32_t previousCrc32 = 0);
#endif
/// compute CRC32 (byte algorithm) without lookup tables
uint32_t crc32_1byte_tableless (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (byte algorithm) without lookup tables
uint32_t crc32_1byte_tableless2(const void* data, size_t length, uint32_t previousCrc32 = 0);
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_4
/// compute CRC32 (Slicing-by-4 algorithm)
uint32_t crc32_4bytes (const void* data, size_t length, uint32_t previousCrc32 = 0);
#endif
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_8
/// compute CRC32 (Slicing-by-8 algorithm)
uint32_t crc32_8bytes (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (Slicing-by-8 algorithm), unroll inner loop 4 times
uint32_t crc32_4x8bytes(const void* data, size_t length, uint32_t previousCrc32 = 0);
#endif
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
/// compute CRC32 (Slicing-by-16 algorithm)
uint32_t crc32_16bytes (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (Slicing-by-16 algorithm, prefetch upcoming data blocks)
uint32_t crc32_16bytes_prefetch(const void* data, size_t length, uint32_t previousCrc32 = 0, size_t prefetchAhead = 256);
#endif
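// --- Usage sketch (added example, not part of the original header) ---------
// All variants share the same incremental interface: pass the CRC of the
// previous chunk as previousCrc32 to checksum data that arrives in pieces.
// crc32_bitwise is used here because it needs no lookup table.
static inline bool crc32_incremental_selftest()
{
const char msg[] = "hello crc32";
// one-shot CRC over the whole buffer
uint32_t whole = crc32_bitwise(msg, sizeof(msg) - 1);
// the same buffer fed in two chunks, chaining the intermediate CRC
uint32_t part = crc32_bitwise(msg, 5);
part = crc32_bitwise(msg + 5, sizeof(msg) - 1 - 5, part);
return whole == part; // expected to be true
}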

View File

@@ -0,0 +1,464 @@
#include <iostream>
#include <cstdio>
#include <string>
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <iomanip>
#include <fstream>
#ifndef WIN32
#include <dirent.h>
#endif
#include "huffman.h"
struct ersel{ //this structure will be used to create the translation tree
ersel *left,*right;
long int number;
unsigned char character;
std::string bit;
};
struct translation{
translation *zero,*one;
unsigned char character;
};
bool erselcompare0(ersel a,ersel b){
return a.number<b.number;
}
const static unsigned char check=0b10000000;
//The function below is used for writing uChar to the compressed stream.
//It does not write it directly as one byte; instead it mixes uChar with the current byte, writes 8 bits of it
//and puts the rest into the current byte for later use.
void write_from_uChar(unsigned char uChar,unsigned char &current_byte,int current_bit_count, std::stringstream& ss){
current_byte<<=8-current_bit_count;
current_byte|=(uChar>>current_bit_count);
ss.write(reinterpret_cast<const char*>(&current_byte), sizeof(current_byte));
current_byte=uChar;
}
//The function below writes the number of files we are going to translate into 2 bytes of the compressed stream.
//It is done like this to make sure that it can work on little, big or middle-endian systems
void write_file_count(int file_count,unsigned char &current_byte,int current_bit_count,std::stringstream& ss){
unsigned char temp=file_count%256;
write_from_uChar(temp,current_byte,current_bit_count,ss);
temp=file_count/256;
write_from_uChar(temp,current_byte,current_bit_count,ss);
}
//This function is writing byte count of current input file to compressed file using 8 bytes
//It is done like this to make sure that it can work on little, big or middle-endian systems
void write_file_size(long int size,unsigned char &current_byte,int current_bit_count,std::stringstream& ss){
for(int i=0;i<8;i++){
write_from_uChar(size%256,current_byte,current_bit_count,ss);
size/=256;
}
}
// Below function translates and writes bytes from current input file to the compressed file.
void write_the_file_content(const std::string& text, std::string *str_arr, unsigned char &current_byte, int &current_bit_count, std::stringstream& ss){
unsigned char x;
char *str_pointer;
long size = text.length();
x = text.at(0);
for(long int i=0;i<size;i++){
str_pointer=&str_arr[x][0];
while(*str_pointer){
if(current_bit_count==8){
ss.write(reinterpret_cast<const char*>(&current_byte), sizeof(current_byte));
current_bit_count=0;
}
switch(*str_pointer){
case '1':current_byte<<=1;current_byte|=1;current_bit_count++;break;
case '0':current_byte<<=1;current_bit_count++;break;
default: std::cout<<"An error has occurred"<< std::endl <<"Process has been aborted";
exit(2);
}
str_pointer++;
}
if(i != size - 1) {
x = (unsigned char)text.at(i + 1);
}
}
}
//checks if next input is either a file or a folder
//returns 1 if it is a file
//returns 0 if it is a folder
bool this_is_a_file(unsigned char &current_byte,int &current_bit_count, std::stringstream& ss){
bool val;
if(current_bit_count==0){
ss.read((char*)&current_byte, 1);
current_bit_count=8;
}
val=current_byte&check;
current_byte<<=1;
current_bit_count--;
return val;
}
// process_8_bits_NUMBER reads 8 successive bits from compressed file
//(does not have to be in the same byte)
// and returns it in unsigned char form
unsigned char process_8_bits_NUMBER(unsigned char &current_byte,int current_bit_count, std::stringstream& ss){
unsigned char val,temp_byte;
ss.read((char*)&temp_byte, 1);
val=current_byte|(temp_byte>>current_bit_count);
current_byte=temp_byte<<(8-current_bit_count);
return val;
}
// returns file's size
long int read_file_size(unsigned char &current_byte,int current_bit_count, std::stringstream& ss){
long int size=0;
{
long int multiplier=1;
for(int i=0;i<8;i++){
size+=process_8_bits_NUMBER(current_byte,current_bit_count,ss)*multiplier;
multiplier*=256;
}
}
return size;
// Size was written to the compressed stream from the least significant byte
// to the most significant byte so that the system's endianness
// does not affect the process and that is why we are processing size information like this
}
// This function translates compressed file from info that is now stored in the translation tree
// then writes it to a newly created file
void translate_file(long int size,unsigned char &current_byte,int &current_bit_count,translation *root, std::stringstream& ss, std::string& text){
translation *node;
for(long int i=0;i<size;i++){
node=root;
while(node->zero||node->one){
if(current_bit_count==0){
ss.read((char*)&current_byte, 1);
current_bit_count=8;
}
if(current_byte&check){
node=node->one;
}
else{
node=node->zero;
}
current_byte<<=1;
current_bit_count--;
}
text.at(i) = node->character;
}
}
// process_n_bits_TO_STRING function reads n successive bits from the compressed file
// and stores it in a leaf of the translation tree,
// after creating that leaf and sometimes after creating nodes that are binding that leaf to the tree.
void process_n_bits_TO_STRING(unsigned char &current_byte,int n,int &current_bit_count,std::stringstream& ss,translation *node,unsigned char uChar){
for(int i=0;i<n;i++){
if(current_bit_count==0){
ss.read((char*)&current_byte, 1);
current_bit_count=8;
}
switch(current_byte&check){
case 0:
if(!(node->zero)){
node->zero=(translation*)malloc(sizeof(translation));
node->zero->zero=NULL;
node->zero->one=NULL;
}
node=node->zero;
break;
case 128:
if(!(node->one)){
node->one=(translation*)malloc(sizeof(translation));
node->one->zero=NULL;
node->one->one=NULL;
}
node=node->one;
break;
}
current_byte<<=1;
current_bit_count--;
}
node->character=uChar;
}
// burn_tree function is used for deallocating translation tree
void burn_tree(translation *node){
if(node->zero)burn_tree(node->zero);
if(node->one)burn_tree(node->one);
free(node);
}
//////////////////////////////////////////////////////////////////////
bool TextHuffmanCompression(const std::string& text, std::string& result)
{
unsigned char x; //temp variable holding the byte currently read from the input text
long int total_size=0,size;
std::stringstream ss;
long int number[256];
long int total_bits=0;
unsigned char letter_count=0;
for(long int *i=number;i<number+256;i++){
*i=0;
}
total_bits+=16+9;
size = text.length();
total_size += size;
total_bits+=64;
for(long int j=0;j<size;j++){ //counting usage frequency of unique bytes inside the text
x = (unsigned char)text.at(j);
number[x]++;
}
for(long int *i=number;i<number+256;i++){
if(*i){
letter_count++;
}
}
//---------------------------------------------
// creating the base of the translation array (and then sorting it by ascending frequency)
// this array of type 'ersel' will not be used after calculating transformed versions of every unique byte
// instead its info will be written in a new string array called str_arr
ersel* array = new ersel[letter_count*2-1];
ersel *e=array;
for(long int *i=number;i<number+256;i++){
if(*i){
e->right=NULL;
e->left=NULL;
e->number=*i;
e->character=i-number;
e++;
}
}
std::sort(array,array+letter_count,erselcompare0);
//---------------------------------------------
// min1 and min2 represent the nodes that have the minimum weights
// isleaf is the pointer that traverses through leafs and
// notleaf is the pointer that traverses through nodes that are not leafs
ersel *min1=array,*min2=array+1,*current=array+letter_count,*notleaf=array+letter_count,*isleaf=array+2;
for(int i=0;i<letter_count-1;i++){
current->number=min1->number+min2->number;
current->left=min1;
current->right=min2;
min1->bit="1";
min2->bit="0";
current++;
if(isleaf>=array+letter_count){
min1=notleaf;
notleaf++;
}
else{
if(isleaf->number<notleaf->number){
min1=isleaf;
isleaf++;
}
else{
min1=notleaf;
notleaf++;
}
}
if(isleaf>=array+letter_count){
min2=notleaf;
notleaf++;
}
else if(notleaf>=current){
min2=isleaf;
isleaf++;
}
else{
if(isleaf->number<notleaf->number){
min2=isleaf;
isleaf++;
}
else{
min2=notleaf;
notleaf++;
}
}
}
for(e=array+letter_count*2-2;e>array-1;e--){
if(e->left){
e->left->bit=e->bit+e->left->bit;
}
if(e->right){
e->right->bit=e->bit+e->right->bit;
}
}
// In this block we are propagating the bit strings from the root down to the leaves,
// and after this is done every leaf will have a transformation string that corresponds to it.
// Note: it is actually a very neat process. Using the 4th and 5th code blocks, we are making sure that
// the most used character gets the smallest number of bits.
// The specific number of bits we are going to use for a character is determined by the weight distribution.
//---------------------------------------------
int current_bit_count=0;
unsigned char current_byte=0;
ss.write(reinterpret_cast<const char*>(&letter_count), sizeof(letter_count));
total_bits+=8;
//----------------------------------------
char *str_pointer;
unsigned char len,current_character;
std::string str_arr[256];
for(e=array;e<array+letter_count;e++){
str_arr[(e->character)]=e->bit; //we are putting the transformation string to str_arr array to make the compression process more time efficient
len=e->bit.length();
current_character=e->character;
write_from_uChar(current_character,current_byte,current_bit_count,ss);
write_from_uChar(len,current_byte,current_bit_count,ss);
total_bits+=len+16;
// above lines will write the byte and the number of bits
// we are going to need to represent this specific byte's transformed version
// after here we are going to write the transformed version of the number bit by bit.
str_pointer=&e->bit[0];
while(*str_pointer){
if(current_bit_count==8){
ss.write(reinterpret_cast<const char*>(&current_byte), sizeof(current_byte));
current_bit_count=0;
}
switch(*str_pointer){
case '1':current_byte<<=1;current_byte|=1;current_bit_count++;break;
case '0':current_byte<<=1;current_bit_count++;break;
default:std::cout<<"An error has occurred"<<std::endl<<"Compression process aborted"<<std::endl;
return false;
}
str_pointer++;
}
total_bits+=len*(e->number);
}
if(total_bits%8){
total_bits=(total_bits/8+1)*8;
// from this point on total_bits no longer represents the exact bit count;
// instead it represents 8 * the number of bytes we are going to use for the compressed stream
}
delete[]array;
// Above loop writes the translation script into compressed file and the str_arr array
//----------------------------------------
std::cout<<"The size of the sum of ORIGINAL files is: "<<total_size<<" bytes"<<std::endl;
std::cout<<"The size of the COMPRESSED file will be: "<<total_bits/8<<" bytes"<<std::endl;
std::cout<<"Compressed file's size will be [%"<<100*((float)total_bits/8/total_size)<<"] of the original file"<<std::endl;
if(total_bits/8>total_size){
std::cout<<std::endl<<"COMPRESSED FILE'S SIZE WILL BE HIGHER THAN THE SUM OF ORIGINALS"<<std::endl<<std::endl;
}
//-------------writes fourth---------------
write_file_count(1,current_byte,current_bit_count,ss);
//---------------------------------------
//-------------writes fifth--------------
if(current_bit_count==8){
ss.write(reinterpret_cast<const char*>(&current_byte), sizeof(current_byte));
current_bit_count=0;
}
current_byte<<=1;
current_byte|=1;
current_bit_count++;
write_file_size(size,current_byte,current_bit_count,ss); //writes sixth
write_the_file_content(text,str_arr,current_byte,current_bit_count,ss); //writes eighth
if(current_bit_count==8){ // here we are writing the last byte of the file
ss.write(reinterpret_cast<const char*>(&current_byte), sizeof(current_byte));
}
else{
current_byte<<=8-current_bit_count;
ss.write(reinterpret_cast<const char*>(&current_byte), sizeof(current_byte));
}
result = ss.str();
return true;
}
bool TextHuffmanDecompression(const std::string& huffman, std::string& text)
{
unsigned char letter_count=0;
std::stringstream ss(huffman);
//---------reads .first-----------
ss.read((char*)&letter_count, 1);
int m_letter_count;
if(letter_count==0)
m_letter_count=256;
else
m_letter_count = letter_count;
//-------------------------------
//----------------reads .second---------------------
// and stores transformation info into binary translation tree for later use
unsigned char current_byte=0,current_character;
int current_bit_count=0,len;
translation *root=(translation*)malloc(sizeof(translation));
root->zero=NULL;
root->one=NULL;
for(int i=0;i<m_letter_count;i++){
current_character=process_8_bits_NUMBER(current_byte,current_bit_count,ss);
len=process_8_bits_NUMBER(current_byte,current_bit_count,ss);
if(len==0)len=256;
process_n_bits_TO_STRING(current_byte,len,current_bit_count,ss,root,current_character);
}
//--------------------------------------------------
// ---------reads .third----------
//reads how many folders/files the program is going to create inside the main folder
int file_count;
file_count=process_8_bits_NUMBER(current_byte,current_bit_count,ss);
file_count+=256*process_8_bits_NUMBER(current_byte,current_bit_count,ss);
if(file_count != 1) {
// only streams containing a single text block are supported here
burn_tree(root);
return false;
}
// File count was written to the compressed stream from the least significant byte
// to the most significant byte so that the system's endianness
// does not affect the process and that is why we are processing size information like this
if(this_is_a_file(current_byte,current_bit_count,ss)){ // reads .fifth and goes inside if this is a file
long int size=read_file_size(current_byte,current_bit_count,ss); // reads .sixth
text.resize(size);
translate_file(size,current_byte,current_bit_count,root,ss, text); //translates .eighth
burn_tree(root);
return true;
}
burn_tree(root);
return false;
}

View File

@@ -0,0 +1,5 @@
#pragma once
#include <string>
bool TextHuffmanCompression(const std::string& text, std::string& result);
bool TextHuffmanDecompression(const std::string& huffman, std::string& text);
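// --- Usage sketch (added example, not part of the original header) ---------
// Round trip: compress a piece of text and decompress it again. The
// compressed buffer is a self-describing Huffman stream (code table followed
// by the payload), so no extra metadata has to be stored next to it.
static inline bool huffman_roundtrip_example()
{
std::string text = "{\"example\":\"huffman round trip\"}";
std::string compressed, restored;
if (!TextHuffmanCompression(text, compressed)) {
return false;
}
if (!TextHuffmanDecompression(compressed, restored)) {
return false;
}
return restored == text; // expected to be true
}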

View File

@@ -0,0 +1,790 @@
/* Copyright (c) 2013 Dropbox, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "json11.hpp"
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <limits>
namespace json11 {
static const int max_depth = 200;
using std::string;
using std::vector;
using std::map;
using std::make_shared;
using std::initializer_list;
using std::move;
/* Helper for representing null - just a do-nothing struct, plus comparison
* operators so the helpers in JsonValue work. We can't use nullptr_t because
* it may not be orderable.
*/
struct NullStruct {
bool operator==(NullStruct) const { return true; }
bool operator<(NullStruct) const { return false; }
};
/* * * * * * * * * * * * * * * * * * * *
* Serialization
*/
static void dump(NullStruct, string &out) {
out += "null";
}
static void dump(double value, string &out) {
if (std::isfinite(value)) {
char buf[32];
snprintf(buf, sizeof buf, "%.17g", value);
out += buf;
} else {
out += "null";
}
}
static void dump(int value, string &out) {
char buf[32];
snprintf(buf, sizeof buf, "%d", value);
out += buf;
}
static void dump(bool value, string &out) {
out += value ? "true" : "false";
}
static void dump(const string &value, string &out) {
out += '"';
for (size_t i = 0; i < value.length(); i++) {
const char ch = value[i];
if (ch == '\\') {
out += "\\\\";
} else if (ch == '"') {
out += "\\\"";
} else if (ch == '\b') {
out += "\\b";
} else if (ch == '\f') {
out += "\\f";
} else if (ch == '\n') {
out += "\\n";
} else if (ch == '\r') {
out += "\\r";
} else if (ch == '\t') {
out += "\\t";
} else if (static_cast<uint8_t>(ch) <= 0x1f) {
char buf[8];
snprintf(buf, sizeof buf, "\\u%04x", ch);
out += buf;
} else if (static_cast<uint8_t>(ch) == 0xe2 && static_cast<uint8_t>(value[i+1]) == 0x80
&& static_cast<uint8_t>(value[i+2]) == 0xa8) {
out += "\\u2028";
i += 2;
} else if (static_cast<uint8_t>(ch) == 0xe2 && static_cast<uint8_t>(value[i+1]) == 0x80
&& static_cast<uint8_t>(value[i+2]) == 0xa9) {
out += "\\u2029";
i += 2;
} else {
out += ch;
}
}
out += '"';
}
static void dump(const Json::array &values, string &out) {
bool first = true;
out += "[";
for (const auto &value : values) {
if (!first)
out += ", ";
value.dump(out);
first = false;
}
out += "]";
}
static void dump(const Json::object &values, string &out) {
bool first = true;
out += "{";
for (const auto &kv : values) {
if (!first)
out += ", ";
dump(kv.first, out);
out += ": ";
kv.second.dump(out);
first = false;
}
out += "}";
}
void Json::dump(string &out) const {
m_ptr->dump(out);
}
/* * * * * * * * * * * * * * * * * * * *
* Value wrappers
*/
template <Json::Type tag, typename T>
class Value : public JsonValue {
protected:
// Constructors
explicit Value(const T &value) : m_value(value) {}
explicit Value(T &&value) : m_value(move(value)) {}
// Get type tag
Json::Type type() const override {
return tag;
}
// Comparisons
bool equals(const JsonValue * other) const override {
return m_value == static_cast<const Value<tag, T> *>(other)->m_value;
}
bool less(const JsonValue * other) const override {
return m_value < static_cast<const Value<tag, T> *>(other)->m_value;
}
const T m_value;
void dump(string &out) const override { json11::dump(m_value, out); }
};
class JsonDouble final : public Value<Json::NUMBER, double> {
double number_value() const override { return m_value; }
int int_value() const override { return static_cast<int>(m_value); }
bool equals(const JsonValue * other) const override { return m_value == other->number_value(); }
bool less(const JsonValue * other) const override { return m_value < other->number_value(); }
public:
explicit JsonDouble(double value) : Value(value) {}
};
class JsonInt final : public Value<Json::NUMBER, int> {
double number_value() const override { return m_value; }
int int_value() const override { return m_value; }
bool equals(const JsonValue * other) const override { return m_value == other->number_value(); }
bool less(const JsonValue * other) const override { return m_value < other->number_value(); }
public:
explicit JsonInt(int value) : Value(value) {}
};
class JsonBoolean final : public Value<Json::BOOL, bool> {
bool bool_value() const override { return m_value; }
public:
explicit JsonBoolean(bool value) : Value(value) {}
};
class JsonString final : public Value<Json::STRING, string> {
const string &string_value() const override { return m_value; }
public:
explicit JsonString(const string &value) : Value(value) {}
explicit JsonString(string &&value) : Value(move(value)) {}
};
class JsonArray final : public Value<Json::ARRAY, Json::array> {
const Json::array &array_items() const override { return m_value; }
const Json & operator[](size_t i) const override;
public:
explicit JsonArray(const Json::array &value) : Value(value) {}
explicit JsonArray(Json::array &&value) : Value(move(value)) {}
};
class JsonObject final : public Value<Json::OBJECT, Json::object> {
const Json::object &object_items() const override { return m_value; }
const Json & operator[](const string &key) const override;
public:
explicit JsonObject(const Json::object &value) : Value(value) {}
explicit JsonObject(Json::object &&value) : Value(move(value)) {}
};
class JsonNull final : public Value<Json::NUL, NullStruct> {
public:
JsonNull() : Value({}) {}
};
/* * * * * * * * * * * * * * * * * * * *
* Static globals - static-init-safe
*/
struct Statics {
const std::shared_ptr<JsonValue> null = make_shared<JsonNull>();
const std::shared_ptr<JsonValue> t = make_shared<JsonBoolean>(true);
const std::shared_ptr<JsonValue> f = make_shared<JsonBoolean>(false);
const string empty_string;
const vector<Json> empty_vector;
const map<string, Json> empty_map;
Statics() {}
};
static const Statics & statics() {
static const Statics s {};
return s;
}
static const Json & static_null() {
// This has to be separate, not in Statics, because Json() accesses statics().null.
static const Json json_null;
return json_null;
}
/* * * * * * * * * * * * * * * * * * * *
* Constructors
*/
Json::Json() noexcept : m_ptr(statics().null) {}
Json::Json(std::nullptr_t) noexcept : m_ptr(statics().null) {}
Json::Json(double value) : m_ptr(make_shared<JsonDouble>(value)) {}
Json::Json(int value) : m_ptr(make_shared<JsonInt>(value)) {}
Json::Json(bool value) : m_ptr(value ? statics().t : statics().f) {}
Json::Json(const string &value) : m_ptr(make_shared<JsonString>(value)) {}
Json::Json(string &&value) : m_ptr(make_shared<JsonString>(move(value))) {}
Json::Json(const char * value) : m_ptr(make_shared<JsonString>(value)) {}
Json::Json(const Json::array &values) : m_ptr(make_shared<JsonArray>(values)) {}
Json::Json(Json::array &&values) : m_ptr(make_shared<JsonArray>(move(values))) {}
Json::Json(const Json::object &values) : m_ptr(make_shared<JsonObject>(values)) {}
Json::Json(Json::object &&values) : m_ptr(make_shared<JsonObject>(move(values))) {}
/* * * * * * * * * * * * * * * * * * * *
* Accessors
*/
Json::Type Json::type() const { return m_ptr->type(); }
double Json::number_value() const { return m_ptr->number_value(); }
int Json::int_value() const { return m_ptr->int_value(); }
bool Json::bool_value() const { return m_ptr->bool_value(); }
const string & Json::string_value() const { return m_ptr->string_value(); }
const vector<Json> & Json::array_items() const { return m_ptr->array_items(); }
const map<string, Json> & Json::object_items() const { return m_ptr->object_items(); }
const Json & Json::operator[] (size_t i) const { return (*m_ptr)[i]; }
const Json & Json::operator[] (const string &key) const { return (*m_ptr)[key]; }
double JsonValue::number_value() const { return 0; }
int JsonValue::int_value() const { return 0; }
bool JsonValue::bool_value() const { return false; }
const string & JsonValue::string_value() const { return statics().empty_string; }
const vector<Json> & JsonValue::array_items() const { return statics().empty_vector; }
const map<string, Json> & JsonValue::object_items() const { return statics().empty_map; }
const Json & JsonValue::operator[] (size_t) const { return static_null(); }
const Json & JsonValue::operator[] (const string &) const { return static_null(); }
const Json & JsonObject::operator[] (const string &key) const {
auto iter = m_value.find(key);
return (iter == m_value.end()) ? static_null() : iter->second;
}
const Json & JsonArray::operator[] (size_t i) const {
if (i >= m_value.size()) return static_null();
else return m_value[i];
}
/* * * * * * * * * * * * * * * * * * * *
* Comparison
*/
bool Json::operator== (const Json &other) const {
if (m_ptr == other.m_ptr)
return true;
if (m_ptr->type() != other.m_ptr->type())
return false;
return m_ptr->equals(other.m_ptr.get());
}
bool Json::operator< (const Json &other) const {
if (m_ptr == other.m_ptr)
return false;
if (m_ptr->type() != other.m_ptr->type())
return m_ptr->type() < other.m_ptr->type();
return m_ptr->less(other.m_ptr.get());
}
/* * * * * * * * * * * * * * * * * * * *
* Parsing
*/
/* esc(c)
*
* Format char c suitable for printing in an error message.
*/
static inline string esc(char c) {
char buf[12];
if (static_cast<uint8_t>(c) >= 0x20 && static_cast<uint8_t>(c) <= 0x7f) {
snprintf(buf, sizeof buf, "'%c' (%d)", c, c);
} else {
snprintf(buf, sizeof buf, "(%d)", c);
}
return string(buf);
}
static inline bool in_range(long x, long lower, long upper) {
return (x >= lower && x <= upper);
}
namespace {
/* JsonParser
*
* Object that tracks all state of an in-progress parse.
*/
struct JsonParser final {
/* State
*/
const string &str;
size_t i;
string &err;
bool failed;
const JsonParse strategy;
/* fail(msg, err_ret = Json())
*
* Mark this parse as failed.
*/
Json fail(string &&msg) {
return fail(move(msg), Json());
}
template <typename T>
T fail(string &&msg, const T err_ret) {
if (!failed)
err = std::move(msg);
failed = true;
return err_ret;
}
/* consume_whitespace()
*
* Advance until the current character is non-whitespace.
*/
void consume_whitespace() {
while (str[i] == ' ' || str[i] == '\r' || str[i] == '\n' || str[i] == '\t')
i++;
}
/* consume_comment()
*
* Advance comments (c-style inline and multiline).
*/
bool consume_comment() {
bool comment_found = false;
if (str[i] == '/') {
i++;
if (i == str.size())
return fail("unexpected end of input after start of comment", false);
if (str[i] == '/') { // inline comment
i++;
// advance until next line, or end of input
while (i < str.size() && str[i] != '\n') {
i++;
}
comment_found = true;
}
else if (str[i] == '*') { // multiline comment
i++;
if (i > str.size()-2)
return fail("unexpected end of input inside multi-line comment", false);
// advance until closing tokens
while (!(str[i] == '*' && str[i+1] == '/')) {
i++;
if (i > str.size()-2)
return fail(
"unexpected end of input inside multi-line comment", false);
}
i += 2;
comment_found = true;
}
else
return fail("malformed comment", false);
}
return comment_found;
}
/* consume_garbage()
*
* Advance until the current character is non-whitespace and non-comment.
*/
void consume_garbage() {
consume_whitespace();
if(strategy == JsonParse::COMMENTS) {
bool comment_found = false;
do {
comment_found = consume_comment();
if (failed) return;
consume_whitespace();
}
while(comment_found);
}
}
/* get_next_token()
*
* Return the next non-whitespace character. If the end of the input is reached,
* flag an error and return 0.
*/
char get_next_token() {
consume_garbage();
if (failed) return static_cast<char>(0);
if (i == str.size())
return fail("unexpected end of input", static_cast<char>(0));
return str[i++];
}
/* encode_utf8(pt, out)
*
* Encode pt as UTF-8 and add it to out.
*/
void encode_utf8(long pt, string & out) {
if (pt < 0)
return;
if (pt < 0x80) {
out += static_cast<char>(pt);
} else if (pt < 0x800) {
out += static_cast<char>((pt >> 6) | 0xC0);
out += static_cast<char>((pt & 0x3F) | 0x80);
} else if (pt < 0x10000) {
out += static_cast<char>((pt >> 12) | 0xE0);
out += static_cast<char>(((pt >> 6) & 0x3F) | 0x80);
out += static_cast<char>((pt & 0x3F) | 0x80);
} else {
out += static_cast<char>((pt >> 18) | 0xF0);
out += static_cast<char>(((pt >> 12) & 0x3F) | 0x80);
out += static_cast<char>(((pt >> 6) & 0x3F) | 0x80);
out += static_cast<char>((pt & 0x3F) | 0x80);
}
}
/* parse_string()
*
* Parse a string, starting at the current position.
*/
string parse_string() {
string out;
long last_escaped_codepoint = -1;
while (true) {
if (i == str.size())
return fail("unexpected end of input in string", "");
char ch = str[i++];
if (ch == '"') {
encode_utf8(last_escaped_codepoint, out);
return out;
}
if (in_range(ch, 0, 0x1f))
return fail("unescaped " + esc(ch) + " in string", "");
// The usual case: non-escaped characters
if (ch != '\\') {
encode_utf8(last_escaped_codepoint, out);
last_escaped_codepoint = -1;
out += ch;
continue;
}
// Handle escapes
if (i == str.size())
return fail("unexpected end of input in string", "");
ch = str[i++];
if (ch == 'u') {
// Extract 4-byte escape sequence
string esc = str.substr(i, 4);
// Explicitly check length of the substring. The following loop
// relies on std::string returning the terminating NUL when
// accessing str[length]. Checking here reduces brittleness.
if (esc.length() < 4) {
return fail("bad \\u escape: " + esc, "");
}
for (size_t j = 0; j < 4; j++) {
if (!in_range(esc[j], 'a', 'f') && !in_range(esc[j], 'A', 'F')
&& !in_range(esc[j], '0', '9'))
return fail("bad \\u escape: " + esc, "");
}
long codepoint = strtol(esc.data(), nullptr, 16);
// JSON specifies that characters outside the BMP shall be encoded as a pair
// of 4-hex-digit \u escapes encoding their surrogate pair components. Check
// whether we're in the middle of such a beast: the previous codepoint was an
// escaped lead (high) surrogate, and this is a trail (low) surrogate.
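// For example, the escape pair "\uD83D\uDE00" reassembles to the single code point U+1F600.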
if (in_range(last_escaped_codepoint, 0xD800, 0xDBFF)
&& in_range(codepoint, 0xDC00, 0xDFFF)) {
// Reassemble the two surrogate pairs into one astral-plane character, per
// the UTF-16 algorithm.
encode_utf8((((last_escaped_codepoint - 0xD800) << 10)
| (codepoint - 0xDC00)) + 0x10000, out);
last_escaped_codepoint = -1;
} else {
encode_utf8(last_escaped_codepoint, out);
last_escaped_codepoint = codepoint;
}
i += 4;
continue;
}
encode_utf8(last_escaped_codepoint, out);
last_escaped_codepoint = -1;
if (ch == 'b') {
out += '\b';
} else if (ch == 'f') {
out += '\f';
} else if (ch == 'n') {
out += '\n';
} else if (ch == 'r') {
out += '\r';
} else if (ch == 't') {
out += '\t';
} else if (ch == '"' || ch == '\\' || ch == '/') {
out += ch;
} else {
return fail("invalid escape character " + esc(ch), "");
}
}
}
/* parse_number()
*
* Parse a double.
*/
Json parse_number() {
size_t start_pos = i;
if (str[i] == '-')
i++;
// Integer part
if (str[i] == '0') {
i++;
if (in_range(str[i], '0', '9'))
return fail("leading 0s not permitted in numbers");
} else if (in_range(str[i], '1', '9')) {
i++;
while (in_range(str[i], '0', '9'))
i++;
} else {
return fail("invalid " + esc(str[i]) + " in number");
}
if (str[i] != '.' && str[i] != 'e' && str[i] != 'E'
&& (i - start_pos) <= static_cast<size_t>(std::numeric_limits<int>::digits10)) {
return std::atoi(str.c_str() + start_pos);
}
// Decimal part
if (str[i] == '.') {
i++;
if (!in_range(str[i], '0', '9'))
return fail("at least one digit required in fractional part");
while (in_range(str[i], '0', '9'))
i++;
}
// Exponent part
if (str[i] == 'e' || str[i] == 'E') {
i++;
if (str[i] == '+' || str[i] == '-')
i++;
if (!in_range(str[i], '0', '9'))
return fail("at least one digit required in exponent");
while (in_range(str[i], '0', '9'))
i++;
}
return std::strtod(str.c_str() + start_pos, nullptr);
}
/* expect(str, res)
*
* Expect that 'str' starts at the character that was just read. If it does, advance
* the input and return res. If not, flag an error.
*/
Json expect(const string &expected, Json res) {
assert(i != 0);
i--;
if (str.compare(i, expected.length(), expected) == 0) {
i += expected.length();
return res;
} else {
return fail("parse error: expected " + expected + ", got " + str.substr(i, expected.length()));
}
}
/* parse_json()
*
* Parse a JSON object.
*/
Json parse_json(int depth) {
if (depth > max_depth) {
return fail("exceeded maximum nesting depth");
}
char ch = get_next_token();
if (failed)
return Json();
if (ch == '-' || (ch >= '0' && ch <= '9')) {
i--;
return parse_number();
}
if (ch == 't')
return expect("true", true);
if (ch == 'f')
return expect("false", false);
if (ch == 'n')
return expect("null", Json());
if (ch == '"')
return parse_string();
if (ch == '{') {
map<string, Json> data;
ch = get_next_token();
if (ch == '}')
return data;
while (1) {
if (ch != '"')
return fail("expected '\"' in object, got " + esc(ch));
string key = parse_string();
if (failed)
return Json();
ch = get_next_token();
if (ch != ':')
return fail("expected ':' in object, got " + esc(ch));
data[std::move(key)] = parse_json(depth + 1);
if (failed)
return Json();
ch = get_next_token();
if (ch == '}')
break;
if (ch != ',')
return fail("expected ',' in object, got " + esc(ch));
ch = get_next_token();
}
return data;
}
if (ch == '[') {
vector<Json> data;
ch = get_next_token();
if (ch == ']')
return data;
while (1) {
i--;
data.push_back(parse_json(depth + 1));
if (failed)
return Json();
ch = get_next_token();
if (ch == ']')
break;
if (ch != ',')
return fail("expected ',' in list, got " + esc(ch));
ch = get_next_token();
(void)ch;
}
return data;
}
return fail("expected value, got " + esc(ch));
}
};
}//namespace {
Json Json::parse(const string &in, string &err, JsonParse strategy) {
JsonParser parser { in, 0, err, false, strategy };
Json result = parser.parse_json(0);
// Check for any trailing garbage
parser.consume_garbage();
if (parser.failed)
return Json();
if (parser.i != in.size())
return parser.fail("unexpected trailing " + esc(in[parser.i]));
return result;
}
// Documented in json11.hpp
vector<Json> Json::parse_multi(const string &in,
std::string::size_type &parser_stop_pos,
string &err,
JsonParse strategy) {
JsonParser parser { in, 0, err, false, strategy };
parser_stop_pos = 0;
vector<Json> json_vec;
while (parser.i != in.size() && !parser.failed) {
json_vec.push_back(parser.parse_json(0));
if (parser.failed)
break;
// Check for another object
parser.consume_garbage();
if (parser.failed)
break;
parser_stop_pos = parser.i;
}
return json_vec;
}
/* * * * * * * * * * * * * * * * * * * *
* Shape-checking
*/
bool Json::has_shape(const shape & types, string & err) const {
if (!is_object()) {
err = "expected JSON object, got " + dump();
return false;
}
const auto& obj_items = object_items();
for (auto & item : types) {
const auto it = obj_items.find(item.first);
if (it == obj_items.cend() || it->second.type() != item.second) {
err = "bad type for " + item.first + " in " + dump();
return false;
}
}
return true;
}
} // namespace json11

View File

@@ -0,0 +1,232 @@
/* json11
*
* json11 is a tiny JSON library for C++11, providing JSON parsing and serialization.
*
* The core object provided by the library is json11::Json. A Json object represents any JSON
* value: null, bool, number (int or double), string (std::string), array (std::vector), or
* object (std::map).
*
* Json objects act like values: they can be assigned, copied, moved, compared for equality or
* order, etc. There are also helper methods Json::dump, to serialize a Json to a string, and
* Json::parse (static) to parse a std::string as a Json object.
*
* Internally, the various types of Json object are represented by the JsonValue class
* hierarchy.
*
* A note on numbers - JSON specifies the syntax of number formatting but not its semantics,
* so some JSON implementations distinguish between integers and floating-point numbers, while
* some don't. In json11, we choose the latter. Because some JSON implementations (namely
* Javascript itself) treat all numbers as the same type, distinguishing the two leads
* to JSON that will be *silently* changed by a round-trip through those implementations.
* Dangerous! To avoid that risk, json11 stores all numbers as double internally, but also
* provides integer helpers.
*
* Fortunately, double-precision IEEE754 ('double') can precisely store any integer in the
* range +/-2^53, which includes every 'int' on most systems. (Timestamps often use int64
* or long long to avoid the Y2038 problem; a double storing microseconds since some epoch
* will be exact for +/- 275 years.)
*/
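/* A minimal usage sketch (illustrative only; error handling omitted):
 *
 *     std::string err;
 *     json11::Json obj = json11::Json::parse("{\"key\": [1, 2, 3]}", err);
 *     int first = obj["key"][0].int_value();   // 1
 *     std::string text = obj.dump();           // serialize back to a string
 */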
/* Copyright (c) 2013 Dropbox, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#pragma once
#include <string>
#include <vector>
#include <map>
#include <memory>
#include <initializer_list>
#ifdef _MSC_VER
#if _MSC_VER <= 1800 // VS 2013
#ifndef noexcept
#define noexcept throw()
#endif
#ifndef snprintf
#define snprintf _snprintf_s
#endif
#endif
#endif
namespace json11 {
enum JsonParse {
STANDARD, COMMENTS
};
class JsonValue;
class Json final {
public:
// Types
enum Type {
NUL, NUMBER, BOOL, STRING, ARRAY, OBJECT
};
// Array and object typedefs
typedef std::vector<Json> array;
typedef std::map<std::string, Json> object;
// Constructors for the various types of JSON value.
Json() noexcept; // NUL
Json(std::nullptr_t) noexcept; // NUL
Json(double value); // NUMBER
Json(int value); // NUMBER
Json(bool value); // BOOL
Json(const std::string &value); // STRING
Json(std::string &&value); // STRING
Json(const char * value); // STRING
Json(const array &values); // ARRAY
Json(array &&values); // ARRAY
Json(const object &values); // OBJECT
Json(object &&values); // OBJECT
// Implicit constructor: anything with a to_json() function.
template <class T, class = decltype(&T::to_json)>
Json(const T & t) : Json(t.to_json()) {}
// Implicit constructor: map-like objects (std::map, std::unordered_map, etc)
template <class M, typename std::enable_if<
std::is_constructible<std::string, decltype(std::declval<M>().begin()->first)>::value
&& std::is_constructible<Json, decltype(std::declval<M>().begin()->second)>::value,
int>::type = 0>
Json(const M & m) : Json(object(m.begin(), m.end())) {}
// Implicit constructor: vector-like objects (std::list, std::vector, std::set, etc)
template <class V, typename std::enable_if<
std::is_constructible<Json, decltype(*std::declval<V>().begin())>::value,
int>::type = 0>
Json(const V & v) : Json(array(v.begin(), v.end())) {}
// This prevents Json(some_pointer) from accidentally producing a bool. Use
// Json(bool(some_pointer)) if that behavior is desired.
Json(void *) = delete;
// Accessors
Type type() const;
bool is_null() const { return type() == NUL; }
bool is_number() const { return type() == NUMBER; }
bool is_bool() const { return type() == BOOL; }
bool is_string() const { return type() == STRING; }
bool is_array() const { return type() == ARRAY; }
bool is_object() const { return type() == OBJECT; }
// Return the enclosed value if this is a number, 0 otherwise. Note that json11 does not
// distinguish between integer and non-integer numbers - number_value() and int_value()
// can both be applied to a NUMBER-typed object.
double number_value() const;
int int_value() const;
// Return the enclosed value if this is a boolean, false otherwise.
bool bool_value() const;
// Return the enclosed string if this is a string, "" otherwise.
const std::string &string_value() const;
// Return the enclosed std::vector if this is an array, or an empty vector otherwise.
const array &array_items() const;
// Return the enclosed std::map if this is an object, or an empty map otherwise.
const object &object_items() const;
// Return a reference to arr[i] if this is an array, Json() otherwise.
const Json & operator[](size_t i) const;
// Return a reference to obj[key] if this is an object, Json() otherwise.
const Json & operator[](const std::string &key) const;
// Serialize.
void dump(std::string &out) const;
std::string dump() const {
std::string out;
dump(out);
return out;
}
// Parse. If parse fails, return Json() and assign an error message to err.
static Json parse(const std::string & in,
std::string & err,
JsonParse strategy = JsonParse::STANDARD);
static Json parse(const char * in,
std::string & err,
JsonParse strategy = JsonParse::STANDARD) {
if (in) {
return parse(std::string(in), err, strategy);
} else {
err = "null input";
return nullptr;
}
}
// Parse multiple objects, concatenated or separated by whitespace
static std::vector<Json> parse_multi(
const std::string & in,
std::string::size_type & parser_stop_pos,
std::string & err,
JsonParse strategy = JsonParse::STANDARD);
static inline std::vector<Json> parse_multi(
const std::string & in,
std::string & err,
JsonParse strategy = JsonParse::STANDARD) {
std::string::size_type parser_stop_pos;
return parse_multi(in, parser_stop_pos, err, strategy);
}
bool operator== (const Json &rhs) const;
bool operator< (const Json &rhs) const;
bool operator!= (const Json &rhs) const { return !(*this == rhs); }
bool operator<= (const Json &rhs) const { return !(rhs < *this); }
bool operator> (const Json &rhs) const { return (rhs < *this); }
bool operator>= (const Json &rhs) const { return !(*this < rhs); }
/* has_shape(types, err)
*
* Return true if this is a JSON object and, for each item in types, has a field of
* the given type. If not, return false and set err to a descriptive message.
*/
typedef std::initializer_list<std::pair<std::string, Type>> shape;
bool has_shape(const shape & types, std::string & err) const;
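// e.g. (illustrative): obj.has_shape({ { "name", Json::STRING }, { "age", Json::NUMBER } }, err);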
private:
std::shared_ptr<JsonValue> m_ptr;
};
// Internal class hierarchy - JsonValue objects are not exposed to users of this API.
class JsonValue {
protected:
friend class Json;
friend class JsonInt;
friend class JsonDouble;
virtual Json::Type type() const = 0;
virtual bool equals(const JsonValue * other) const = 0;
virtual bool less(const JsonValue * other) const = 0;
virtual void dump(std::string &out) const = 0;
virtual double number_value() const;
virtual int int_value() const;
virtual bool bool_value() const;
virtual const std::string &string_value() const;
virtual const Json::array &array_items() const;
virtual const Json &operator[](size_t i) const;
virtual const Json::object &object_items() const;
virtual const Json &operator[](const std::string &key) const;
virtual ~JsonValue() {}
};
} // namespace json11

File diff suppressed because it is too large

View File

@@ -0,0 +1,560 @@
/**@file TYCoordinateMapper.h
* @brief Coordinate Conversion API
* @note For performance reasons, parameter checking is left to the caller.
* @copyright Copyright(C)2016-2018 Percipio All Rights Reserved
**/
#ifndef TY_COORDINATE_MAPPER_H_
#define TY_COORDINATE_MAPPER_H_
#include <stdlib.h>
#include "TYApi.h"
typedef struct TY_PIXEL_DESC
{
int16_t x; // x coordinate in pixels
int16_t y; // y coordinate in pixels
uint16_t depth; // depth value
uint16_t rsvd;
}TY_PIXEL_DESC;
typedef struct TY_PIXEL_COLOR_DESC
{
int16_t x; // x coordinate in pixels
int16_t y; // y coordinate in pixels
uint8_t bgr_ch1; // color info <channel 1>
uint8_t bgr_ch2; // color info <channel 2>
uint8_t bgr_ch3; // color info <channel 3>
uint8_t rsvd;
}TY_PIXEL_COLOR_DESC;
// ------------------------------
// base conversion
// ------------------------------
/// @brief Calculate 4x4 extrinsic matrix's inverse matrix.
/// @param [in] orgExtrinsic Input extrinsic matrix.
/// @param [out] invExtrinsic Inverse matrix.
/// @retval TY_STATUS_OK Succeed.
/// @retval TY_STATUS_ERROR Calculation failed.
TY_CAPI TYInvertExtrinsic (const TY_CAMERA_EXTRINSIC* orgExtrinsic,
TY_CAMERA_EXTRINSIC* invExtrinsic);
/// @brief Map pixels on depth image to 3D points.
/// @param [in] src_calib Depth image's calibration data.
/// @param [in] depthW Width of depth image.
/// @param [in] depthH Height of depth image.
/// @param [in] depthPixels Pixels on depth image.
/// @param [in] count Number of depth pixels.
/// @param [out] point3d Output point3D.
/// @retval TY_STATUS_OK Succeed.
TY_CAPI TYMapDepthToPoint3d (const TY_CAMERA_CALIB_INFO* src_calib,
uint32_t depthW, uint32_t depthH,
const TY_PIXEL_DESC* depthPixels, uint32_t count,
TY_VECT_3F* point3d,
float f_scale_unit = 1.0f);
/// @brief Map 3D points to pixels on depth image. Reverse operation of TYMapDepthToPoint3d.
/// @param [in] dst_calib Target depth image's calibration data.
/// @param [in] point3d Input 3D points.
/// @param [in] count Number of points.
/// @param [in] depthW Width of target depth image.
/// @param [in] depthH Height of target depth image.
/// @param [out] depth Output depth pixels.
/// @retval TY_STATUS_OK Succeed.
TY_CAPI TYMapPoint3dToDepth (const TY_CAMERA_CALIB_INFO* dst_calib,
const TY_VECT_3F* point3d, uint32_t count,
uint32_t depthW, uint32_t depthH,
TY_PIXEL_DESC* depth,
float f_scale_unit = 1.0f);
/// @brief Map a depth image to 3D points. Pixels with depth 0 map to (NAN, NAN, NAN).
/// @param [in] src_calib Depth image's calibration data.
/// @param [in] imageW Width of depth image.
/// @param [in] imageH Height of depth image.
/// @param [in] depth Depth image.
/// @param [out] point3d Output point3D image.
/// @retval TY_STATUS_OK Succeed.
TY_CAPI TYMapDepthImageToPoint3d (const TY_CAMERA_CALIB_INFO* src_calib,
int32_t imageW, int32_t imageH,
const uint16_t* depth,
TY_VECT_3F* point3d,
float f_scale_unit = 1.0f);
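/* Usage sketch (illustrative; 'depth_calib', 'depthW', 'depthH' and 'depthImg' are assumed to be
 * prepared by the caller):
 *
 *     std::vector<TY_VECT_3F> cloud(depthW * depthH);
 *     TYMapDepthImageToPoint3d(&depth_calib, depthW, depthH, depthImg, cloud.data());
 */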
/// @brief Fill depth image empty region.
/// @param [in] depth Depth image pixels.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
TY_CAPI TYDepthImageFillEmptyRegion(uint16_t* depth, uint32_t depthW, uint32_t depthH);
/// @brief Map 3D points to depth image. (NAN, NAN, NAN) will be skipped.
/// @param [in] dst_calib Target depth image's calibration data.
/// @param [in] point3d Input 3D points.
/// @param [in] count Number of points.
/// @param [in] depthW Width of target depth image.
/// @param [in] depthH Height of target depth image.
/// @param [in,out] depth Depth image buffer.
/// @retval TY_STATUS_OK Succeed.
TY_CAPI TYMapPoint3dToDepthImage (const TY_CAMERA_CALIB_INFO* dst_calib,
const TY_VECT_3F* point3d, uint32_t count,
uint32_t depthW, uint32_t depthH, uint16_t* depth,
float f_target_scale = 1.0f);
/// @brief Map 3D points to another coordinate.
/// @param [in] extrinsic Extrinsic matrix.
/// @param [in] point3dFrom Source 3D points.
/// @param [in] count Number of source 3D points.
/// @param [out] point3dTo Target 3D points.
/// @retval TY_STATUS_OK Succeed.
TY_CAPI TYMapPoint3dToPoint3d (const TY_CAMERA_EXTRINSIC* extrinsic,
const TY_VECT_3F* point3dFrom, int32_t count,
TY_VECT_3F* point3dTo);
// ------------------------------
// inlines
// ------------------------------
/// @brief Map depth pixels to color coordinate pixels.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Depth image pixels.
/// @param [in] count Number of depth image pixels.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] mappedW Width of target depth image.
/// @param [in] mappedH Height of target depth image.
/// @param [out] mappedDepth Output pixels.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapDepthToColorCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH,
const TY_PIXEL_DESC* depth, uint32_t count,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t mappedW, uint32_t mappedH,
TY_PIXEL_DESC* mappedDepth,
float f_scale_unit = 1.0f);
/// @brief Map original depth image to color coordinate depth image.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] mappedW Width of target depth image.
/// @param [in] mappedH Height of target depth image.
/// @param [out] mappedDepth Output pixels.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapDepthImageToColorCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t mappedW, uint32_t mappedH, uint16_t* mappedDepth,
float f_scale_unit = 1.0f);
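/* Usage sketch (illustrative; calibration data, image sizes and the depth buffer are assumed to be
 * prepared by the caller):
 *
 *     std::vector<uint16_t> mappedDepth(rgbW * rgbH);
 *     TYMapDepthImageToColorCoordinate(&depth_calib, depthW, depthH, depthImg,
 *                                      &color_calib, rgbW, rgbH, mappedDepth.data());
 */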
/// @brief Create depth image to color coordinate lookup table.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] mappedW Width of target depth image.
/// @param [in] mappedH Height of target depth image.
/// @param [out] lut Output lookup table.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYCreateDepthToColorCoordinateLookupTable(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t mappedW, uint32_t mappedH,
TY_PIXEL_DESC* lut,
float f_scale_unit = 1.0f);
/// @brief Map original RGB pixels to depth coordinate.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Current depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] rgbW Width of RGB image.
/// @param [in] rgbH Height of RGB image.
/// @param [in] src Input RGB pixels info.
/// @param [in] cnt Number of input RGB pixels in src.
/// @param [in] min_distance Minimum distance (mm); typically the camera's minimum measurable distance.
/// @param [in] max_distance Maximum distance (mm); typically the camera's maximum measurable distance.
/// @param [out] dst Output RGB pixels info.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapRGBPixelsToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH,
TY_PIXEL_COLOR_DESC* src, uint32_t cnt,
uint32_t min_distance,
uint32_t max_distance,
TY_PIXEL_COLOR_DESC* dst,
float f_scale_unit = 1.0f);
/// @brief Map original RGB image to depth coordinate RGB image.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Current depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] rgbW Width of RGB image.
/// @param [in] rgbH Height of RGB image.
/// @param [in] inRgb Current RGB image.
/// @param [out] mappedRgb Output RGB image.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapRGBImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH, const uint8_t* inRgb,
uint8_t* mappedRgb,
float f_scale_unit = 1.0f);
/// @brief Map original RGB48 image to depth coordinate RGB image.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Current depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] rgbW Width of RGB48 image.
/// @param [in] rgbH Height of RGB48 image.
/// @param [in] inRgb Current RGB48 image.
/// @param [out] mappedRgb Output RGB48 image.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapRGB48ImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH, const uint16_t* inRgb,
uint16_t* mappedRgb,
float f_scale_unit = 1.0f);
/// @brief Map original MONO16 image to depth coordinate MONO16 image.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Current depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] rgbW Width of MONO16 image.
/// @param [in] rgbH Height of MONO16 image.
/// @param [in] gray Current MONO16 image.
/// @param [out] mappedGray Output MONO16 image.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapMono16ImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH, const uint16_t* gray,
uint16_t* mappedGray,
float f_scale_unit = 1.0f);
/// @brief Map original MONO8 image to depth coordinate MONO8 image.
/// @param [in] depth_calib Depth image's calibration data.
/// @param [in] depthW Width of current depth image.
/// @param [in] depthH Height of current depth image.
/// @param [in] depth Current depth image.
/// @param [in] color_calib Color image's calibration data.
/// @param [in] monoW Width of MONO8 image.
/// @param [in] monoH Height of MONO8 image.
/// @param [in] inMono Current MONO8 image.
/// @param [out] mappedMono Output MONO8 image.
/// @retval TY_STATUS_OK Succeed.
static inline TY_STATUS TYMapMono8ImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t monoW, uint32_t monoH, const uint8_t* inMono,
uint8_t* mappedMono,
float f_scale_unit = 1.0f);
#define TYMAP_CHECKRET(f, bufToFree) \
do{ \
TY_STATUS err = (f); \
if(err){ \
if(bufToFree) \
free(bufToFree); \
return err; \
} \
} while(0)
static inline TY_STATUS TYMapDepthToColorCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH,
const TY_PIXEL_DESC* depth, uint32_t count,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t mappedW, uint32_t mappedH,
TY_PIXEL_DESC* mappedDepth,
float f_scale_unit)
{
TY_VECT_3F* p3d = (TY_VECT_3F*)malloc(sizeof(TY_VECT_3F) * count);
TYMAP_CHECKRET(TYMapDepthToPoint3d(depth_calib, depthW, depthH, depth, count, p3d, f_scale_unit), p3d );
TY_CAMERA_EXTRINSIC extri_inv;
TYMAP_CHECKRET(TYInvertExtrinsic(&color_calib->extrinsic, &extri_inv), p3d);
TYMAP_CHECKRET(TYMapPoint3dToPoint3d(&extri_inv, p3d, count, p3d), p3d );
TYMAP_CHECKRET(TYMapPoint3dToDepth(color_calib, p3d, count, mappedW, mappedH, mappedDepth, f_scale_unit), p3d );
free(p3d);
return TY_STATUS_OK;
}
static inline TY_STATUS TYMapDepthImageToColorCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t mappedW, uint32_t mappedH, uint16_t* mappedDepth, float f_scale_unit)
{
TY_VECT_3F* p3d = (TY_VECT_3F*)malloc(sizeof(TY_VECT_3F) * depthW * depthH);
TYMAP_CHECKRET(TYMapDepthImageToPoint3d(depth_calib, depthW, depthH, depth, p3d, f_scale_unit), p3d);
TY_CAMERA_EXTRINSIC extri_inv;
TYMAP_CHECKRET(TYInvertExtrinsic(&color_calib->extrinsic, &extri_inv), p3d);
TYMAP_CHECKRET(TYMapPoint3dToPoint3d(&extri_inv, p3d, depthW * depthH, p3d), p3d);
TYMAP_CHECKRET(TYMapPoint3dToDepthImage(
color_calib, p3d, depthW * depthH, mappedW, mappedH, mappedDepth, f_scale_unit), p3d);
free(p3d);
return TY_STATUS_OK;
}
static inline TY_STATUS TYMapRGBPixelsToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH,
TY_PIXEL_COLOR_DESC* src, uint32_t cnt,
uint32_t min_distance,
uint32_t max_distance,
TY_PIXEL_COLOR_DESC* dst,
float f_scale_unit)
{
uint32_t m_distance_range = max_distance - min_distance;
TY_CAMERA_EXTRINSIC extri = color_calib->extrinsic;
TY_PIXEL_DESC* pixels_array = (TY_PIXEL_DESC*)malloc(sizeof(TY_PIXEL_DESC) * m_distance_range);
TY_PIXEL_DESC* pixels_mapped_array = (TY_PIXEL_DESC*)malloc(sizeof(TY_PIXEL_DESC) * m_distance_range);
TY_VECT_3F* p3d_array = (TY_VECT_3F*)malloc(sizeof(TY_VECT_3F) * m_distance_range);
for (uint32_t i = 0; i < cnt; i++) {
for (uint32_t m = 0; m < m_distance_range; m++) {
pixels_array[m].x = src[i].x;
pixels_array[m].y = src[i].y;
pixels_array[m].depth = m + min_distance;
}
TYMapDepthToPoint3d(color_calib, rgbW, rgbH, pixels_array, m_distance_range, &p3d_array[0], f_scale_unit);
TYMapPoint3dToPoint3d(&extri, &p3d_array[0], m_distance_range, &p3d_array[0]);
TYMapPoint3dToDepth(depth_calib, p3d_array, m_distance_range, depthW, depthH, pixels_mapped_array, f_scale_unit);
uint16_t m_min_delt = 0xffff;
dst[i].x = -1;
dst[i].y = -1;
for (uint32_t m = 0; m < m_distance_range; m++) {
int16_t pixel_x = pixels_mapped_array[m].x;
int16_t pixel_y = pixels_mapped_array[m].y;
// skip points that project outside the depth image to avoid out-of-bounds access
if (pixel_x < 0 || pixel_y < 0 || pixel_x >= (int)depthW || pixel_y >= (int)depthH) continue;
uint16_t delt = abs(pixels_mapped_array[m].depth - depth[pixel_y*depthW + pixel_x]);
if (delt < m_min_delt) {
m_min_delt = delt;
if (m_min_delt < 10) {
dst[i].x = pixel_x;
dst[i].y = pixel_y;
dst[i].bgr_ch1 = src[i].bgr_ch1;
dst[i].bgr_ch2 = src[i].bgr_ch2;
dst[i].bgr_ch3 = src[i].bgr_ch3;
}
}
}
}
free(pixels_array);
free(pixels_mapped_array);
free(p3d_array);
return TY_STATUS_OK;
}
static inline TY_STATUS TYCreateDepthToColorCoordinateLookupTable(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t mappedW, uint32_t mappedH,
TY_PIXEL_DESC* lut,
float f_scale_unit)
{
TY_VECT_3F* p3d = (TY_VECT_3F*)malloc(sizeof(TY_VECT_3F) * depthW * depthH);
TYMAP_CHECKRET(TYMapDepthImageToPoint3d(depth_calib, depthW, depthH, depth, p3d, f_scale_unit), p3d);
TY_CAMERA_EXTRINSIC extri_inv;
TYMAP_CHECKRET(TYInvertExtrinsic(&color_calib->extrinsic, &extri_inv), p3d);
TYMAP_CHECKRET(TYMapPoint3dToPoint3d(&extri_inv, p3d, depthW * depthH, p3d), p3d);
TYMAP_CHECKRET(TYMapPoint3dToDepth(color_calib, p3d, depthW * depthH, mappedW, mappedH, lut, f_scale_unit), p3d );
free(p3d);
return TY_STATUS_OK;
}
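/// @brief Resolve occlusions in a depth-to-color lookup table: for every target pixel only the nearest
///        depth is kept, and entries lying more than 10 depth units behind it are invalidated
///        (x = y = -1, depth = 0).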
inline void TYPixelsOverlapRemove(TY_PIXEL_DESC* lut, uint32_t count, uint32_t imageW, uint32_t imageH)
{
uint16_t* mappedDepth = (uint16_t*)calloc(imageW*imageH, sizeof(uint16_t));
for(size_t i = 0; i < count; i++) {
if(lut[i].x < 0 || lut[i].y < 0 || lut[i].x >= imageW || lut[i].y >= imageH) continue;
uint32_t offset = lut[i].y * imageW + lut[i].x;
if(lut[i].depth && (mappedDepth[offset] == 0 || mappedDepth[offset] >= lut[i].depth))
mappedDepth[offset] = lut[i].depth;
}
TYDepthImageFillEmptyRegion(mappedDepth, imageW, imageH);
for(size_t i = 0; i < count; i++) {
if(lut[i].x < 0 || lut[i].y < 0 || lut[i].x >= imageW || lut[i].y >= imageH) {
continue;
} else {
uint32_t offset = lut[i].y * imageW + lut[i].x;
int32_t delt = lut[i].depth - mappedDepth[offset];
if(lut[i].depth && delt > 10) {
lut[i].x = -1;
lut[i].y = -1;
lut[i].depth = 0;
}
}
}
free(mappedDepth);
}
static inline TY_STATUS TYMapRGBImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH, const uint8_t* inRgb,
uint8_t* mappedRgb, float f_scale_unit)
{
TY_PIXEL_DESC* lut = (TY_PIXEL_DESC*)malloc(sizeof(TY_PIXEL_DESC) * depthW * depthH);
TYMAP_CHECKRET(TYCreateDepthToColorCoordinateLookupTable(
depth_calib, depthW, depthH, depth,
color_calib, depthW, depthH, lut, f_scale_unit), lut);
TYPixelsOverlapRemove(lut, depthW * depthH, depthW, depthH);
for(uint32_t depthr = 0; depthr < depthH; depthr++)
for(uint32_t depthc = 0; depthc < depthW; depthc++)
{
TY_PIXEL_DESC* plut = &lut[depthr * depthW + depthc];
uint8_t* outPtr = &mappedRgb[depthW * depthr * 3 + depthc * 3];
if(plut->x < 0 || plut->x >= (int)depthW || plut->y < 0 || plut->y >= (int)depthH){
outPtr[0] = outPtr[1] = outPtr[2] = 0;
} else {
uint16_t scale_x = (uint16_t)(1.f * plut->x * rgbW / depthW + 0.5);
uint16_t scale_y = (uint16_t)(1.f * plut->y * rgbH / depthH + 0.5);
if(scale_x >= rgbW) scale_x = rgbW - 1;
if(scale_y >= rgbH) scale_y = rgbH - 1;
const uint8_t* inPtr = &inRgb[rgbW * scale_y * 3 + scale_x * 3];
outPtr[0] = inPtr[0];
outPtr[1] = inPtr[1];
outPtr[2] = inPtr[2];
}
}
free(lut);
return TY_STATUS_OK;
}
static inline TY_STATUS TYMapRGB48ImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH, const uint16_t* inRgb,
uint16_t* mappedRgb, float f_scale_unit)
{
TY_PIXEL_DESC* lut = (TY_PIXEL_DESC*)malloc(sizeof(TY_PIXEL_DESC) * depthW * depthH);
TYMAP_CHECKRET(TYCreateDepthToColorCoordinateLookupTable(
depth_calib, depthW, depthH, depth,
color_calib, depthW, depthH, lut, f_scale_unit), lut);
TYPixelsOverlapRemove(lut, depthW * depthH, depthW, depthH);
for(uint32_t depthr = 0; depthr < depthH; depthr++)
for(uint32_t depthc = 0; depthc < depthW; depthc++)
{
TY_PIXEL_DESC* plut = &lut[depthr * depthW + depthc];
uint16_t* outPtr = &mappedRgb[depthW * depthr * 3 + depthc * 3];
if(plut->x < 0 || plut->x >= (int)depthW || plut->y < 0 || plut->y >= (int)depthH){
outPtr[0] = outPtr[1] = outPtr[2] = 0;
} else {
uint16_t scale_x = (uint16_t)(1.f * plut->x * rgbW / depthW + 0.5);
uint16_t scale_y = (uint16_t)(1.f * plut->y * rgbH / depthH + 0.5);
if(scale_x >= rgbW) scale_x = rgbW - 1;
if(scale_y >= rgbH) scale_y = rgbH - 1;
const uint16_t* inPtr = &inRgb[rgbW * scale_y * 3 + scale_x * 3];
outPtr[0] = inPtr[0];
outPtr[1] = inPtr[1];
outPtr[2] = inPtr[2];
}
}
free(lut);
return TY_STATUS_OK;
}
static inline TY_STATUS TYMapMono16ImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t rgbW, uint32_t rgbH, const uint16_t* gray,
uint16_t* mappedGray, float f_scale_unit)
{
TY_PIXEL_DESC* lut = (TY_PIXEL_DESC*)malloc(sizeof(TY_PIXEL_DESC) * depthW * depthH);
TYMAP_CHECKRET(TYCreateDepthToColorCoordinateLookupTable(
depth_calib, depthW, depthH, depth,
color_calib, depthW, depthH, lut, f_scale_unit), lut);
TYPixelsOverlapRemove(lut, depthW * depthH, depthW, depthH);
for(uint32_t depthr = 0; depthr < depthH; depthr++)
for(uint32_t depthc = 0; depthc < depthW; depthc++)
{
TY_PIXEL_DESC* plut = &lut[depthr * depthW + depthc];
uint16_t* outPtr = &mappedGray[depthW * depthr + depthc];
if(plut->x < 0 || plut->x >= (int)depthW || plut->y < 0 || plut->y >= (int)depthH){
outPtr[0] = 0;
} else {
uint16_t scale_x = (uint16_t)(1.f * plut->x * rgbW / depthW + 0.5);
uint16_t scale_y = (uint16_t)(1.f * plut->y * rgbH / depthH + 0.5);
if(scale_x >= rgbW) scale_x = rgbW - 1;
if(scale_y >= rgbH) scale_y = rgbH - 1;
const uint16_t* inPtr = &gray[rgbW * scale_y + scale_x];
outPtr[0] = inPtr[0];
}
}
free(lut);
return TY_STATUS_OK;
}
static inline TY_STATUS TYMapMono8ImageToDepthCoordinate(
const TY_CAMERA_CALIB_INFO* depth_calib,
uint32_t depthW, uint32_t depthH, const uint16_t* depth,
const TY_CAMERA_CALIB_INFO* color_calib,
uint32_t monoW, uint32_t monoH, const uint8_t* inMono,
uint8_t* mappedMono, float f_scale_unit)
{
TY_PIXEL_DESC* lut = (TY_PIXEL_DESC*)malloc(sizeof(TY_PIXEL_DESC) * depthW * depthH);
TYMAP_CHECKRET(TYCreateDepthToColorCoordinateLookupTable(
depth_calib, depthW, depthH, depth,
color_calib, depthW, depthH, lut, f_scale_unit), lut);
TYPixelsOverlapRemove(lut, depthW * depthH, depthW, depthH);
for(uint32_t depthr = 0; depthr < depthH; depthr++)
for(uint32_t depthc = 0; depthc < depthW; depthc++)
{
TY_PIXEL_DESC* plut = &lut[depthr * depthW + depthc];
uint8_t* outPtr = &mappedMono[depthW * depthr + depthc];
if(plut->x < 0 || plut->x >= (int)depthW || plut->y < 0 || plut->y >= (int)depthH){
outPtr[0] = 0;
} else {
uint16_t scale_x = (uint16_t)(1.f * plut->x * monoW / depthW + 0.5);
uint16_t scale_y = (uint16_t)(1.f * plut->y * monoH / depthH + 0.5);
if(scale_x >= monoW) scale_x = monoW - 1;
if(scale_y >= monoH) scale_y = monoH - 1;
const uint8_t* inPtr = &inMono[monoW * scale_y + scale_x];
outPtr[0] = inPtr[0];
}
}
free(lut);
return TY_STATUS_OK;
}
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,82 @@
/**@file TYImageProc.h
* @brief Image post-processing API
* @copyright Copyright(C)2016-2018 Percipio All Rights Reserved
**/
#ifndef TY_IMAGE_PROC_H_
#define TY_IMAGE_PROC_H_
#include "TYApi.h"
#include "TYCoordinateMapper.h"
#include "TyIsp.h"
/// @brief Image processing acceleration switch
/// @param [in] en Enable or disable image processing acceleration.
TY_CAPI TYImageProcesAcceEnable(bool en);
/// @brief Undistort an image; only TY_PIXEL_FORMAT_MONO, TY_PIXEL_FORMAT_RGB and TY_PIXEL_FORMAT_BGR are supported.
/// @param [in] srcCalibInfo Image calibration data.
/// @param [in] srcImage Source image.
/// @param [in] cameraNewIntrinsic Expected intrinsics of the output image; if NULL, the intrinsics from srcCalibInfo are used.
/// @param [out] dstImage Output image.
/// @retval TY_STATUS_OK Succeed.
/// @retval TY_STATUS_NULL_POINTER Any srcCalibInfo, srcImage, dstImage, srcImage->buffer, dstImage->buffer is NULL.
/// @retval TY_STATUS_INVALID_PARAMETER Invalid srcImage->width, srcImage->height, dstImage->width, dstImage->height or unsupported pixel format.
TY_CAPI TYUndistortImage (const TY_CAMERA_CALIB_INFO *srcCalibInfo
, const TY_IMAGE_DATA *srcImage
, const TY_CAMERA_INTRINSIC *cameraNewIntrinsic
, TY_IMAGE_DATA *dstImage
);
// -----------------------------------------------------------
struct DepthSpeckleFilterParameters {
int max_speckle_size; // blob size smaller than this will be removed
int max_speckle_diff; // Maximum difference between neighbor disparity pixels
};
///<default parameter value definition
#define DepthSpeckleFilterParameters_Initializer {150, 64}
/// @brief Remove speckles on depth image.
/// @param [in,out] depthImage Depth image to be processed.
/// @param [in] param Algorithm parameters.
/// @retval TY_STATUS_OK Succeed.
/// @retval TY_STATUS_NULL_POINTER Any depth, param or depth->buffer is NULL.
/// @retval TY_STATUS_INVALID_PARAMETER param->max_speckle_size <= 0 or param->max_speckle_diff <= 0
TY_CAPI TYDepthSpeckleFilter (TY_IMAGE_DATA* depthImage
, const DepthSpeckleFilterParameters* param
);
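/* Usage sketch (illustrative; 'depthImage' is assumed to wrap a valid 16-bit depth buffer):
 *
 *     DepthSpeckleFilterParameters param = DepthSpeckleFilterParameters_Initializer;
 *     TYDepthSpeckleFilter(&depthImage, &param);
 */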
// -----------------------------------------------------------
struct DepthEnhenceParameters{
float sigma_s; ///< filter parameter on the spatial domain
float sigma_r; ///< filter parameter on the range domain
int outlier_win_sz; ///< outlier filter window size
float outlier_rate;
};
///<default parameter value definition
#define DepthEnhenceParameters_Initializer {10, 20, 10, 0.1f}
/// @brief Enhance / denoise the depth image (may combine several frames and an optional guide image).
/// @param [in] depthImage Pointer to depth image array.
/// @param [in] imageNum Depth image array size.
/// @param [in,out] guide Guide image.
/// @param [out] output Output depth image.
/// @param [in] param Algorithm parameters.
/// @retval TY_STATUS_OK Succeed.
/// @retval TY_STATUS_NULL_POINTER Any depthImage, param, output or output->buffer is NULL.
/// @retval TY_STATUS_INVALID_PARAMETER imageNum >= 11 or imageNum <= 0, or any image invalid
/// @retval TY_STATUS_OUT_OF_MEMORY Output image not suitable.
TY_CAPI TYDepthEnhenceFilter (const TY_IMAGE_DATA* depthImages
, int imageNum
, TY_IMAGE_DATA *guide
, TY_IMAGE_DATA *output
, const DepthEnhenceParameters* param
);
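/* Usage sketch (illustrative; a single-frame call, assuming 'depth' and 'output' are valid
 * TY_IMAGE_DATA images of the same size and that a NULL guide image is acceptable here):
 *
 *     DepthEnhenceParameters param = DepthEnhenceParameters_Initializer;
 *     TYDepthEnhenceFilter(&depth, 1, NULL, &output, &param);
 */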
#endif

View File

@@ -0,0 +1,3 @@
#define TY_LIB_VERSION_MAJOR 3
#define TY_LIB_VERSION_MINOR 6
#define TY_LIB_VERSION_PATCH 75

View File

@@ -0,0 +1,109 @@
/**@file TyIsp.h
* @brief This file declares the interfaces for raw (Bayer-format) color image
* processing functions.
*
* Copyright(C)2016-2019 Percipio All Rights Reserved
*
*/
#ifndef TY_COLOR_ISP_H_
#define TY_COLOR_ISP_H_
#include "TYApi.h"
#define TYISP_CAPI TY_CAPI
typedef void* TY_ISP_HANDLE;
typedef enum{
TY_ISP_FEATURE_CAM_MODEL = 0x000000,
TY_ISP_FEATURE_CAM_DEV_HANDLE = 0x000001, ///<device handle for device control
TY_ISP_FEATURE_CAM_DEV_COMPONENT = 0x000002, ///<the component to control
TY_ISP_FEATURE_IMAGE_SIZE = 0x000100, ///<image size width&height
TY_ISP_FEATURE_WHITEBALANCE_GAIN = 0x000200,
TY_ISP_FEATURE_ENABLE_AUTO_WHITEBALANCE = 0x000300,
TY_ISP_FEATURE_SHADING = 0x000400,
TY_ISP_FEATURE_SHADING_CENTER = 0x000500,
TY_ISP_FEATURE_BLACK_LEVEL = 0x000600, ///<global black level
TY_ISP_FEATURE_BLACK_LEVEL_COLUMN = 0x000610, ///<to set different black level for each image column
TY_ISP_FEATURE_BLACK_LEVEL_GAIN = 0x000700, ///<global pixel gain
TY_ISP_FEATURE_BLACK_LEVEL_GAIN_COLUMN = 0x000710, ///<to set different gain for each image column
TY_ISP_FEATURE_BAYER_PATTERN = 0x000800,
TY_ISP_FEATURE_DEMOSAIC_METHOD = 0x000900,
TY_ISP_FEATURE_GAMMA = 0x000A00,
TY_ISP_FEATURE_DEFECT_PIXEL_LIST = 0x000B00,
TY_ISP_FEATURE_CCM = 0x000C00,
TY_ISP_FEATURE_CCM_ENABLE = 0x000C10, ///<ENABLE CCM
TY_ISP_FEATURE_BRIGHT = 0x000D00,
TY_ISP_FEATURE_CONTRAST = 0x000E00,
TY_ISP_FEATURE_AUTOBRIGHT = 0x000F00,
TY_ISP_FEATURE_INPUT_RESAMPLE_SCALE = 0x001000, ///<set this if the bayer image was resampled before soft ISP processing
TY_ISP_FEATURE_ENABLE_AUTO_EXPOSURE_GAIN = 0x001100,
TY_ISP_FEATURE_AUTO_EXPOSURE_RANGE = 0x001200, ///<exposure range, no limit by default
TY_ISP_FEATURE_AUTO_GAIN_RANGE = 0x001300, ///<gain range, no limit by default
TY_ISP_FEATURE_AUTO_EXPOSURE_UPDATE_INTERVAL = 0x001400, ///<device exposure update interval, default 5 frames
TY_ISP_FEATURE_DEBUG_LOG = 0xff000000, ///<display detailed log information
} TY_ISP_FEATURE_ID;
typedef enum{
TY_ISP_BAYER_GB = 0,
TY_ISP_BAYER_BG = 1,
TY_ISP_BAYER_RG = 2,
TY_ISP_BAYER_GR = 3,
TY_ISP_BAYER_AUTO = 0xff,
}TY_ISP_BAYER_PATTERN;
typedef enum{
TY_DEMOSAIC_METHOD_SIMPLE = 0,
TY_DEMOSAIC_METHOD_BILINEAR = 1,
TY_DEMOSAIC_METHOD_HQLINEAR = 2,
TY_DEMOSAIC_METHOD_EDGESENSE = 3,
} TY_DEMOSAIC_METHOD;
typedef struct{
TY_ISP_FEATURE_ID id;
int32_t size;
const char * name;
const char * value_type;
TY_ACCESS_MODE mode;
} TY_ISP_FEATURE_INFO;
TYISP_CAPI TYISPCreate(TY_ISP_HANDLE *handle);
TYISP_CAPI TYISPRelease(TY_ISP_HANDLE *handle);
TYISP_CAPI TYISPLoadConfig(TY_ISP_HANDLE handle,const uint8_t *config, uint32_t config_size);
///@brief Called by the main thread to update and control device status for the ISP
TYISP_CAPI TYISPUpdateDevice(TY_ISP_HANDLE handle);
TYISP_CAPI TYISPSetFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, const uint8_t *data, int32_t size);
TYISP_CAPI TYISPGetFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, uint8_t *data_buff, int32_t buff_size);
TYISP_CAPI TYISPGetFeatureSize(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, int32_t *size);
TYISP_CAPI TYISPHasFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id);
TYISP_CAPI TYISPGetFeatureInfoList(TY_ISP_HANDLE handle, TY_ISP_FEATURE_INFO *info_buffer, int buffer_size);
TYISP_CAPI TYISPGetFeatureInfoListSize(TY_ISP_HANDLE handle, int32_t *buffer_size);
///@brief Convert a raw Bayer image to an RGB image; the output buffer is allocated by the caller
TYISP_CAPI TYISPProcessImage(TY_ISP_HANDLE handle,const TY_IMAGE_DATA *image_bayer, TY_IMAGE_DATA *image_out);
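/* Typical flow (illustrative sketch; feature configuration and error checking omitted):
 *
 *     TY_ISP_HANDLE isp;
 *     TYISPCreate(&isp);
 *     TYISPProcessImage(isp, &bayerImage, &bgrImage);   // bgrImage buffer allocated by the caller
 *     TYISPRelease(&isp);
 */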
#ifdef __cplusplus
static inline TY_STATUS TYISPSetFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, int value){
return TYISPSetFeature(handle, feature_id, (uint8_t*)&(value), sizeof(int));
}
static inline TY_STATUS TYISPGetFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, int *value){
return TYISPGetFeature(handle, feature_id, (uint8_t*)value, sizeof(int));
}
static inline TY_STATUS TYISPSetFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, float value){
return TYISPSetFeature(handle, feature_id, (uint8_t*)&(value), sizeof(float));
}
static inline TY_STATUS TYISPGetFeature(TY_ISP_HANDLE handle, TY_ISP_FEATURE_ID feature_id, float *value){
return TYISPGetFeature(handle, feature_id, (uint8_t*)value, sizeof(float));
}
#endif
#endif

View File

@@ -0,0 +1,635 @@
#include "Device.hpp"
struct to_string
{
std::ostringstream ss;
template<class T> to_string & operator << (const T & val) { ss << val; return *this; }
operator std::string() const { return ss.str(); }
};
static std::string TY_ERROR(TY_STATUS status)
{
return to_string() << status << "(" << TYErrorString(status) << ").";
}
static inline TY_STATUS searchDevice(std::vector<TY_DEVICE_BASE_INFO>& out, const char *inf_id = nullptr, TY_INTERFACE_TYPE type = TY_INTERFACE_ALL)
{
out.clear();
ASSERT_OK( TYUpdateInterfaceList() );
uint32_t n = 0;
ASSERT_OK( TYGetInterfaceNumber(&n) );
if(n == 0) return TY_STATUS_ERROR;
std::vector<TY_INTERFACE_INFO> ifaces(n);
ASSERT_OK( TYGetInterfaceList(&ifaces[0], n, &n) );
bool found = false;
std::vector<TY_INTERFACE_HANDLE> hIfaces;
for(uint32_t i = 0; i < ifaces.size(); i++){
TY_INTERFACE_HANDLE hIface;
if(type & ifaces[i].type) {
//Interface not specified
if (nullptr == inf_id ||
//Interface specified and matched
strcmp(inf_id, ifaces[i].id) == 0) {
ASSERT_OK( TYOpenInterface(ifaces[i].id, &hIface) );
hIfaces.push_back(hIface);
found = true;
//Interface specified and found, so stop searching
if(nullptr != inf_id) {
break;
}
}
}
}
if(!found) return TY_STATUS_ERROR;
updateDevicesParallel(hIfaces);
for (uint32_t i = 0; i < hIfaces.size(); i++) {
TY_INTERFACE_HANDLE hIface = hIfaces[i];
uint32_t n = 0;
TYGetDeviceNumber(hIface, &n);
if(n > 0){
std::vector<TY_DEVICE_BASE_INFO> devs(n);
TYGetDeviceList(hIface, &devs[0], n, &n);
for(uint32_t j = 0; j < n; j++) {
out.push_back(devs[j]);
}
}
TYCloseInterface(hIface);
}
if(out.size() == 0){
std::cout << "not found any device" << std::endl;
return TY_STATUS_ERROR;
}
return TY_STATUS_OK;
}
namespace percipio_layer {
TYDeviceInfo::TYDeviceInfo(const TY_DEVICE_BASE_INFO& info)
{
_info = info;
}
TYDeviceInfo::~TYDeviceInfo()
{
}
const char* TYDeviceInfo::mac()
{
if(!TYIsNetworkInterface(_info.iface.type)) {
return nullptr;
}
return _info.netInfo.mac;
}
const char* TYDeviceInfo::ip()
{
if(!TYIsNetworkInterface(_info.iface.type))
return nullptr;
return _info.netInfo.ip;
}
const char* TYDeviceInfo::netmask()
{
if(!TYIsNetworkInterface(_info.iface.type))
return nullptr;
return _info.netInfo.netmask;
}
const char* TYDeviceInfo::gateway()
{
if(!TYIsNetworkInterface(_info.iface.type))
return nullptr;
return _info.netInfo.gateway;
}
const char* TYDeviceInfo::broadcast()
{
if(!TYIsNetworkInterface(_info.iface.type))
return nullptr;
return _info.netInfo.broadcast;
}
static void eventCallback(TY_EVENT_INFO *event_info, void *userdata) {
TYDevice* handle = (TYDevice*)userdata;
handle->_event_callback(event_info);
}
TYCamInterface::TYCamInterface()
{
TYContext::getInstance();
Reset();
}
TYCamInterface::~TYCamInterface()
{
}
TY_STATUS TYCamInterface::Reset()
{
TY_STATUS status;
status = TYUpdateInterfaceList();
if(status != TY_STATUS_OK) return status;
uint32_t n = 0;
status = TYGetInterfaceNumber(&n);
if(status != TY_STATUS_OK) return status;
if(n == 0) return TY_STATUS_OK;
ifaces.resize(n);
status = TYGetInterfaceList(&ifaces[0], n, &n);
return status;
}
void TYCamInterface::List(std::vector<std::string>& interfaces)
{
for(auto& iter : ifaces) {
std::cout << iter.id << std::endl;
interfaces.push_back(iter.id);
}
}
FastCamera::FastCamera()
{
}
FastCamera::FastCamera(const char* sn)
{
const char *inf = nullptr;
if (!mIfaceId.empty()) {
inf = mIfaceId.c_str();
}
auto devList = TYContext::getInstance().queryDeviceList(inf);
if(devList->empty()) {
return;
}
device = (sn && strlen(sn) != 0) ? devList->getDeviceBySN(sn) : devList->getDevice(0);
if(!device) {
return;
}
TYGetComponentIDs(device->_handle, &components);
}
TY_STATUS FastCamera::open(const char* sn)
{
const char *inf = nullptr;
if (!mIfaceId.empty()) {
inf = mIfaceId.c_str();
}
auto devList = TYContext::getInstance().queryDeviceList(inf);
if(devList->empty()) {
std::cout << "deivce list is empty!" << std::endl;
return TY_STATUS_ERROR;
}
device = (sn && strlen(sn) != 0) ? devList->getDeviceBySN(sn) : devList->getDevice(0);
if(!device) {
return TY_STATUS_ERROR;
}
return TYGetComponentIDs(device->_handle, &components);
}
TY_STATUS FastCamera::openByIP(const char* ip)
{
const char *inf = nullptr;
if (!mIfaceId.empty()) {
inf = mIfaceId.c_str();
}
std::unique_lock<std::mutex> lock(_dev_lock);
auto devList = TYContext::getInstance().queryNetDeviceList(inf);
if(devList->empty()) {
std::cout << "net deivce list is empty!" << std::endl;
return TY_STATUS_ERROR;
}
device = (ip && strlen(ip) != 0) ? devList->getDeviceByIP(ip) : devList->getDevice(0);
if(!device) {
std::cout << "open device failed!" << std::endl;
return TY_STATUS_ERROR;
}
return TYGetComponentIDs(device->_handle, &components);
}
TY_STATUS FastCamera::setIfaceId(const char* inf)
{
mIfaceId = inf;
return TY_STATUS_OK;
}
FastCamera::~FastCamera()
{
if(isRuning) {
doStop();
}
}
void FastCamera::close()
{
std::unique_lock<std::mutex> lock(_dev_lock);
if(isRuning) {
doStop();
}
if(device) device.reset();
}
std::shared_ptr<TYFrame> FastCamera::fetchFrames(uint32_t timeout_ms)
{
TY_FRAME_DATA tyframe;
TY_STATUS status = TYFetchFrame(handle(), &tyframe, timeout_ms);
if(status != TY_STATUS_OK) {
std::cout << "Frame fetch failed with err code: " << status << "(" << TYErrorString(status) << ")."<< std::endl;
return std::shared_ptr<TYFrame>();
}
std::shared_ptr<TYFrame> frame = std::shared_ptr<TYFrame>(new TYFrame(tyframe));
CHECK_RET(TYEnqueueBuffer(handle(), tyframe.userBuffer, tyframe.bufferSize));
return frame;
}
static TY_COMPONENT_ID StreamIdx2CompID(FastCamera::stream_idx idx)
{
TY_COMPONENT_ID comp = 0;
switch (idx)
{
case FastCamera::stream_depth:
comp = TY_COMPONENT_DEPTH_CAM;
break;
case FastCamera::stream_color:
comp = TY_COMPONENT_RGB_CAM;
break;
case FastCamera::stream_ir_left:
comp = TY_COMPONENT_IR_CAM_LEFT;
break;
case FastCamera::stream_ir_right:
comp = TY_COMPONENT_IR_CAM_RIGHT;
break;
default:
break;
}
return comp;
}
bool FastCamera::has_stream(stream_idx idx)
{
return components & StreamIdx2CompID(idx);
}
TY_STATUS FastCamera::stream_enable(stream_idx idx)
{
std::unique_lock<std::mutex> lock(_dev_lock);
return TYEnableComponents(handle(), StreamIdx2CompID(idx));
}
TY_STATUS FastCamera::stream_disable(stream_idx idx)
{
std::unique_lock<std::mutex> lock(_dev_lock);
return TYDisableComponents(handle(), StreamIdx2CompID(idx));
}
TY_STATUS FastCamera::start()
{
std::unique_lock<std::mutex> lock(_dev_lock);
if(isRuning) {
std::cout << "Device is busy!" << std::endl;
return TY_STATUS_BUSY;
}
uint32_t stream_buffer_size;
TY_STATUS status = TYGetFrameBufferSize(handle(), &stream_buffer_size);
if(status != TY_STATUS_OK) {
std::cout << "Get frame buffer size failed with error code: " << TY_ERROR(status) << std::endl;
return status;
}
if(stream_buffer_size == 0) {
std::cout << "Frame buffer size is 0, is the data flow component not enabled?" << std::endl;
return TY_STATUS_DEVICE_ERROR;
}
for(int i = 0; i < BUF_CNT; i++) {
stream_buffer[i].resize(stream_buffer_size);
TYEnqueueBuffer(handle(), &stream_buffer[i][0], stream_buffer_size);
}
status = TYStartCapture(handle());
if(TY_STATUS_OK != status) {
std::cout << "Start capture failed with error code: " << TY_ERROR(status) << std::endl;
return status;
}
isRuning = true;
return TY_STATUS_OK;
}
TY_STATUS FastCamera::stop()
{
std::unique_lock<std::mutex> lock(_dev_lock);
return doStop();
}
TY_STATUS FastCamera::doStop()
{
if(!isRuning)
return TY_STATUS_IDLE;
isRuning = false;
TY_STATUS status = TYStopCapture(handle());
if(TY_STATUS_OK != status) {
std::cout << "Stop capture failed with error code: " << TY_ERROR(status) << std::endl;
}
//Stop halts receiving; TYClearBufferQueue is still needed in any case
//Ignore the TYClearBufferQueue return value
TYClearBufferQueue(handle());
for(int i = 0; i < BUF_CNT; i++) {
stream_buffer[i].clear();
}
return status;
}
std::shared_ptr<TYFrame> FastCamera::tryGetFrames(uint32_t timeout_ms)
{
std::unique_lock<std::mutex> lock(_dev_lock);
return fetchFrames(timeout_ms);
}
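// Typical capture sequence (illustrative sketch; error handling omitted):
//
//     FastCamera cam;
//     if (cam.open(nullptr) == TY_STATUS_OK) {        // nullptr: open the first device found
//         cam.stream_enable(FastCamera::stream_depth);
//         cam.start();
//         std::shared_ptr<TYFrame> frame = cam.tryGetFrames(2000);
//         cam.stop();
//     }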
TYDevice::TYDevice(const TY_DEV_HANDLE handle, const TY_DEVICE_BASE_INFO& info)
{
_handle = handle;
_dev_info = info;
_event_callback = std::bind(&TYDevice::onDeviceEventCallback, this, std::placeholders::_1);
TYRegisterEventCallback(_handle, eventCallback, this);
}
TYDevice::~TYDevice()
{
CHECK_RET(TYCloseDevice(_handle));
}
void TYDevice::registerEventCallback(const TY_EVENT eventID, void* data, EventCallback cb)
{
_eventCallbackMap[eventID] = {data, cb};
}
void TYDevice::onDeviceEventCallback(const TY_EVENT_INFO *event_info)
{
if(_eventCallbackMap[event_info->eventId].second != nullptr) {
_eventCallbackMap[event_info->eventId].second(_eventCallbackMap[event_info->eventId].first);
}
}
std::shared_ptr<TYDeviceInfo> TYDevice::getDeviceInfo()
{
return std::shared_ptr<TYDeviceInfo>(new TYDeviceInfo(_dev_info));
}
std::set<TY_INTERFACE_HANDLE> DeviceList::gifaces;
DeviceList::DeviceList(std::vector<TY_DEVICE_BASE_INFO>& devices)
{
devs = devices;
}
DeviceList::~DeviceList()
{
for (TY_INTERFACE_HANDLE iface : gifaces) {
TYCloseInterface(iface);
}
gifaces.clear();
}
std::shared_ptr<TYDeviceInfo> DeviceList::getDeviceInfo(int idx)
{
if((idx < 0) || (idx >= devCount())) {
std::cout << "idx out of range" << std::endl;
return nullptr;
}
return std::shared_ptr<TYDeviceInfo>(new TYDeviceInfo(devs[idx]));
}
std::shared_ptr<TYDevice> DeviceList::getDevice(int idx)
{
if((idx < 0) || (idx >= devCount())) {
std::cout << "idx out of range" << std::endl;
return nullptr;
}
TY_INTERFACE_HANDLE hIface = NULL;
TY_DEV_HANDLE hDevice = NULL;
TY_STATUS status = TY_STATUS_OK;
status = TYOpenInterface(devs[idx].iface.id, &hIface);
if(status != TY_STATUS_OK) {
std::cout << "Open interface failed with error code: " << TY_ERROR(status) << std::endl;
return nullptr;
}
gifaces.insert(hIface);
std::string ifaceId = devs[idx].iface.id;
std::string open_log = std::string("open device ") + devs[idx].id +
"\non interface " + parseInterfaceID(ifaceId);
std::cout << open_log << std::endl;
status = TYOpenDevice(hIface, devs[idx].id, &hDevice);
if(status != TY_STATUS_OK) {
std::cout << "Open device < " << devs[idx].id << "> failed with error code: " << TY_ERROR(status) << std::endl;
return nullptr;
}
TY_DEVICE_BASE_INFO info;
status = TYGetDeviceInfo(hDevice, &info);
if(status != TY_STATUS_OK) {
std::cout << "Get device info failed with error code: " << TY_ERROR(status) << std::endl;
return nullptr;
}
return std::shared_ptr<TYDevice>(new TYDevice(hDevice, info));
}
std::shared_ptr<TYDevice> DeviceList::getDeviceBySN(const char* sn)
{
TY_STATUS status = TY_STATUS_OK;
TY_INTERFACE_HANDLE hIface = NULL;
TY_DEV_HANDLE hDevice = NULL;
if(!sn) {
std::cout << "Invalid parameters" << std::endl;
return nullptr;
}
for(size_t i = 0; i < devs.size(); i++) {
if(strcmp(devs[i].id, sn) == 0) {
status = TYOpenInterface(devs[i].iface.id, &hIface);
if(status != TY_STATUS_OK) continue;
gifaces.insert(hIface);
std::string ifaceId = devs[i].iface.id;
std::string open_log = std::string("open device ") + devs[i].id +
"\non interface " + parseInterfaceID(ifaceId);
std::cout << open_log << std::endl;
status = TYOpenDevice(hIface, devs[i].id, &hDevice);
if(status != TY_STATUS_OK) continue;
TY_DEVICE_BASE_INFO info;
status = TYGetDeviceInfo(hDevice, &info);
if(status != TY_STATUS_OK) {
TYCloseDevice(hDevice);
continue;
}
return std::shared_ptr<TYDevice>(new TYDevice(hDevice, info));
}
}
std::cout << "Device <sn:" << sn << "> not found!" << std::endl;
return nullptr;
}
std::shared_ptr<TYDevice> DeviceList::getDeviceByIP(const char* ip)
{
TY_STATUS status = TY_STATUS_OK;
TY_INTERFACE_HANDLE hIface = NULL;
TY_DEV_HANDLE hDevice = NULL;
if(!ip) {
std::cout << "Invalid parameters" << std::endl;
return nullptr;
}
for(size_t i = 0; i < devs.size(); i++) {
if(TYIsNetworkInterface(devs[i].iface.type)) {
status = TYOpenInterface(devs[i].iface.id, &hIface);
if(status != TY_STATUS_OK) continue;
std::string open_log = "open device ";
if(ip && strlen(ip)) {
open_log += ip;
status = TYOpenDeviceWithIP(hIface, ip, &hDevice);
} else {
open_log += devs[i].id;
status = TYOpenDevice(hIface, devs[i].id, &hDevice);
}
std::string ifaceId = devs[i].iface.id;
open_log += "\non interface " + parseInterfaceID(ifaceId);
std::cout << open_log << std::endl;
if(status != TY_STATUS_OK) continue;
TY_DEVICE_BASE_INFO info;
status = TYGetDeviceInfo(hDevice, &info);
if(status != TY_STATUS_OK) {
TYCloseDevice(hDevice);
continue;
}
return std::shared_ptr<TYDevice>(new TYDevice(hDevice, info));
}
}
std::cout << "Device <ip:" << ip << "> not found!" << std::endl;
return nullptr;
}
std::shared_ptr<DeviceList> TYContext::queryDeviceList(const char *iface)
{
std::vector<TY_DEVICE_BASE_INFO> devs;
searchDevice(devs, iface);
return std::shared_ptr<DeviceList>(new DeviceList(devs));
}
std::shared_ptr<DeviceList> TYContext::queryNetDeviceList(const char *iface)
{
std::vector<TY_DEVICE_BASE_INFO> devs;
searchDevice(devs, iface, TY_INTERFACE_ETHERNET | TY_INTERFACE_IEEE80211);
return std::shared_ptr<DeviceList>(new DeviceList(devs));
}
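// Force a temporary IP/netmask/gateway on the network camera matching 'mac'; for ForceIPStyleStatic
// (or a dynamic request with a non-zero address) the device is then reopened and its persistent
// IP settings are written as well ("0.0.0.0" selects dynamic addressing).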
bool TYContext::ForceNetDeviceIP(const ForceIPStyle style, const std::string& mac, const std::string& ip, const std::string& mask, const std::string& gateway)
{
ASSERT_OK( TYUpdateInterfaceList() );
uint32_t n = 0;
ASSERT_OK( TYGetInterfaceNumber(&n) );
if(n == 0) return false;
std::vector<TY_INTERFACE_INFO> ifaces(n);
ASSERT_OK( TYGetInterfaceList(&ifaces[0], n, &n) );
ASSERT( n == ifaces.size() );
bool open_needed = false;
const char * ip_save = ip.c_str();
const char * netmask_save = mask.c_str();
const char * gateway_save = gateway.c_str();
switch(style)
{
case ForceIPStyleDynamic:
if(strcmp(ip_save, "0.0.0.0") != 0) {
open_needed = true;
}
ip_save = "0.0.0.0";
netmask_save = "0.0.0.0";
gateway_save = "0.0.0.0";
break;
case ForceIPStyleStatic:
open_needed = true;
break;
default:
break;
}
bool result = false;
for(uint32_t i = 0; i < n; i++) {
if(TYIsNetworkInterface(ifaces[i].type)) {
TY_INTERFACE_HANDLE hIface;
ASSERT_OK( TYOpenInterface(ifaces[i].id, &hIface) );
if (TYForceDeviceIP(hIface, mac.c_str(), ip.c_str(), mask.c_str(), gateway.c_str()) == TY_STATUS_OK) {
LOGD("**** Set Temporary IP/Netmask/Gateway ...Done! ****");
if(open_needed) {
TYUpdateDeviceList(hIface);
TY_DEV_HANDLE hDev;
if(TYOpenDeviceWithIP(hIface, ip.c_str(), &hDev) == TY_STATUS_OK){
int32_t ip_i[4];
uint8_t ip_b[4];
int32_t ip32;
sscanf(ip_save, "%d.%d.%d.%d", &ip_i[0], &ip_i[1], &ip_i[2], &ip_i[3]);
ip_b[0] = ip_i[0];ip_b[1] = ip_i[1];ip_b[2] = ip_i[2];ip_b[3] = ip_i[3];
ip32 = TYIPv4ToInt(ip_b);
ASSERT_OK( TYSetInt(hDev, TY_COMPONENT_DEVICE, TY_INT_PERSISTENT_IP, ip32) );
sscanf(netmask_save, "%d.%d.%d.%d", &ip_i[0], &ip_i[1], &ip_i[2], &ip_i[3]);
ip_b[0] = ip_i[0];ip_b[1] = ip_i[1];ip_b[2] = ip_i[2];ip_b[3] = ip_i[3];
ip32 = TYIPv4ToInt(ip_b);
ASSERT_OK( TYSetInt(hDev, TY_COMPONENT_DEVICE, TY_INT_PERSISTENT_SUBMASK, ip32) );
sscanf(gateway_save, "%d.%d.%d.%d", &ip_i[0], &ip_i[1], &ip_i[2], &ip_i[3]);
ip_b[0] = ip_i[0];ip_b[1] = ip_i[1];ip_b[2] = ip_i[2];ip_b[3] = ip_i[3];
ip32 = TYIPv4ToInt(ip_b);
ASSERT_OK( TYSetInt(hDev, TY_COMPONENT_DEVICE, TY_INT_PERSISTENT_GATEWAY, ip32) );
result = true;
std::cout << "**** Set Persistent IP/Netmask/Gateway ...Done! ****" <<std::endl;
} else {
result = false;
}
} else {
result = true;
}
}
ASSERT_OK( TYCloseInterface(hIface));
}
}
return result;
}
}

View File

@@ -0,0 +1,472 @@
#include <thread>
#include "Frame.hpp"
#include "TYImageProc.h"
namespace percipio_layer {
TYImage::TYImage()
{
memset(&image_data, 0, sizeof(image_data));
}
TYImage::TYImage(const TY_IMAGE_DATA& image) :
m_isOwner(false)
{
memcpy(&image_data, &image, sizeof(TY_IMAGE_DATA));
}
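// Copy constructor: deep-copies the pixel buffer and takes ownership of the new allocation.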
TYImage::TYImage(const TYImage& src)
{
image_data.timestamp = src.timestamp();
image_data.imageIndex = src.imageIndex();
image_data.status = src.status();
image_data.componentID = src.componentID();
image_data.size = src.size();
image_data.width = src.width();
image_data.height = src.height();
image_data.pixelFormat = src.pixelFormat();
if(image_data.size) {
m_isOwner = true;
image_data.buffer = malloc(image_data.size);
memcpy(image_data.buffer, src.buffer(), image_data.size);
}
}
TYImage::TYImage(int32_t width, int32_t height, TY_COMPONENT_ID compID, TY_PIXEL_FORMAT format, int32_t size)
{
image_data.size = size;
image_data.width = width;
image_data.height = height;
image_data.componentID = compID;
image_data.pixelFormat = format;
if(image_data.size) {
m_isOwner = true;
image_data.buffer = calloc(image_data.size, 1);
}
}
bool TYImage::resize(int w, int h)
{
#ifdef OPENCV_DEPENDENCIES
cv::Mat src, dst;
switch(image_data.pixelFormat)
{
case TY_PIXEL_FORMAT_BGR:
case TY_PIXEL_FORMAT_RGB:
src = cv::Mat(cv::Size(width(), height()), CV_8UC3, buffer());
break;
case TY_PIXEL_FORMAT_MONO:
src = cv::Mat(cv::Size(width(), height()), CV_8U, buffer());
break;
case TY_PIXEL_FORMAT_MONO16:
src = cv::Mat(cv::Size(width(), height()), CV_16U, buffer());
break;
case TY_PIXEL_FORMAT_BGR48:
src = cv::Mat(cv::Size(width(), height()), CV_16UC3, buffer());
break;
case TY_PIXEL_FORMAT_RGB48:
src = cv::Mat(cv::Size(width(), height()), CV_16UC3, buffer());
break;
case TY_PIXEL_FORMAT_DEPTH16:
src = cv::Mat(cv::Size(width(), height()), CV_16U, buffer());
break;
default:
return false;
}
if(image_data.pixelFormat == TY_PIXEL_FORMAT_DEPTH16)
cv::resize(src, dst, cv::Size(w, h), 0, 0, cv::INTER_NEAREST);
else
cv::resize(src, dst, cv::Size(w, h));
image_data.size = dst.total() * dst.elemSize(); // elemSize() already includes the channel count
image_data.width = dst.cols;
image_data.height = dst.rows;
if(m_isOwner) free(image_data.buffer);
image_data.buffer = malloc(image_data.size);
m_isOwner = true; // we own the newly allocated buffer
memcpy(image_data.buffer, dst.data, image_data.size);
return true;
#else
std::cout << "not support!" << std::endl;
return false;
#endif
}
TYImage::~TYImage()
{
if(m_isOwner) {
free(image_data.buffer);
}
}
ImageProcesser::ImageProcesser(const char* win, const TY_CAMERA_CALIB_INFO* calib_data, const TY_ISP_HANDLE isp_handle)
{
win_name = win;
hasWin = false;
color_isp_handle = isp_handle;
if(calib_data != nullptr) {
_calib_data = std::shared_ptr<TY_CAMERA_CALIB_INFO>(new TY_CAMERA_CALIB_INFO(*calib_data));
}
}
int ImageProcesser::parse(const std::shared_ptr<TYImage>& image)
{
if(!image) return -1;
TY_PIXEL_FORMAT format = image->pixelFormat();
#ifndef OPENCV_DEPENDENCIES
std::cout << win() << " image size : " << image->width() << " x " << image->height() << std::endl;
#endif
switch(format) {
/*
case TY_PIXEL_FORMAT_BGR:
case TY_PIXEL_FORMAT_RGB:
case TY_PIXEL_FORMAT_MONO:
case TY_PIXEL_FORMAT_MONO16:
case TY_PIXEL_FORMAT_BGR48:
case TY_PIXEL_FORMAT_RGB48:
*/
case TY_PIXEL_FORMAT_DEPTH16:
{
_image = std::shared_ptr<TYImage>(new TYImage(*image));
return 0;
}
case TY_PIXEL_FORMAT_XYZ48:
{
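// XYZ48: three int16 values (X, Y, Z) per pixel; keep only the Z channel as a DEPTH16 image.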
std::vector<int16_t> depth_data(image->width() * image->height());
int16_t* src = static_cast<int16_t*>(image->buffer());
for (int pix = 0; pix < image->width()*image->height(); pix++) {
depth_data[pix] = *(src + 3*pix + 2);
}
_image = std::shared_ptr<TYImage>(new TYImage(image->width(), image->height(), image->componentID(), TY_PIXEL_FORMAT_DEPTH16, depth_data.size() * sizeof(int16_t)));
memcpy(_image->buffer(), depth_data.data(), _image->size()); // copy only the extracted depth plane, not the larger XYZ48 source
return 0;
}
default:
{
#ifdef OPENCV_DEPENDENCIES
cv::Mat cvImage;
int32_t image_size;
TY_PIXEL_FORMAT image_fmt;
TY_COMPONENT_ID comp_id;
comp_id = image->componentID();
parseImage(image->image(), &cvImage, color_isp_handle);
switch(cvImage.type())
{
case CV_8U:
//MONO8
image_size = cvImage.size().area();
image_fmt = TY_PIXEL_FORMAT_MONO;
break;
case CV_16U:
//MONO16
image_size = cvImage.size().area() * 2;
image_fmt = TY_PIXEL_FORMAT_MONO16;
break;
case CV_16UC3:
//BGR48
image_size = cvImage.size().area() * 6;
image_fmt = TY_PIXEL_FORMAT_BGR48;
break;
default:
//BGR888
image_size = cvImage.size().area() * 3;
image_fmt = TY_PIXEL_FORMAT_BGR;
break;
}
_image = std::shared_ptr<TYImage>(new TYImage(cvImage.cols, cvImage.rows, comp_id, image_fmt, image_size));
memcpy(_image->buffer(), cvImage.data, image_size);
return 0;
#else
//Without the OpenCV library, image decoding is not supported yet.
return -1;
#endif
}
}
}
int ImageProcesser::DepthImageRender()
{
if(!_image) return -1;
TY_PIXEL_FORMAT format = _image->pixelFormat();
if(format != TY_PIXEL_FORMAT_DEPTH16) return -1;
#ifdef OPENCV_DEPENDENCIES
static DepthRender render;
cv::Mat depth = cv::Mat(_image->height(), _image->width(), CV_16U, _image->buffer());
cv::Mat bgr = render.Compute(depth);
_image = std::shared_ptr<TYImage>(new TYImage(_image->width(), _image->height(), _image->componentID(), TY_PIXEL_FORMAT_BGR, bgr.size().area() * 3));
memcpy(_image->buffer(), bgr.data, _image->size());
return 0;
#else
return -1;
#endif
}
TY_STATUS ImageProcesser::doUndistortion()
{
if(!_image) {
std::cout << "Image decoding failed." << std::endl;
return TY_STATUS_ERROR;
}
if(!_calib_data) {
std::cout << "Calib data is empty!" << std::endl;
return TY_STATUS_ERROR;
}
int32_t image_size = _image->size();
TY_PIXEL_FORMAT image_fmt = _image->pixelFormat();
TY_COMPONENT_ID comp_id = _image->componentID();
std::vector<uint8_t> undistort_image(image_size);
TY_IMAGE_DATA src;
src.width = _image->width();
src.height = _image->height();
src.size = image_size;
src.pixelFormat = image_fmt;
src.buffer = _image->buffer();
TY_IMAGE_DATA dst;
dst.width = _image->width();
dst.height = _image->height();
dst.size = image_size;
dst.pixelFormat = image_fmt;
dst.buffer = undistort_image.data();
TY_STATUS status = TYUndistortImage(&*_calib_data, &src, NULL, &dst);
if(status != TY_STATUS_OK) {
std::cout << "Do image undistortion failed!" << std::endl;
return status;
}
_image = std::shared_ptr<TYImage>(new TYImage(_image->width(), _image->height(), comp_id, image_fmt, image_size));
memcpy(_image->buffer(), undistort_image.data(), image_size);
return TY_STATUS_OK;
}
int ImageProcesser::show()
{
if(!_image) return -1;
#ifdef OPENCV_DEPENDENCIES
cv::Mat display;
switch(_image->pixelFormat())
{
case TY_PIXEL_FORMAT_MONO:
{
display = cv::Mat(_image->height(), _image->width(), CV_8U, _image->buffer());
break;
}
case TY_PIXEL_FORMAT_MONO16:
{
display = cv::Mat(_image->height(), _image->width(), CV_16U, _image->buffer());
break;
}
case TY_PIXEL_FORMAT_BGR:
{
display = cv::Mat(_image->height(), _image->width(), CV_8UC3, _image->buffer());
break;
}
case TY_PIXEL_FORMAT_BGR48:
{
display = cv::Mat(_image->height(), _image->width(), CV_16UC3, _image->buffer());
break;
}
case TY_PIXEL_FORMAT_DEPTH16:
{
DepthImageRender();
display = cv::Mat(_image->height(), _image->width(), CV_8UC3, _image->buffer());
break;
}
default:
{
break;
}
}
if(!display.empty()) {
hasWin = true;
cv::imshow(win_name.c_str(), display);
int key = cv::waitKey(1);
return key;
}
else
std::cout << "Unknown image encoding format." << std::endl;
#endif
return 0;
}
void ImageProcesser::clear()
{
#ifdef OPENCV_DEPENDENCIES
if (hasWin) {
cv::destroyWindow(win_name.c_str());
}
#endif
}
TYFrame::TYFrame(const TY_FRAME_DATA& frame)
{
bufferSize = frame.bufferSize;
userBuffer.resize(bufferSize);
memcpy(userBuffer.data(), frame.userBuffer, bufferSize);
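// TY_IMAGE_MOVE: copy the image descriptor and rebase its buffer pointer from the SDK's
// frame buffer (src) into our own copy of that buffer (dst).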
#define TY_IMAGE_MOVE(src, dst, from, to) do { \
(to) = (from); \
(to.buffer) = reinterpret_cast<void*>((std::intptr_t(dst)) + (std::intptr_t(from.buffer) - std::intptr_t(src)));\
}while(0)
for (int i = 0; i < frame.validCount; i++) {
TY_IMAGE_DATA img;
if (frame.image[i].status != TY_STATUS_OK) continue;
// get depth image
if (frame.image[i].componentID == TY_COMPONENT_DEPTH_CAM) {
TY_IMAGE_MOVE(frame.userBuffer, userBuffer.data(), frame.image[i], img);
_images[TY_COMPONENT_DEPTH_CAM] = std::shared_ptr<TYImage>(new TYImage(img));
}
// get left ir image
if (frame.image[i].componentID == TY_COMPONENT_IR_CAM_LEFT) {
TY_IMAGE_MOVE(frame.userBuffer, userBuffer.data(), frame.image[i], img);
_images[TY_COMPONENT_IR_CAM_LEFT] = std::shared_ptr<TYImage>(new TYImage(img));
}
// get right ir image
if (frame.image[i].componentID == TY_COMPONENT_IR_CAM_RIGHT) {
TY_IMAGE_MOVE(frame.userBuffer, userBuffer.data(), frame.image[i], img);
_images[TY_COMPONENT_IR_CAM_RIGHT] = std::shared_ptr<TYImage>(new TYImage(img));
}
// get color image
if (frame.image[i].componentID == TY_COMPONENT_RGB_CAM) {
TY_IMAGE_MOVE(frame.userBuffer, userBuffer.data(), frame.image[i], img);
_images[TY_COMPONENT_RGB_CAM] = std::shared_ptr<TYImage>(new TYImage(img));
}
}
}
TYFrame::~TYFrame()
{
}
TYFrameParser::TYFrameParser(uint32_t max_queue_size, const TY_ISP_HANDLE isp_handle)
{
_max_queue_size = max_queue_size;
isRuning = true;
setImageProcesser(TY_COMPONENT_DEPTH_CAM, std::shared_ptr<ImageProcesser>(new ImageProcesser("depth")));
setImageProcesser(TY_COMPONENT_IR_CAM_LEFT, std::shared_ptr<ImageProcesser>(new ImageProcesser("Left-IR")));
setImageProcesser(TY_COMPONENT_IR_CAM_RIGHT, std::shared_ptr<ImageProcesser>(new ImageProcesser("Right-IR")));
setImageProcesser(TY_COMPONENT_RGB_CAM, std::shared_ptr<ImageProcesser>(new ImageProcesser("color", nullptr, isp_handle)));
processThread_ = std::thread(&TYFrameParser::display, this);
}
TYFrameParser::~TYFrameParser()
{
isRuning = false;
processThread_.join();
}
int TYFrameParser::setImageProcesser(TY_COMPONENT_ID id, std::shared_ptr<ImageProcesser> proc)
{
stream[id] = proc;
return 0;
}
int TYFrameParser::doProcess(const std::shared_ptr<TYFrame>& img)
{
auto depth = img->depthImage();
auto color = img->colorImage();
auto left_ir = img->leftIRImage();
auto right_ir = img->rightIRImage();
if (left_ir) {
stream[TY_COMPONENT_IR_CAM_LEFT]->parse(left_ir);
}
if (right_ir) {
stream[TY_COMPONENT_IR_CAM_RIGHT]->parse(right_ir);
}
if (color) {
stream[TY_COMPONENT_RGB_CAM]->parse(color);
}
if (depth) {
stream[TY_COMPONENT_DEPTH_CAM]->parse(depth);
}
return 0;
}
void TYFrameParser::display()
{
int ret = 0;
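// Polling loop: pop one frame from the queue when available, parse it into the per-component
// processors, then refresh every display window and forward key presses to the registered callback.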
while(isRuning) {
{
std::unique_lock<std::mutex> lock(_queue_lock);
if(!images.empty()) {
std::shared_ptr<TYFrame> img = images.front();
images.pop();
doProcess(img);
}
}
for(auto& iter : stream) {
ret = iter.second->show();
if(ret > 0) {
if(func_keyboard_event) func_keyboard_event(ret, user_data);
}
}
}
}
inline void TYFrameParser::ImageQueueSizeCheck()
{
while(images.size() >= _max_queue_size)
images.pop();
}
void TYFrameParser::update(const std::shared_ptr<TYFrame>& frame)
{
std::unique_lock<std::mutex> lock(_queue_lock);
if(frame) {
ImageQueueSizeCheck();
images.push(frame);
#ifndef OPENCV_DEPENDENCIES
auto depth = frame->depthImage();
auto color = frame->colorImage();
auto left_ir = frame->leftIRImage();
auto right_ir = frame->rightIRImage();
if (left_ir) {
auto image = left_ir;
std::cout << "Left" << " image size : " << image->width() << " x " << image->height() << std::endl;
}
if (right_ir) {
auto image = right_ir;
std::cout << "Right" << " image size : " << image->width() << " x " << image->height() << std::endl;
}
if (color) {
auto image = color;
std::cout << "Color" << " image size : " << image->width() << " x " << image->height() << std::endl;
}
if (depth) {
auto image = depth;
std::cout << "Depth" << " image size : " << image->width() << " x " << image->height() << std::endl;
}
#endif
}
}
}//namespace percipio_layer

View File

@@ -0,0 +1,239 @@
#pragma once
#include <memory>
#include <vector>
#include <set>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <condition_variable>
#include <stdint.h>
#include "Frame.hpp"
namespace percipio_layer {
class TYDevice;
class DeviceList;
class TYContext;
class TYFrame;
class FastCamera;
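// parseInterfaceID: make the interface id human readable; for network interfaces the id ends
// with the IPv4 address encoded in hex, which is decoded and appended as "ip:a.b.c.d".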
static std::string parseInterfaceID(std::string &ifaceId)
{
std::string type_s = ifaceId.substr(0, ifaceId.find('-'));
if ("usb" == type_s) {
//add usb specific parse if needed
}
if ("eth" == type_s || "wifi" == type_s) {
//eth-2c:f0:5d:ac:5d:6265eea8c0
//eth-2c:f0:5d:ac:5d:62
size_t IdLength = 18 + type_s.length();
std::string new_id = ifaceId.substr(0, IdLength);
// 65eea8c0
std::string ip_s = ifaceId.substr(IdLength, ifaceId.size() - IdLength);
//base = 16
uint32_t ip = static_cast<uint32_t>(std::stoul(ip_s, nullptr, 16));
uint8_t *ip_arr = (uint8_t *)&ip;
new_id += " ip:";
for(int i = 0; i < 3; i++) {
new_id += std::to_string((uint32_t) ip_arr[i]) + ".";
}
new_id += std::to_string((uint32_t) ip_arr[3]);
return new_id;
}
return ifaceId;
}
class TYDeviceInfo
{
public:
~TYDeviceInfo();
TYDeviceInfo(TYDeviceInfo const&) = delete;
void operator=(TYDeviceInfo const&) = delete;
friend class TYDevice;
friend class DeviceList;
const char* id() { return _info.id; }
const TY_INTERFACE_INFO& Interface() { return _info.iface; }
const char* vendorName()
{
//some cameras report a user-defined name; prefer it over the plain vendor name
if (strlen(_info.userDefinedName) != 0) {
return _info.userDefinedName;
} else {
return _info.vendorName;
}
}
const char* modelName() { return _info.modelName; }
const char* buildHash() { return _info.buildHash; }
const char* configVersion() { return _info.configVersion; }
const TY_VERSION_INFO& hardwareVersion() { return _info.hardwareVersion; }
const TY_VERSION_INFO& firmwareVersion() { return _info.firmwareVersion; }
const char* mac();
const char* ip();
const char* netmask();
const char* gateway();
const char* broadcast();
private:
TYDeviceInfo(const TY_DEVICE_BASE_INFO& info);
TY_DEVICE_BASE_INFO _info;
};
typedef std::function<void(void* userdata)> EventCallback;
typedef std::pair<void*, EventCallback> event_pair;
static void eventCallback(TY_EVENT_INFO *event_info, void *userdata);
class TYDevice
{
public:
~TYDevice();
void operator=(TYDevice const&) = delete;
friend class FastCamera;
friend class TYStream;
friend class DeviceList;
friend class TYPropertyManager;
friend void eventCallback(TY_EVENT_INFO *event_info, void *userdata);
std::shared_ptr<TYDeviceInfo> getDeviceInfo();
void registerEventCallback (const TY_EVENT eventID, void* data, EventCallback cb);
private:
TYDevice(const TY_DEV_HANDLE handle, const TY_DEVICE_BASE_INFO& info);
TY_DEV_HANDLE _handle;
TY_DEVICE_BASE_INFO _dev_info;
std::map<TY_EVENT, event_pair> _eventCallbackMap;
std::function<void(TY_EVENT_INFO*)> _event_callback;
void onDeviceEventCallback(const TY_EVENT_INFO *event_info);
};
class DeviceList {
public:
~DeviceList();
DeviceList(DeviceList const&) = delete;
void operator=(DeviceList const&) = delete;
bool empty() { return devs.size() == 0; }
int devCount() { return devs.size(); }
std::shared_ptr<TYDeviceInfo> getDeviceInfo(int idx);
std::shared_ptr<TYDevice> getDevice(int idx);
std::shared_ptr<TYDevice> getDeviceBySN(const char* sn);
std::shared_ptr<TYDevice> getDeviceByIP(const char* ip);
friend class TYContext;
private:
std::vector<TY_DEVICE_BASE_INFO> devs;
static std::set<TY_INTERFACE_HANDLE> gifaces;
DeviceList(std::vector<TY_DEVICE_BASE_INFO>& devices);
};
enum ForceIPStyle {
ForceIPStyleDynamic = 0,
ForceIPStyleForce = 1,
ForceIPStyleStatic = 2
};
class TYContext {
public:
static TYContext& getInstance() {
static TYContext instance;
return instance;
}
TYContext(TYContext const&) = delete;
void operator=(TYContext const&) = delete;
std::shared_ptr<DeviceList> queryDeviceList(const char *iface = nullptr);
std::shared_ptr<DeviceList> queryNetDeviceList(const char *iface = nullptr);
bool ForceNetDeviceIP(const ForceIPStyle style, const std::string& mac, const std::string& ip, const std::string& mask, const std::string& gateway);
private:
TYContext() {
ASSERT_OK(TYInitLib());
TY_VERSION_INFO ver;
ASSERT_OK( TYLibVersion(&ver) );
std::cout << "=== lib version: " << ver.major << "." << ver.minor << "." << ver.patch << std::endl;
}
~TYContext() {
ASSERT_OK(TYDeinitLib());
}
};
class TYCamInterface
{
public:
TYCamInterface();
~TYCamInterface();
TY_STATUS Reset();
void List(std::vector<std::string>& );
private:
std::vector<TY_INTERFACE_INFO> ifaces;
};
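// Convenience wrapper around a single TYDevice: open it by serial number or IP, enable the
// wanted component streams, then start capture and fetch frames with tryGetFrames().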
class FastCamera
{
public:
enum stream_idx
{
stream_depth = 0x1,
stream_color = 0x2,
stream_ir_left = 0x4,
stream_ir_right = 0x8,
stream_ir = stream_ir_left
};
friend class TYFrame;
FastCamera();
FastCamera(const char* sn);
~FastCamera();
virtual TY_STATUS open(const char* sn);
TY_STATUS setIfaceId(const char* inf);
virtual TY_STATUS openByIP(const char* ip);
virtual bool has_stream(stream_idx idx);
virtual TY_STATUS stream_enable(stream_idx idx);
virtual TY_STATUS stream_disable(stream_idx idx);
virtual TY_STATUS start();
virtual TY_STATUS stop();
virtual void close();
std::shared_ptr<TYFrame> tryGetFrames(uint32_t timeout_ms);
TY_DEV_HANDLE handle() {
if (!device) {
// std::cerr << "Error: Device handle accessed but device is null!" << std::endl;
return 0;
}
return device->_handle;
}
void RegisterOfflineEventCallback(EventCallback cb, void* data) { device->registerEventCallback(TY_EVENT_DEVICE_OFFLINE, data, cb); }
private:
std::string mIfaceId;
std::mutex _dev_lock;
TY_COMPONENT_ID components = 0;
#define BUF_CNT (3)
bool isRuning = false;
std::shared_ptr<TYFrame> fetchFrames(uint32_t timeout_ms);
TY_STATUS doStop();
std::shared_ptr<TYDevice> device;
std::vector<uint8_t> stream_buffer[BUF_CNT];
};
}

View File

@@ -0,0 +1,126 @@
#pragma once
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <condition_variable>
#include "common.hpp"
namespace percipio_layer {
class TYImage
{
public:
TYImage();
TYImage(const TY_IMAGE_DATA& image);
TYImage(const TYImage& src);
TYImage(int32_t width, int32_t height, TY_COMPONENT_ID compID, TY_PIXEL_FORMAT format, int32_t size);
~TYImage();
int32_t size() const { return image_data.size; }
int32_t width() const { return image_data.width; }
int32_t height() const { return image_data.height; }
void* buffer() const { return image_data.buffer; }
int32_t status() const { return image_data.status; }
uint64_t timestamp() const { return image_data.timestamp; }
int32_t imageIndex() const { return image_data.imageIndex; }
bool resize(int w, int h);
TY_PIXEL_FORMAT pixelFormat() const { return image_data.pixelFormat; }
TY_COMPONENT_ID componentID() const { return image_data.componentID; }
const TY_IMAGE_DATA* image() const { return &image_data; }
private:
bool m_isOwner = false;
TY_IMAGE_DATA image_data;
};
class TYFrame
{
public:
~TYFrame();
void operator=(TYFrame const&) = delete;
TYFrame(TYFrame const&) = delete;
TYFrame(const TY_FRAME_DATA& frame);
std::shared_ptr<TYImage> depthImage() { return _images[TY_COMPONENT_DEPTH_CAM];}
std::shared_ptr<TYImage> colorImage() { return _images[TY_COMPONENT_RGB_CAM];}
std::shared_ptr<TYImage> leftIRImage() { return _images[TY_COMPONENT_IR_CAM_LEFT];}
std::shared_ptr<TYImage> rightIRImage() { return _images[TY_COMPONENT_IR_CAM_RIGHT];}
private:
int32_t bufferSize = 0;
std::vector<uint8_t> userBuffer;
typedef std::map<TY_COMPONENT_ID, std::shared_ptr<TYImage>> ty_image;
ty_image _images;
};
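// Per-stream image handler: parse() converts a raw TYImage into a displayable form (XYZ48 is
// reduced to DEPTH16, other formats are decoded via the ISP/OpenCV), with optional undistortion,
// depth rendering and an OpenCV display window.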
class ImageProcesser
{
public:
ImageProcesser(const char* win, const TY_CAMERA_CALIB_INFO* calib_data = nullptr, const TY_ISP_HANDLE isp_handle = nullptr);
~ImageProcesser() {clear();}
virtual int parse(const std::shared_ptr<TYImage>& image);
int DepthImageRender();
TY_STATUS doUndistortion();
int show();
void clear();
TY_ISP_HANDLE isp_handle() const { return color_isp_handle; }
const std::shared_ptr<TYImage>& image() const { return _image; }
const std::string& win() { return win_name; }
protected:
std::shared_ptr<TYImage> _image;
private:
std::string win_name;
TY_ISP_HANDLE color_isp_handle;
std::shared_ptr<TY_CAMERA_CALIB_INFO> _calib_data;
bool hasWin;
};
typedef void (*TYFrameKeyBoardEventCallback) (int, void*);
typedef std::map<TY_COMPONENT_ID, std::shared_ptr<ImageProcesser>> ty_stream;
class TYFrameParser
{
public:
TYFrameParser(uint32_t max_queue_size = 4, const TY_ISP_HANDLE isp_handle = nullptr);
~TYFrameParser();
void RegisterKeyBoardEventCallback(TYFrameKeyBoardEventCallback cb, void* data) {
user_data = data;
func_keyboard_event = cb;
}
int setImageProcesser(TY_COMPONENT_ID id, std::shared_ptr<ImageProcesser> proc);
virtual int doProcess(const std::shared_ptr<TYFrame>& frame);
void update(const std::shared_ptr<TYFrame>& frame);
protected:
ty_stream stream;
private:
std::mutex _queue_lock;
uint32_t _max_queue_size;
bool isRuning;
std::thread processThread_;
void* user_data;
TYFrameKeyBoardEventCallback func_keyboard_event;
std::queue<std::shared_ptr<TYFrame>> images;
inline void ImageQueueSizeCheck();
inline void display();
};
}

View File

@@ -0,0 +1,221 @@
# How to set image resolution and frame rate
Based on the code of the sample program `sample_v2`, the image resolution and frame rate can be set as follows.
## Setting the image resolution
There are two ways to set the image resolution:
### Method 1: Set width and height directly with TY_INT_WIDTH and TY_INT_HEIGHT
```cpp
#include "TYApi.h"
// Get the device handle (assuming the device is already open)
TY_DEV_HANDLE hDevice = ...; // obtained from FastCamera::handle()
// Set width and height
TY_STATUS status;
// Set the width (e.g. 1280)
status = TYSetInt(hDevice, TY_COMPONENT_RGB_CAM, TY_INT_WIDTH, 1280);
if (status != TY_STATUS_OK) {
// handle the error
}
// Set the height (e.g. 960)
status = TYSetInt(hDevice, TY_COMPONENT_RGB_CAM, TY_INT_HEIGHT, 960);
if (status != TY_STATUS_OK) {
// handle the error
}
// For the depth camera
status = TYSetInt(hDevice, TY_COMPONENT_DEPTH_CAM, TY_INT_WIDTH, 640);
status = TYSetInt(hDevice, TY_COMPONENT_DEPTH_CAM, TY_INT_HEIGHT, 480);
```
### Method 2: Use TY_ENUM_IMAGE_MODE (recommended; sets resolution and pixel format together)
```cpp
#include "TYApi.h"
#include "TYDefs.h"
TY_DEV_HANDLE hDevice = ...;
// Get the list of supported image modes
TY_ENUM_ENTRY mode_entry[10];
uint32_t num;
TY_STATUS status = TYGetEnumEntryInfo(hDevice, TY_COMPONENT_RGB_CAM,
TY_ENUM_IMAGE_MODE, mode_entry, 10, &num);
if (status == TY_STATUS_OK) {
// Print all supported modes
for (uint32_t i = 0; i < num; i++) {
printf("Mode %d: %s (value: 0x%x)\n", i, mode_entry[i].description, mode_entry[i].value);
}
// Set the image mode, e.g. RGB 1280x960
// Predefined modes can be used, such as:
// TY_IMAGE_MODE_RGB_1280x960
// TY_IMAGE_MODE_RGB_640x480
// TY_IMAGE_MODE_MONO_640x480
// and so on
TY_IMAGE_MODE img_mode = TY_IMAGE_MODE_RGB_1280x960;
status = TYSetEnum(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, img_mode);
if (status != TY_STATUS_OK) {
// handle the error
}
}
// Or use the first available mode (the default mode)
TY_IMAGE_MODE img_mode;
status = TYGetEnum(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, &img_mode);
if (status == TY_STATUS_OK) {
// Parse the width and height from the image mode
int width = TYImageWidth(img_mode);
int height = TYImageHeight(img_mode);
printf("Current mode: %dx%d\n", width, height);
}
```
### Using the helper functions from the sample code
`Utils.hpp` provides helper functions:
```cpp
#include "Utils.hpp"
TY_DEV_HANDLE hDevice = ...;
TY_COMPONENT_ID compID = TY_COMPONENT_RGB_CAM;
TY_IMAGE_MODE image_mode;
// Get the default image mode
TY_STATUS status = get_default_image_mode(hDevice, compID, image_mode);
// Or get the mode at a specific index
status = get_image_mode(hDevice, compID, image_mode, 0); // get the first mode
// Set the image mode
status = TYSetEnum(hDevice, compID, TY_ENUM_IMAGE_MODE, image_mode);
```
## Setting the frame rate
The frame rate is usually set through the trigger parameters (Trigger Parameter):
```cpp
#include "TYApi.h"
#include "TYDefs.h"
TY_DEV_HANDLE hDevice = ...;
// Configure the trigger parameters (including the frame rate)
TY_TRIGGER_PARAM trigger_param;
trigger_param.mode = TY_TRIGGER_MODE_M_PER; // master mode, periodic trigger
trigger_param.fps = 30; // set the frame rate to 30 FPS
trigger_param.rsvd = 0;
TY_STATUS status = TYSetStruct(hDevice, TY_COMPONENT_DEVICE,
TY_STRUCT_TRIGGER_PARAM,
&trigger_param, sizeof(trigger_param));
if (status != TY_STATUS_OK) {
// handle the error
}
```
### Trigger mode notes
- `TY_TRIGGER_MODE_OFF`: trigger off (continuous mode); see the sketch after this list
- `TY_TRIGGER_MODE_ON`: single trigger
- `TY_TRIGGER_MODE_M_PER`: master mode, sends trigger signals periodically; requires the `fps` parameter
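To go back to free-running capture, the same `TYSetStruct` call can be used with triggering turned off. A minimal sketch, assuming the `TY_TRIGGER_PARAM` layout shown above (the `fps` field is presumably ignored in this mode):
```cpp
// Sketch: switch back to continuous (free-running) capture.
TY_TRIGGER_PARAM trigger_param;
trigger_param.mode = TY_TRIGGER_MODE_OFF; // trigger off: continuous mode
trigger_param.fps = 0;                    // assumed unused when triggering is off
trigger_param.rsvd = 0;
TY_STATUS status = TYSetStruct(hDevice, TY_COMPONENT_DEVICE, TY_STRUCT_TRIGGER_PARAM,
&trigger_param, sizeof(trigger_param));
if (status != TY_STATUS_OK) {
// handle the error
}
```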
## Complete example
The following complete example shows how to set the resolution and frame rate with the `FastCamera` class:
```cpp
#include "Device.hpp"
#include "TYApi.h"
using namespace percipio_layer;
// Open the camera
FastCamera cam;
TY_STATUS status = cam.open(nullptr); // or pass a serial number
if (status != TY_STATUS_OK) {
return;
}
TY_DEV_HANDLE hDevice = cam.handle();
// 1. Set the image resolution (method 1: set width and height directly)
status = TYSetInt(hDevice, TY_COMPONENT_RGB_CAM, TY_INT_WIDTH, 1280);
status = TYSetInt(hDevice, TY_COMPONENT_RGB_CAM, TY_INT_HEIGHT, 960);
// Or method 2: use an image mode
TY_IMAGE_MODE img_mode = TY_IMAGE_MODE_RGB_1280x960;
status = TYSetEnum(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, img_mode);
// 2. Set the frame rate
TY_TRIGGER_PARAM trigger_param;
trigger_param.mode = TY_TRIGGER_MODE_M_PER;
trigger_param.fps = 30; // 30 FPS
trigger_param.rsvd = 0;
status = TYSetStruct(hDevice, TY_COMPONENT_DEVICE, TY_STRUCT_TRIGGER_PARAM,
&trigger_param, sizeof(trigger_param));
// 3. Enable the stream and start capturing
cam.stream_enable(FastCamera::stream_color);
status = cam.start();
// 4. Fetch a frame
auto frame = cam.tryGetFrames(1000); // 1000 ms timeout
if (frame) {
auto color_img = frame->colorImage();
if (color_img) {
printf("Image size: %dx%d\n", color_img->width(), color_img->height());
}
}
```
## Notes
1. **When to set**: the resolution should be set before starting capture (`start()`)
2. **Component IDs**: different components (RGB camera, depth camera, IR cameras) must be configured separately:
- `TY_COMPONENT_RGB_CAM`: RGB (color) camera
- `TY_COMPONENT_DEPTH_CAM`: depth camera
- `TY_COMPONENT_IR_CAM_LEFT`: left IR camera
- `TY_COMPONENT_IR_CAM_RIGHT`: right IR camera
3. **Supported parameters**: not every device supports every resolution and frame rate; query the supported ranges first
4. **Image mode**: when using `TY_ENUM_IMAGE_MODE`, a mode encodes both the resolution and the pixel format
## Querying the supported parameters
```cpp
// Query the width range
TY_INT_RANGE width_range;
TY_STATUS status = TYGetIntRange(hDevice, TY_COMPONENT_RGB_CAM,
TY_INT_WIDTH, &width_range);
if (status == TY_STATUS_OK) {
printf("Width range: %d - %d\n", width_range.min, width_range.max);
}
// Query all supported image modes
TY_ENUM_ENTRY mode_entry[20];
uint32_t num;
status = TYGetEnumEntryInfo(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE,
mode_entry, 20, &num);
if (status == TY_STATUS_OK) {
for (uint32_t i = 0; i < num; i++) {
printf("Mode %d: %s\n", i, mode_entry[i].description);
}
}
```
## Reference files
- `image_capture/camera_sdk/sample_v2/cpp/Device.cpp`: device operation example
- `image_capture/camera_sdk/common/BayerISP.hpp`: image mode usage example (lines 97-111)
- `image_capture/camera_sdk/include/TYDefs.h`: definitions and enum values
- `image_capture/camera_sdk/include/TYApi.h`: API function declarations