OpenCv & Qt4



switch
4th August 2009, 16:12
Hi, I have been trying to integrate OpenCV with Qt4. I have had some success using a piece of code I found on this forum, which allowed me to convert a standard IplImage with 3 channels (RGB). However, I am now trying to convert a mono IplImage. I am using the OpenCV function cvInRangeS to generate a binary image (1-channel image), which works fine; I have checked it using cvShowImage(). However, when I pass my mono image to the function which converts it into a QImage (IplImageToQImage), the resulting image is a mono image containing the same image 8 times. Does anyone know what's happening?
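
For reference, the mono image comes out of cvInRangeS roughly like this (the frame variable and the threshold values below are just placeholders, not my exact code):

// Needs <cv.h> and <highgui.h>; "frame" and the bounds are placeholders.
IplImage *mono = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvInRangeS(frame, cvScalar(0, 0, 100), cvScalar(100, 100, 255), mono);
cvNamedWindow("mono check", CV_WINDOW_AUTOSIZE);
cvShowImage("mono check", mono);   // this window shows the expected binary image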

I am calling the function as follows:

qImage = IplImageToQImage(mono, &data,0,0);

I am then displaying it using:

label_cameraStream->setPixmap(QPixmap::fromImage(*qImage));
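
In case it is relevant, the whole display step fits together roughly like this; since IplImageToQImage mallocs the buffer and hands it back through data, and QPixmap::fromImage() takes a copy, both allocations can be released once the label has its pixmap:

uchar *data = 0;
QImage *qImage = IplImageToQImage(mono, &data, 0, 0);
label_cameraStream->setPixmap(QPixmap::fromImage(*qImage));
// The QImage built on an external buffer does not own it, and the pixmap
// above is a copy, so the QImage and the buffer can be freed here.
delete qImage;
free(data);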

Results

OpenCV mono image:
http://i701.photobucket.com/albums/ww17/robokinetic/monoOpenCv.png

Image after conversion to QImage:

http://i701.photobucket.com/albums/ww17/robokinetic/QimageMono.png

---------------------------------------------------------------------------------------------------
Function to convert an IplImage (OpenCV image) to a QImage (Qt4)
---------------------------------------------------------------------------------------------------
QImage *IplImageToQImage(const IplImage *temp, uchar **data, double mini, double maxi)
{
    uchar *qImageBuffer = NULL;
    int width = temp->width;

    /* Note here that the OpenCV image is stored so that each line is
     * 32-bit aligned, thus explaining the necessity to "skip" the few
     * last bytes of each line of the OpenCV image buffer.
     */
    int widthStep = temp->widthStep;
    int height = temp->height;

    switch (temp->depth) {
    case IPL_DEPTH_8U:
        if (temp->nChannels == 1) {
            /* The OpenCV image is stored with one byte per grey pixel.
             * We convert it to an 8-bit depth QImage.
             */
            qImageBuffer = (uchar *) malloc(width * height * sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const uchar *iplImagePtr = (const uchar *) temp->imageData;

            for (int y = 0; y < height; y++) {
                // Copy line by line
                memcpy(QImagePtr, iplImagePtr, width);
                QImagePtr += width;
                iplImagePtr += widthStep;
            }
        }
        else if (temp->nChannels == 3) {
            /* The OpenCV image is stored with 3-byte colour pixels (3 channels).
             * We convert it to a 32-bit depth QImage.
             */
            qImageBuffer = (uchar *) malloc(width * height * 4 * sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const uchar *iplImagePtr = (const uchar *) temp->imageData;

            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    // We cannot help but copy manually.
                    QImagePtr[0] = iplImagePtr[0];
                    QImagePtr[1] = iplImagePtr[1];
                    QImagePtr[2] = iplImagePtr[2];
                    QImagePtr[3] = 0;

                    QImagePtr += 4;
                    iplImagePtr += 3;
                }
                iplImagePtr += widthStep - 3 * width;
            }
        }
        else {
            qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", temp->nChannels);
        }
        break;

    case IPL_DEPTH_16U:
        if (temp->nChannels == 1) {
            /* The OpenCV image is stored with 2 bytes per grey pixel.
             * We convert it to an 8-bit depth QImage.
             */
            qImageBuffer = (uchar *) malloc(width * height * sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const uint16_t *iplImagePtr = (const uint16_t *) temp->imageData;

            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    // We take only the highest part of the 16-bit value. It is similar to dividing by 256.
                    *QImagePtr++ = ((*iplImagePtr++) >> 8);
                }
                iplImagePtr += widthStep / sizeof(uint16_t) - width;
            }
        }
        else {
            qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", temp->nChannels);
        }
        break;

    case IPL_DEPTH_32F:
        if (temp->nChannels == 1) {
            /* The OpenCV image is stored with float (4 bytes) grey pixels.
             * We convert it to an 8-bit depth QImage.
             */
            qImageBuffer = (uchar *) malloc(width * height * sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const float *iplImagePtr = (const float *) temp->imageData;

            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    uchar p;
                    float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);

                    if (pf < 0) p = 0;
                    else if (pf > 255) p = 255;
                    else p = (uchar) pf;

                    *QImagePtr++ = p;
                }
                iplImagePtr += widthStep / sizeof(float) - width;
            }
        }
        else {
            qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", temp->nChannels);
        }
        break;

    case IPL_DEPTH_64F:
        if (temp->nChannels == 1) {
            /* The OpenCV image is stored with double (8 bytes) grey pixels.
             * We convert it to an 8-bit depth QImage.
             */
            qImageBuffer = (uchar *) malloc(width * height * sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const double *iplImagePtr = (const double *) temp->imageData;

            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    uchar p;
                    double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);

                    if (pf < 0) p = 0;
                    else if (pf > 255) p = 255;
                    else p = (uchar) pf;

                    *QImagePtr++ = p;
                }
                iplImagePtr += widthStep / sizeof(double) - width;
            }
        }
        else {
            qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", temp->nChannels);
        }
        break;

    default:
        qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", temp->depth, temp->nChannels);
    }

    QImage *qImage;
    if (temp->nChannels == 1) {
        // We should check who is going to destroy this allocation.
        QRgb *colorTable = new QRgb[256];

        for (int i = 0; i < 256; i++)
            colorTable[i] = qRgb(i, i, i);

        qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32); // 8);//, QImage::IgnoreEndian);
    }
    else {
        qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32); // 32);//, 256 );//,QImage::IgnoreEndian);
    }
    *data = qImageBuffer;
    return qImage;
}
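
For what it's worth, my current guess is that the problem is in the final section: for a 1-channel image the buffer holds one byte per pixel, but the QImage is created with Format_RGB32, so Qt reads four bytes per pixel (and a 4*width stride), which would squash the picture and repeat it across each row. This is the kind of change I'm thinking of trying for the nChannels == 1 case; it is untested, so treat it as a sketch:

// Untested idea for the nChannels == 1 case: wrap the grey buffer in an
// 8-bit indexed QImage with an explicit stride and a greyscale colour
// table, instead of treating it as RGB32.
// (The bytesPerLine constructor needs Qt >= 4.5; with an older Qt the
// buffer would have to be padded so each line is a multiple of 4 bytes.)
QImage *qImage = new QImage(qImageBuffer, width, height,
                            width /* bytesPerLine */, QImage::Format_Indexed8);
QVector<QRgb> colorTable;
for (int i = 0; i < 256; i++)
    colorTable.append(qRgb(i, i, i));
qImage->setColorTable(colorTable);

Does that look right, or am I off track?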