You may wonder why we need to convert an image from one format to another. Converting between formats has many advantages for transmission as well as for display. Sometimes one image format is easier to process than another. Generally, images are processed by algorithms in the YUV domain and then converted to the RGB domain for display. While capturing images the camera uses the RGB format, but for storage we use the YUV format because it compresses better; when we need to display the images we convert them back to RGB. YUV images generally require less bandwidth than RGB. Color correction is usually done in the RGB color space, while contrast enhancement is done in the YUV color space. The Y component carries the brightness of the image, and the remaining two components carry the color information.
The following code shows how we can achieve this:
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include <conio.h>
void main()
{
IplImage *img=cvLoadImage("E:/test.jpg");
IplImage *Dimg=cvCreateImage(cvSize(img->width,img->height),img->depth,img->nChannels);
//VUY
IplImage *Y=cvCreateImage(cvSize(img->width,img->height),img->depth,3);
IplImage *U=cvCreateImage(cvSize(img->width,img->height),img->depth,3);
IplImage *V=cvCreateImage(cvSize(img->width,img->height),img->depth,3);
for(int i=0;i<img->width*img->height*3;i+=3)
{
int y=0.257*img->imageData[i+2]+0.504*img->imageData[i+1]+0.098*img->imageData[i]+16;
int u=-0.148*img->imageData[i+2]-0.291*img->imageData[i+1]+0.439*img->imageData[i]+128;
int v=img->imageData[i+2]*0.439-0.368*img->imageData[i+1]-0.071*img->imageData[i]+128;
Dimg->imageData[i]=(v<255||v>0)?v:(v>255?255:0);
Dimg->imageData[i+1]=(u<255||u>0)?u:(u>255?255:0);
Dimg->imageData[i+2]=(y<255||y>0)?y:(y>255?255:0);
Y->imageData[i+2]=Dimg->imageData[i+2];
U->imageData[i+1]=Dimg->imageData[i+1];
V->imageData[i]=Dimg->imageData[i];
}
cvNamedWindow("Y",0);
cvResizeWindow("Y",300,300);
cvNamedWindow("U",0);
cvResizeWindow("U",300,300);
cvNamedWindow("V",0);
cvResizeWindow("V",300,300);
cvShowImage("Y",Y);
cvShowImage("U",U);
cvShowImage("V",V);
cvWaitKey(0);
_getch();
}
While calculating the YUV components we may sometimes encounter underflow or overflow, so we have to make sure the code does not break while running: the computed values must be clamped to the valid range in our code.
The code above loads the image using OpenCV. The individual channels are then extracted, converted to the YUV domain, and stored in a newly created image. To show the difference between the YUV components, each channel is displayed in its own window.
The following code shows how we can achieve this:
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include <conio.h>
void main()
{
IplImage *img=cvLoadImage("E:/test.jpg");
IplImage *Dimg=cvCreateImage(cvSize(img->width,img->height),img->depth,img->nChannels);
//VUY
IplImage *Y=cvCreateImage(cvSize(img->width,img->height),img->depth,3);
IplImage *U=cvCreateImage(cvSize(img->width,img->height),img->depth,3);
IplImage *V=cvCreateImage(cvSize(img->width,img->height),img->depth,3);
for(int i=0;i<img->width*img->height*3;i+=3)
{
int y=0.257*img->imageData[i+2]+0.504*img->imageData[i+1]+0.098*img->imageData[i]+16;
int u=-0.148*img->imageData[i+2]-0.291*img->imageData[i+1]+0.439*img->imageData[i]+128;
int v=img->imageData[i+2]*0.439-0.368*img->imageData[i+1]-0.071*img->imageData[i]+128;
Dimg->imageData[i]=(v<255||v>0)?v:(v>255?255:0);
Dimg->imageData[i+1]=(u<255||u>0)?u:(u>255?255:0);
Dimg->imageData[i+2]=(y<255||y>0)?y:(y>255?255:0);
Y->imageData[i+2]=Dimg->imageData[i+2];
U->imageData[i+1]=Dimg->imageData[i+1];
V->imageData[i]=Dimg->imageData[i];
}
cvNamedWindow("Y",0);
cvResizeWindow("Y",300,300);
cvNamedWindow("U",0);
cvResizeWindow("U",300,300);
cvNamedWindow("V",0);
cvResizeWindow("V",300,300);
cvShowImage("Y",Y);
cvShowImage("U",U);
cvShowImage("V",V);
cvWaitKey(0);
_getch();
}
While calculating the YUV components we may sometimes encounter underflow or overflow, so we have to make sure the code does not break while running: the computed values must be clamped to the valid range in our code.
The code above loads the image using OpenCV. The individual channels are then extracted, converted to the YUV domain, and stored in a newly created image. To show the difference between the YUV components, each channel is displayed in its own window.