
BufferedImage(JDK)<----interoperate---->Mat(OpenCV)

2014-03-06 19:00

Alpha Channel Support

OpenCV has no real support for an alpha channel (it works with masks instead), whereas BufferedImage does support alpha. So how do we get at the pixel data of a BufferedImage img? There are currently two approaches:

(1) img.getRGB(0, 0, w, h, pixels, 0, w); // here pixels is an int[]. Each int packs four bytes as 0xAARRGGBB: the highest byte is alpha, followed by the R, G and B values. getRGB always returns this packing regardless of the underlying image type; TYPE_3BYTE_BGR is singled out below because that is the type of the images in our project. For example:

BufferedImage img = ImageIO.read(new File("***"));
int[] rgb = new int[w * h];
img.getRGB(0, 0, w, h, rgb, 0, w);
int b = rgb[0] & 0xff;          // bits 0-7   = blue
int g = (rgb[0] >> 8) & 0xff;   // bits 8-15  = green
int r = (rgb[0] >> 16) & 0xff;  // bits 16-23 = red


Note (from the javadoc): TYPE_3BYTE_BGR represents an image with 8-bit RGB color components, corresponding to a Windows-style BGR color model, with the colors Blue, Green, and Red stored in 3 bytes. There is no alpha. The image has a ComponentColorModel. When data with non-opaque alpha is stored in an image of this type, the color data must be adjusted to a non-premultiplied form and the alpha discarded.

(2) byte[] pixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData(); // this returns the raw raster directly: for TYPE_3BYTE_BGR, three bytes per pixel in B, G, R order, with no alpha at all. No alpha and BGR byte order: isn't that exactly the data layout of an OpenCV Mat?!


BufferedImage img=ImageIO.read(new File("/home/chase/Pictures/hand.png"));
System.out.println(img.getType());
int w=img.getWidth();
int h=img.getHeight();
System.out.println(h+" "+w+" "+w*h);
int[]bgr=new int[w*h];
img.getRGB(0, 0, w, h, bgr, 0, w);
byte[] by=((DataBufferByte)img.getRaster().getDataBuffer()).getData();
System.out.println(by.length);
int r=(bgr[3] & 0xff);System.out.println("r:"+r);
int g=(bgr[3] & 0xff00)>>8;System.out.println("g:"+g);
int b=(bgr[3] & 0xff0000)>>16;System.out.println("b:"+b);
System.out.println("b:"+by[9]);
System.out.println("g:"+by[10]);
System.out.println("r:"+by[11]);


A question I ran into: rgb[i] from (1) and pixels[3*i], pixels[3*i+1], pixels[3*i+2] from (2) did not seem to correspond. The explanation: the int rgb[i] is packed as [alpha][R][G][B] (0xAARRGGBB), while the raster bytes pixels[3*i], pixels[3*i+1], pixels[3*i+2] run in the order [B][G][R]. So the two views do describe the same pixel, just with the color channels in opposite order, which is harmless as long as you keep track of which order you are reading.

A TYPE_3BYTE_BGR image uses the standard CS_sRGB color space.
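A minimal sketch to verify this correspondence (the path is a placeholder; the exact equality assumes the file actually decodes to TYPE_3BYTE_BGR, which ImageIO typically produces for JPEGs, so check img.getType() first):

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.File;
import javax.imageio.ImageIO;

public class ChannelOrderCheck {
    public static void main(String[] args) throws Exception {
        BufferedImage img = ImageIO.read(new File("test.jpg"));  // placeholder path
        int w = img.getWidth(), h = img.getHeight();
        int[] argb = new int[w * h];
        img.getRGB(0, 0, w, h, argb, 0, w);
        byte[] raster = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
        for (int i = 0; i < w * h; i++) {
            // int view: 0xAARRGGBB; raster view: B, G, R bytes
            int b = argb[i] & 0xff;
            int g = (argb[i] >> 8) & 0xff;
            int r = (argb[i] >> 16) & 0xff;
            if (b != (raster[3 * i] & 0xff) || g != (raster[3 * i + 1] & 0xff) || r != (raster[3 * i + 2] & 0xff))
                throw new AssertionError("mismatch at pixel " + i);
        }
        System.out.println("0xAARRGGBB ints match the B,G,R raster bytes");
    }
}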

BufferedImage-->Mat

Displaying a single BufferedImage with OpenCV

C++ code:

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
/* Header for class gmmopencv_BufferedImg2Mat */

#ifndef _Included_gmmopencv_BufferedImg2Mat
#define _Included_gmmopencv_BufferedImg2Mat
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class:     gmmopencv_BufferedImg2Mat
* Method:    JNIShowBufferedImg
* Signature: (III[B)V
*/
JNIEXPORT void JNICALL Java_gmmopencv_BufferedImg2Mat_JNIShowBufferedImg(JNIEnv *env, jobject, jint w, jint h, jint comps, jbyteArray pixels)
{
    // wrap the Java byte[] in an IplImage header without copying
    IplImage* img = cvCreateImageHeader(cvSize(w, h), 8, comps);
    jbyte* pixel = env->GetByteArrayElements(pixels, 0);
    cvSetData(img, pixel, comps * w);
    Mat frame = Mat(img);  // Mat header over the same data
    imshow("image", frame);
    cvWaitKey(0);
    cvReleaseImageHeader(&img);  // releases only the header, not the Java-owned data
    env->ReleaseByteArrayElements(pixels, pixel, 0);
}

#ifdef __cplusplus
}
#endif
#endif


Java code:

package gmmopencv;

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.File;
import java.io.IOException;

import javax.imageio.ImageIO;

public class BufferedImg2Mat {
    public native void JNIShowBufferedImg(int w, int h, int num_of_channel, byte[] pixels);

    static { System.loadLibrary("jnishowimg"); }

    public static void main(String args[]) throws IOException {
        BufferedImage img = ImageIO.read(new File("/home/chase/Pictures/desktop.jpg"));
        int w = img.getWidth();
        int h = img.getHeight();
        int comps = img.getColorModel().getNumColorComponents();
        byte[] pixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
        new BufferedImg2Mat().JNIShowBufferedImg(w, h, comps, pixels);
    }
}
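A note on building (the exact commands are an assumption here, they are not shown in the original): generate the header with javah, compile the .cpp into a shared library with g++ -shared -fPIC, link against OpenCV (e.g. -lopencv_core -lopencv_highgui), and make sure the resulting libjnishowimg.so is on java.library.path.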


For the details of using JNI, and the issues I was stuck on for a long time, see: Install (JDK+Eclipse)--->Try JNI--->Some issues

Displaying a continuous sequence of BufferedImages with OpenCV

Our cloud-video project needs to pull a continuous sequence of frames from a camera, so the frames must be displayed continuously. The only difference from the single-image C++ code is the wait call: waitKey(33) instead of cvWaitKey(0), so each frame is shown for about 33 ms (roughly 30 fps) instead of blocking forever. As shown below:

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
/* Header for class showjnimat_JNIMat */

#ifndef _Included_showjnimat_JNIMat
#define _Included_showjnimat_JNIMat
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class:     showjnimat_JNIMat
* Method:    JNIShowBufferedImg
* Signature: (III[B)V
*/
Mat frame;
JNIEXPORT void JNICALL Java_showjnimat_JNIMat_JNIShowBufferedImg(JNIEnv *env, jobject, jint w, jint h, jint comps, jbyteArray pixels)
{
    IplImage* img = cvCreateImageHeader(cvSize(w, h), 8, comps);
    jbyte* pixel = env->GetByteArrayElements(pixels, 0);
    cvSetData(img, pixel, comps * w);
    frame = Mat(img);
    imshow("image", frame);
    cv::waitKey(33);  // ~30 fps; returns instead of blocking
    cvReleaseImageHeader(&img);
    env->ReleaseByteArrayElements(pixels, pixel, 0);
}

#ifdef __cplusplus
}
#endif
#endif


The Java code differs a bit more, because it has to grab consecutive frames. As shown below:

package showjnimat;

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.net.MalformedURLException;

import com.github.sarxos.webcam.Webcam;
import com.github.sarxos.webcam.WebcamDevice;
import com.github.sarxos.webcam.ds.ipcam.IpCamDeviceRegistry;
import com.github.sarxos.webcam.ds.ipcam.IpCamDriver;
import com.github.sarxos.webcam.ds.ipcam.IpCamMode;

public class JNIMat {
    public native void JNIShowBufferedImg(int w, int h, int comps, byte[] pixels);

    static {
        System.loadLibrary("jnishowimg");
        Webcam.setDriver(new IpCamDriver());
    }

    public static void main(String args[]) throws MalformedURLException, InterruptedException {
        IpCamDeviceRegistry.register("ZModo", "http://admin:123456@192.168.1.20:80/videostream.cgi", IpCamMode.PUSH);
        WebcamDevice camdevice = Webcam.getWebcams().get(0).getDevice();
        camdevice.open();  // MUST call open(), I was stuck on this bug for a long, long time!
        JNIMat jnimat = new JNIMat();
        BufferedImage img;
        int w, h, comps;
        byte[] pixels;
        while (true) {
            long t1 = System.currentTimeMillis();
            img = camdevice.getImage();
            w = img.getWidth();
            h = img.getHeight();
            comps = img.getColorModel().getNumColorComponents();
            pixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
            jnimat.JNIShowBufferedImg(w, h, comps, pixels);
            long t2 = System.currentTimeMillis();
            System.out.println(1000f / (t2 - t1));  // rough frames per second
        }
    }
}


Mat---->BufferedImage

This part mainly concerns converting a CV_8U Mat into a binary BufferedImage. It is done in two halves: the first, implemented in native JNI code, returns a byte[]; the second, in Java, turns that byte[] into a binary image.

Part 1, the native code:

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
/* Header for class upc_se_intelmonitor_GMM4BgS */

#ifndef _Included_upc_se_intelmonitor_GMM4BgS
#define _Included_upc_se_intelmonitor_GMM4BgS
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class:     upc_se_intelmonitor_GMM4BgS
* Method:    subtract
* Signature: (II[B)[B
*/
BackgroundSubtractorMOG2 mog;
Mat fg;
Mat frame;

JNIEXPORT jbyteArray JNICALL Java_upc_se_intelmonitor_GMM4BgS_subtract
  (JNIEnv *env, jclass, jint w, jint h, jbyteArray bgr)
{
    IplImage* img = cvCreateImageHeader(cvSize(w, h), 8, 3);
    jbyte* pixel = env->GetByteArrayElements(bgr, 0);
    cvSetData(img, pixel, 3 * w);
    frame = cv::Mat(img);
    imshow("raw_frame", frame);
    mog.operator()(frame, fg);  // MOG2 background subtraction; fg is CV_8U, 0 or 255
    imshow("fg", fg);
    cvWaitKey(10);
    IplImage p_fg = IplImage(fg);  // header over the foreground mask

    // copy the single-channel mask into a fresh Java byte[]
    long length = w * h;
    jbyteArray bArray = env->NewByteArray((jsize)length);
    jbyte* byteArrayElement = env->GetByteArrayElements(bArray, NULL);
    memcpy(byteArrayElement, p_fg.imageData, length);

    cvReleaseImageHeader(&img);
    env->ReleaseByteArrayElements(bgr, pixel, 0);
    env->ReleaseByteArrayElements(bArray, byteArrayElement, 0);
    return bArray;
}

#ifdef __cplusplus
}
#endif
#endif


There is also another version that avoids the explicit memcpy; the key part:

jbyteArray bArray;
long length = fg.rows * fg.cols;

bArray = env->NewByteArray((jsize)length);
// replaces GetByteArrayElements + memcpy + ReleaseByteArrayElements:
env->SetByteArrayRegion(bArray, 0, length, (jbyte*)p_fg.imageData);
env->ReleaseByteArrayElements(bgr, pixel, 0);
return bArray;
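This is the better pattern for returning a freshly created array: SetByteArrayRegion copies straight into the Java array in a single step and needs no pin/release bookkeeping, whereas GetByteArrayElements may itself make a copy that ReleaseByteArrayElements then has to write back.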


Part 2, the Java code:

package upc.se.intelmonitor;

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.awt.image.IndexColorModel;
import java.awt.image.WritableRaster;
import java.io.IOException;

import bwimage.ShowImage;

import com.github.sarxos.webcam.Webcam;
import com.github.sarxos.webcam.WebcamDevice;
import com.github.sarxos.webcam.ds.ipcam.IpCamDeviceRegistry;
import com.github.sarxos.webcam.ds.ipcam.IpCamDriver;
import com.github.sarxos.webcam.ds.ipcam.IpCamMode;

public class GMM4BgS {
    static {
        Webcam.setDriver(new IpCamDriver());
    }
    static {
        System.out.println(System.getProperty("java.library.path"));
        System.loadLibrary("GMM4BgS");
    }

    public static native byte[] subtract(int w, int h, byte[] bgr);

    public static void main(String args[]) throws IOException {
        IpCamDeviceRegistry.register("ZModo", "http://admin:123456@192.168.1.20:80/videostream.cgi", IpCamMode.PUSH);
        WebcamDevice dev = Webcam.getWebcams().get(0).getDevice();
        dev.open();
        BufferedImage img = dev.getImage();
        // 1-bit palette: index 0 = black, index 1 = white
        byte[] bw = {(byte) 0, (byte) 0xff};
        IndexColorModel blackAndWhite = new IndexColorModel(1, 2, bw, bw, bw);
        BufferedImage fg_img = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_BYTE_BINARY, blackAndWhite);
        WritableRaster raster = fg_img.getRaster();
        ShowImage s = new ShowImage(fg_img);
        int count = 0;
        while (count < 3000) {
            img = dev.getImage();
            int w = img.getWidth();
            int h = img.getHeight();
            byte[] pixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
            byte[] fg = GMM4BgS.subtract(w, h, pixels);
            raster.setDataElements(0, 0, w, h, fg);
            s.SetImage(fg_img);
            s.draw();
            count++;
        }
    }
}
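One detail worth knowing: with TYPE_BYTE_BINARY, setDataElements keeps only the low bit of each sample byte, so MOG2's foreground value 255 (0xFF) maps to palette index 1, i.e. white, which is exactly what we want. Alternatively, here is a minimal sketch of a simpler Java half (a hypothetical helper, not in the original code) that wraps the returned mask in a grayscale image instead of a 1-bit one:

static BufferedImage toGray(byte[] fg, int w, int h) {
    // one byte per pixel, 0 = black, 255 = white
    BufferedImage out = new BufferedImage(w, h, BufferedImage.TYPE_BYTE_GRAY);
    byte[] dst = ((DataBufferByte) out.getRaster().getDataBuffer()).getData();
    System.arraycopy(fg, 0, dst, 0, w * h);
    return out;
}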


An IP camera is used to grab the image sequence and a ShowImage class to display it. The ShowImage code is as follows:

package bwimage;

import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;

import javax.imageio.ImageIO;
import javax.swing.JComponent;
import javax.swing.JFrame;

public class ShowImage extends JComponent {
    private static final long serialVersionUID = 1L;
    static JFrame frame = new JFrame("frame");
    BufferedImage b_img;

    public ShowImage(BufferedImage img) {
        super();
        this.setOpaque(false);
        b_img = img;
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.setLayout(new BorderLayout());
        frame.getContentPane().add(this, BorderLayout.CENTER);
        frame.setPreferredSize(new Dimension(640, 480));
        frame.pack();
        frame.setVisible(true);
    }

    public void SetImage(BufferedImage img) {
        b_img = img;
    }

    @Override
    protected void paintComponent(Graphics g) {
        super.paintComponent(g);
        Graphics2D g2 = (Graphics2D) g;
        g2.drawImage(b_img, 0, 0, null);
    }

    public void draw() {
        repaint();
    }

    public static void main(String args[]) throws IOException {
        BufferedImage img = ImageIO.read(new File("lena.jpg"));
        ShowImage s = new ShowImage(img);
        s.draw();
    }
}
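A note on this rendering design: SetImage only swaps the image reference, and draw() merely schedules a repaint, so the actual painting happens asynchronously on the Swing event dispatch thread. That is fine here because every frame replaces the whole image, but it does mean a frame can be skipped if repaints arrive faster than Swing paints them.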


Split a Mat and return it to Java as a byte[]

2014-04-09 11:16:32

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
/* Header for class upc_se_intelmonitor_SplitImage */

#ifndef _Included_upc_se_intelmonitor_SplitImage
#define _Included_upc_se_intelmonitor_SplitImage
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class:     upc_se_intelmonitor_SplitImage
* Method:    split
* Signature: (III[B)[B
*/
JNIEXPORT jbyteArray JNICALL Java_upc_se_intelmonitor_SplitImage_split
  (JNIEnv *env, jclass, jint w, jint h, jint whichpart, jbyteArray bgr)
{
    IplImage* img = cvCreateImageHeader(cvSize(w, h), 8, 3);
    jbyte* pixel = env->GetByteArrayElements(bgr, 0);
    cvSetData(img, pixel, 3 * w);
    Mat org_img = Mat(img);

    // clone one horizontal third of the frame (h/3 is integer division)
    Mat m_part;
    if (whichpart == 1)      m_part = org_img(Rect(0, 0, w, h / 3)).clone();
    else if (whichpart == 2) m_part = org_img(Rect(0, h / 3, w, h / 3)).clone();
    else if (whichpart == 3) m_part = org_img(Rect(0, (h / 3) * 2, w, h / 3)).clone();
    else cerr << "whichpart must be 1/2/3";
    IplImage ipl_part = IplImage(m_part);

    long part_len = m_part.cols * m_part.rows * 3;
    jbyteArray new_partArray = env->NewByteArray((jsize)part_len);
    jbyte* new_partElem = env->GetByteArrayElements(new_partArray, NULL);
    memcpy(new_partElem, ipl_part.imageData, part_len);

    cvReleaseImageHeader(&img);
    env->ReleaseByteArrayElements(bgr, pixel, 0);
    env->ReleaseByteArrayElements(new_partArray, new_partElem, 0);

    return new_partArray;
}

#ifdef __cplusplus
}
#endif
#endif
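A minimal sketch of the matching Java side (reconstructed from the header above; the library name "SplitImage" is an assumption):

package upc.se.intelmonitor;

public class SplitImage {
    static { System.loadLibrary("SplitImage"); }

    // whichpart: 1 = top third, 2 = middle third, 3 = bottom third;
    // bgr is the raster of a TYPE_3BYTE_BGR image, the result is (h/3)*w*3 bytes
    public static native byte[] split(int w, int h, int whichpart, byte[] bgr);
}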


Merging multiple byte[] from Java into one Mat

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
/* Header for class upc_se_intelmonitor_MergeImage */

#ifndef _Included_upc_se_intelmonitor_MergeImage
#define _Included_upc_se_intelmonitor_MergeImage
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class:     upc_se_intelmonitor_MergeImage
* Method:    merge
* Signature: (II[B[B[B)[B
*/
JNIEXPORT jbyteArray JNICALL Java_upc_se_intelmonitor_MergeImage_merge
  (JNIEnv *env, jclass, jint w, jint num, jbyteArray part1, jbyteArray part2, jbyteArray part3)
{
    // wrap each Java byte[] (num rows of w BGR pixels) in a Mat without copying
    IplImage* img1 = cvCreateImageHeader(cvSize(w, num), 8, 3);
    jbyte* pixel1 = env->GetByteArrayElements(part1, 0);
    cvSetData(img1, pixel1, 3 * w);
    Mat mat1 = Mat(img1);

    IplImage* img2 = cvCreateImageHeader(cvSize(w, num), 8, 3);
    jbyte* pixel2 = env->GetByteArrayElements(part2, 0);
    cvSetData(img2, pixel2, 3 * w);
    Mat mat2 = Mat(img2);

    IplImage* img3 = cvCreateImageHeader(cvSize(w, num), 8, 3);
    jbyte* pixel3 = env->GetByteArrayElements(part3, 0);
    cvSetData(img3, pixel3, 3 * w);
    Mat mat3 = Mat(img3);

    // stack the three parts vertically
    mat1.push_back(mat2);
    mat1.push_back(mat3);
    IplImage i_img = IplImage(mat1);

    long len = w * num * 3 * 3;  // 3 parts x 3 channels
    jbyteArray new_Array = env->NewByteArray((jsize)len);
    jbyte* new_Elem = env->GetByteArrayElements(new_Array, NULL);
    memcpy(new_Elem, i_img.imageData, len);

    cvReleaseImageHeader(&img1);
    cvReleaseImageHeader(&img2);
    cvReleaseImageHeader(&img3);
    env->ReleaseByteArrayElements(part1, pixel1, 0);
    env->ReleaseByteArrayElements(part2, pixel2, 0);
    env->ReleaseByteArrayElements(part3, pixel3, 0);
    env->ReleaseByteArrayElements(new_Array, new_Elem, 0);

    return new_Array;
}

#ifdef __cplusplus
}
#endif
#endif
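And a minimal sketch of the matching Java side (again reconstructed from the header above; the library name is an assumption):

package upc.se.intelmonitor;

public class MergeImage {
    static { System.loadLibrary("MergeImage"); }

    // w = width, num = rows per part; each part holds num*w*3 BGR bytes,
    // and the parts are stacked vertically into a (3*num) x w image
    public static native byte[] merge(int w, int num, byte[] part1, byte[] part2, byte[] part3);
}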