您的位置:首页 > 其它

建立渲染管线

2015-09-19 22:34 288 查看
之前讲过光栅化、裁剪和纹理,这次将把这些模块整合在一起,建立一条模拟 OpenGL 函数接口的渲染管线.

缓冲区

事实上建立渲染管线最重要且最基础的步骤是建立颜色和深度缓冲区:至少需要一个颜色缓冲区和一个深度缓冲区,双缓冲则需要两个颜色缓冲区.缓冲区的数据结构声明如下:
// Color buffer: packed 8-bit RGB, 3 bytes per pixel, row-major,
// width*height pixels total (see initFrameBuffer's width*height*3 allocation).
struct FrameBuffer {
unsigned char* colorBuffer; // RGB triplets; owned, allocated with new[]
int width,height; // dimensions in pixels
};
// Depth buffer: one float per pixel, row-major. clearDepth() resets
// every entry to 1.0 (the far plane in NDC depth).
struct DepthBuffer {
float* depthBuffer; // depth values; owned, allocated with new[]
int width,height; // dimensions in pixels
};


创建和回收函数如下:
// Allocates a width x height frame buffer and zero-fills its RGB plane.
// The struct is malloc'd (paired with free() in releaseFrameBuffer) while
// the pixel array uses new[] (paired with delete[] there).
void initFrameBuffer(FrameBuffer** pfb,int width,int height) {
    FrameBuffer* fb=(FrameBuffer*)malloc(sizeof(FrameBuffer));
    fb->width=width;
    fb->height=height;
    int byteCount=width*height*3; // 3 bytes per pixel (R,G,B)
    fb->colorBuffer=new unsigned char[byteCount];
    memset(fb->colorBuffer,0,sizeof(unsigned char)*byteCount);
    *pfb=fb;
}
// Frees a frame buffer created by initFrameBuffer and nulls the caller's
// pointer. Safe to call on an already-released (NULL) buffer.
void releaseFrameBuffer(FrameBuffer** pfb) {
    FrameBuffer* fb=*pfb;
    if(fb==NULL)
        return;
    delete[] fb->colorBuffer; // matches new[] in initFrameBuffer
    free(fb);                 // matches malloc in initFrameBuffer
    *pfb=NULL;
}

// Allocates a width x height depth buffer and zero-fills it.
// Note: rendering relies on clearDepth() to reset entries to 1.0 before
// use; this init only gives a deterministic (all-zero) starting state.
void initDepthBuffer(DepthBuffer** pdb,int width,int height) {
    DepthBuffer* db=(DepthBuffer*)malloc(sizeof(DepthBuffer));
    db->width=width;
    db->height=height;
    int count=width*height;
    db->depthBuffer=new float[count];
    memset(db->depthBuffer,0,sizeof(float)*count);
    *pdb=db;
}
// Frees a depth buffer created by initDepthBuffer and nulls the caller's
// pointer. Safe to call on an already-released (NULL) buffer.
void releaseDepthBuffer(DepthBuffer** pdb) {
    DepthBuffer* db=*pdb;
    if(db==NULL)
        return;
    delete[] db->depthBuffer; // matches new[] in initDepthBuffer
    free(db);                 // matches malloc in initDepthBuffer
    *pdb=NULL;
}


创建单缓冲渲染环境:
// Creates a single-buffered rendering "device": one color buffer plus
// one depth buffer of the same dimensions.
void initDevice(FrameBuffer** pfb,DepthBuffer** pdb,int width,int height) {
initFrameBuffer(pfb,width,height);
initDepthBuffer(pdb,width,height);
}

// Tears down a single-buffered device created by initDevice.
// Both release helpers null the caller's pointers.
void releaseDevice(FrameBuffer** pfb,DepthBuffer** pdb) {
releaseFrameBuffer(pfb);
releaseDepthBuffer(pdb);
}


创建双缓冲渲染环境:
// Creates a double-buffered device: two color buffers sharing one depth
// buffer, and wires up the global front/back pointers used by swapBuffer().
void initDevice2Buf(FrameBuffer** pfb1,FrameBuffer** pfb2,DepthBuffer** pdb,int width,int height) {
initFrameBuffer(pfb1,width,height);
initFrameBuffer(pfb2,width,height);
initDepthBuffer(pdb,width,height);
// Buffer 1 starts as the displayed (front) buffer, buffer 2 as the
// render target (back) buffer.
frontBuffer=*pfb1;
backBuffer=*pfb2;
// No complete frame has been rendered yet; swapBuffer() flips this.
buffersReady=false;
}

// Tears down a double-buffered device. The global front/back aliases are
// cleared first so nothing dangles while the buffers are freed.
void releaseDevice2Buf(FrameBuffer** pfb1,FrameBuffer** pfb2,DepthBuffer** pdb) {
frontBuffer=NULL;
backBuffer=NULL;
releaseFrameBuffer(pfb1);
releaseFrameBuffer(pfb2);
releaseDepthBuffer(pdb);
}


交换缓冲区:
// Swaps the front (displayed) and back (render-target) frame buffers.
// The identity of the current front buffer is determined by comparing
// against the two global frame buffers, keeping the invariant that
// frontBuffer/backBuffer always alias frameBuffer1/frameBuffer2.
void swapBuffer() {
    if(frontBuffer==frameBuffer1) {
        frontBuffer=frameBuffer2;
        backBuffer=frameBuffer1;
    } else {
        frontBuffer=frameBuffer1;
        backBuffer=frameBuffer2;
    }

    // After the first swap both buffers have held a rendered frame.
    // The original `if(!buffersReady){buffersReady=true;return;}` was an
    // unconditional set followed by a no-op return, so it simplifies to
    // a plain assignment with identical behavior.
    buffersReady=true;
}
双缓冲的作用是显示已经画完的前缓冲并且在后缓冲区渲染,后一帧交换前后缓冲区,把已经画完的后缓冲区显示到屏幕,然后清除前缓冲区并渲染.

清除缓冲区:
// Fills the whole color buffer with a constant RGB color.
// Walks the pixel array linearly (same write order as a row/column
// nested loop over a row-major buffer).
void clearScreen(FrameBuffer* fb,unsigned char red,unsigned char green,unsigned char blue) {
    int pixelCount=fb->width*fb->height;
    unsigned char* p=fb->colorBuffer;
    for(int px=0;px<pixelCount;px++) {
        *p++=red;
        *p++=green;
        *p++=blue;
    }
}
// Fast clear: memset fills every byte of the RGB plane with `color`,
// so only gray levels (R==G==B==color) can be produced this way.
void clearScreenFast(FrameBuffer* fb,unsigned char color) {
    int size=fb->width*fb->height*3;
    // memset takes the fill byte directly. The original passed
    // `color*sizeof(unsigned char)`, which multiplies by 1 and only
    // obscures the intent; the value written is unchanged.
    memset(fb->colorBuffer,color,size);
}
// Resets every depth entry to 1.0 (the far plane), so any fragment with
// a smaller depth passes the depth test on the next frame.
void clearDepth(DepthBuffer* db) {
    int count=db->width*db->height;
    for(int idx=0;idx<count;idx++)
        db->depthBuffer[idx]=1.0;
}
每次渲染之前都要做清除缓冲区操作.

坐标变换:
// Maps an OpenGL-style y coordinate (origin bottom-left) to a Windows
// screen y (origin top-left) by flipping around the buffer height.
// NOTE: sx is passed by reference but intentionally left untouched —
// only the y axis differs between the two coordinate systems. The
// parameter is kept so both coordinates travel through one call site.
void convertToScreen(int height,int& sx,int& sy) {
sy=height-1-sy;
}
windows采用的屏幕坐标系是y轴从上到下和opengl的坐标系正相反,所以要对y轴进行变换以便把opengl屏幕坐标映射到windows屏幕.

读写缓冲区:
// Writes one RGB pixel at OpenGL-style coordinates (x,y), flipping to
// Windows screen space before indexing the buffer.
void drawPixel(FrameBuffer* fb,int x,int y,unsigned char r,unsigned char g,unsigned char b) {
    convertToScreen(fb->height,x,y);
    // Bounds guard: the original indexed the buffer unconditionally,
    // which is out-of-bounds (undefined behavior) for any coordinate
    // outside the viewport. In-range pixels behave exactly as before.
    if(x<0||x>=fb->width||y<0||y>=fb->height)
        return;
    int index=(y*fb->width+x)*3;
    fb->colorBuffer[index]=r;
    fb->colorBuffer[index+1]=g;
    fb->colorBuffer[index+2]=b;
}
// Stores a depth value at OpenGL-style coordinates (x,y).
// Coordinates are flipped to Windows screen space before indexing.
void writeDepth(DepthBuffer* db,int x,int y,float depth) {
    convertToScreen(db->height,x,y);
    int offset=y*db->width+x;
    db->depthBuffer[offset]=depth;
}
// Reads the stored depth at OpenGL-style coordinates (x,y), used by the
// rasterizer for the depth-test comparison.
float readDepth(DepthBuffer* db,int x,int y) {
    convertToScreen(db->height,x,y);
    int offset=y*db->width+x;
    return db->depthBuffer[offset];
}
读写缓冲区用于颜色写入,深度写入和比较深度.

颜色缩放:
// Converts a float color (nominally 0.0..1.0 per channel, as produced by
// the fragment shader) to 8-bit channels for the color buffer.
// Values above 1.0 saturate to 255. Negative inputs now clamp to 0:
// the original converted negative floats straight to unsigned char,
// which is undefined behavior for out-of-range floating->integral
// conversion.
void scaleColor(float red,float green,float blue,
unsigned char& iRed,unsigned char& iGreen,unsigned char& iBlue) {
    red*=255.0;
    green*=255.0;
    blue*=255.0;
    iRed=red<0?0:(red>255?255:(unsigned char)red);
    iGreen=green<0?0:(green>255?255:(unsigned char)green);
    iBlue=blue<0?0:(blue>255?255:(unsigned char)blue);
}
shader输出的颜色是float范围从0.0到1.0,颜色缓冲区的格式是unsigned char类型范围从0到255,所以要把各个颜色分量乘以255转为unsigned char然后写入颜色缓冲区.

顶点 片段和面

还需要定义顶点,片段和面的数据结构和函数,这样就能方便地将数据输入管线了.
顶点定义和操作:
// Input vertex attributes, analogous to glsl vertex-shader attributes:
// position (x,y,z,w), normal (nx,ny,nz) and texture coordinates (s,t).
// w defaults to 1 in both constructors (homogeneous point).
struct Vertex {
float x,y,z,w;
float nx,ny,nz;
float s,t;
// Default: origin position, zero normal, zero texcoords.
Vertex(): x(0),y(0),z(0),w(1),
nx(0),ny(0),nz(0),
s(0),t(0) {}
// Position / normal / texcoord constructor; w is fixed at 1.
Vertex(float vx,float vy,float vz,
float vnx,float vny,float vnz,
float vs,float vt):
x(vx),y(vy),z(vz),w(1),
nx(vnx),ny(vny),nz(vnz),
s(vs),t(vt) {}
};

// Copies every attribute of *s into *d.
// Vertex is a plain struct of floats, so the implicitly generated
// copy-assignment performs exactly the member-by-member copy the
// original spelled out field by field.
inline void vertexCopy(Vertex* d,Vertex* s) {
    *d=*s;
}

// Vertex-shader output: the same vertex in several spaces at once —
// clip space (x,y,z,w), world space (wx..ww), view space (vx..vw) —
// plus the transformed normal and pass-through texture coordinates.
// All w components default to 1.
struct VertexOut {
float x,y,z,w;
float wx,wy,wz,ww;
float vx,vy,vz,vw;
float nx,ny,nz;
float s,t;
VertexOut(): x(0),y(0),z(0),w(1),
wx(0),wy(0),wz(0),ww(1),
vx(0),vy(0),vz(0),vw(1),
nx(0),ny(0),nz(0),
s(0),t(0) {}
// CAUTION: the parameters vx,vy,vz (clip position) shadow the
// view-space members vx,vy,vz; the members are initialized from
// vvx,vvy,vvz below. Correct, but easy to misread.
VertexOut(float vx,float vy,float vz,
float vwx,float vwy,float vwz,
float vvx,float vvy,float vvz,
float vnx,float vny,float vnz,
float vs,float vt):
x(vx),y(vy),z(vz),w(1),
wx(vwx),wy(vwy),wz(vwz),ww(1),
vx(vvx),vy(vvy),vz(vvz),vw(1),
nx(vnx),ny(vny),nz(vnz),
s(vs),t(vt) {}
};

// Copies every attribute of *s into *d.
// Like vertexCopy, VertexOut is a plain struct of floats, so the
// implicit copy-assignment replaces the seventeen hand-written
// member copies with identical behavior.
inline void vertexOutCopy(VertexOut* d,VertexOut* s) {
    *d=*s;
}
事实上这边的顶点属性类似glsl shader里的attribute.

片段定义和操作:
// Per-fragment input to the fragment shader: interpolated NDC position,
// world-space position (wx..ww), normal (used as the world normal in
// simpleFragShader) and texture coordinates. ndcZ defaults to 1 — the
// far-plane depth, matching clearDepth().
struct Fragment {
float ndcX,ndcY,ndcZ;
float wx,wy,wz,ww;
float nx,ny,nz;
float s,t;
Fragment(): ndcX(0),ndcY(0),ndcZ(1),
wx(0),wy(0),wz(0),ww(1),
nx(0),ny(0),nz(0),
s(0),t(0) {}
};

// Fragment-shader output: an RGBA color in float form (scaled to
// 8-bit by scaleColor before hitting the color buffer). Alpha
// defaults to 1 (opaque).
struct FragmentOut {
float r,g,b,a;
FragmentOut():r(0),g(0),b(0),a(1) {}
};
带out后缀的是shader输出的数据结构,没有out的则是输入shader的数据结构.

面的定义和操作:
// A triangle as it travels the pipeline: the three vertices in model
// space (modelA..C), after the vertex shader in clip space (clipA..C),
// and their 2D NDC projections (ndcA..C). clipMatrixInv caches the
// inverted clip-coordinate matrix used during rasterization to recover
// barycentric coordinates for a fragment (see calculateClipMatrixInv).
class Face {
public:
Vertex modelA,modelB,modelC;
VertexOut clipA,clipB,clipC;
VECTOR2D ndcA,ndcB,ndcC;
MATRIX4X4 clipMatrixInv;

Face(Vertex ma,Vertex mb,Vertex mc);
Face();
~Face();
void copy2Face(Vertex a,Vertex b,Vertex c);
void copy2FaceOut(VertexOut a,VertexOut b,VertexOut c);
void calculateNDCVertex();
void calculateClipMatrixInv();
};
之前提到过clipMatrixInv,这个值将会在面光栅化的函数中被用作求取片段在面上的重心坐标.

#include "face.h"

// Builds a face from three model-space vertices. The inverse clip
// matrix starts as identity until calculateClipMatrixInv() runs.
Face::Face(Vertex ma,Vertex mb,Vertex mc)
    : modelA(ma),modelB(mb),modelC(mc) {
    clipMatrixInv.LoadIdentity();
}

// Default face: vertices default-constructed, inverse clip matrix
// initialized to identity.
Face::Face() {
clipMatrixInv.LoadIdentity();
}

// Face owns no heap resources; nothing to release.
Face::~Face() {

}

// Perspective divide: projects the three clip-space vertices to 2D NDC
// by dividing x and y by w (z is not needed for the 2D projection here).
// NOTE(review): assumes no clip w is zero — the clipping stage is
// expected to have removed such vertices before this runs; verify.
void Face::calculateNDCVertex() {
float invClipAW=1.0/clipA.w;
float invClipBW=1.0/clipB.w;
float invClipCW=1.0/clipC.w;
ndcA.x=clipA.x*invClipAW;
ndcA.y=clipA.y*invClipAW;
ndcB.x=clipB.x*invClipBW;
ndcB.y=clipB.y*invClipBW;
ndcC.x=clipC.x*invClipCW;
ndcC.y=clipC.y*invClipCW;
}

// Builds a matrix from the three vertices' clip-space (x, y, w) values
// — entries 0-2, 4-6 and 8-10, presumably one column per vertex given
// the consecutive indexing (TODO: confirm MATRIX4X4 is column-major) —
// then inverts it. The rasterizer uses this inverse to recover a
// fragment's barycentric coordinates on the face.
void Face::calculateClipMatrixInv() {
clipMatrixInv.LoadIdentity();
clipMatrixInv.entries[0]=clipA.x;
clipMatrixInv.entries[1]=clipA.y;
clipMatrixInv.entries[2]=clipA.w;
clipMatrixInv.entries[4]=clipB.x;
clipMatrixInv.entries[5]=clipB.y;
clipMatrixInv.entries[6]=clipB.w;
clipMatrixInv.entries[8]=clipC.x;
clipMatrixInv.entries[9]=clipC.y;
clipMatrixInv.entries[10]=clipC.w;
// NOTE(review): if the three vertices are collinear in clip space the
// matrix is singular; behavior then depends on MATRIX4X4::Invert().
clipMatrixInv.Invert();
}

// Replaces the model-space vertices of this face. Plain assignment
// performs the same member-by-member copy as vertexCopy().
void Face::copy2Face(Vertex a,Vertex b,Vertex c) {
    modelA=a;
    modelB=b;
    modelC=c;
}

// Replaces the clip-space vertices of this face. Plain assignment
// performs the same member-by-member copy as vertexOutCopy().
void Face::copy2FaceOut(VertexOut a,VertexOut b,VertexOut c) {
    clipA=a;
    clipB=b;
    clipC=c;
}


渲染管线

这一部分是重点,要理解透彻.
视口变换和逆视口变换:
// Viewport transform: maps an NDC coordinate in [-1,1] to a pixel
// coordinate in [0,size-1].
void viewPortTransform(float ndcX,float ndcY,float width,float height,
float& screenX,float& screenY) {
    float normX=(ndcX+1.0)*0.5; // [-1,1] -> [0,1]
    float normY=(ndcY+1.0)*0.5;
    screenX=normX*(width-1.0);  // [0,1] -> [0,width-1]
    screenY=normY*(height-1.0);
}
// Inverse viewport transform: maps a pixel coordinate in [0,size-1]
// back to NDC in [-1,1] (used when rasterizing per-pixel).
void invViewPortTransform(int screenX,int screenY,float width,float height,
float& ndcX,float& ndcY) {
    float tX=(float)screenX/(width-1.0);  // [0,width-1] -> [0,1]
    float tY=(float)screenY/(height-1.0);
    ndcX=tX*2.0-1.0;                      // [0,1] -> [-1,1]
    ndcY=tY*2.0-1.0;
}


背面剔除:
// World-space face culling: dots the eye->vertexA vector against the
// face normal (taken from vertex A) to decide whether the face looks
// away from (back) or toward (front) the viewer. Returns true when the
// face should be discarded under the given flag.
bool cullFace(Face* face,int flag) {
    if(flag==CULL_NONE)
        return false;
    VECTOR3D normal(face->clipA.nx,face->clipA.ny,face->clipA.nz);
    VECTOR3D toEye(eyeX-face->clipA.wx,eyeY-face->clipA.wy,eyeZ-face->clipA.wz);
    float facing=toEye.DotProduct(normal);
    if(flag==CULL_BACK)
        return facing<=0; // normal points away from the eye
    if(flag==CULL_FRONT)
        return facing>=0; // normal points toward the eye
    return false; // unknown flag: cull nothing
}
这个方法是在世界空间内完成的,把视点到面上顶点组成的向量和法线进行点乘判断视点在面前还是面后.

渲染一个面:
// Full per-triangle pipeline: run the vertex shader on all three
// vertices, cull, clip against the near plane (checkFace/fixFaces),
// then rasterize the one or two resulting faces.
void drawFace(FrameBuffer* fb,DepthBuffer* db,VertexShader vs,FragmentShader fs,int cullFlag,Face* face) {
vs(face->modelA,face->clipA);
vs(face->modelB,face->clipB);
vs(face->modelC,face->clipC);
// Cull before clipping so fully back-facing faces skip the clip work.
if(cullFace(face,cullFlag))
return;
// clipFlag encodes per-vertex in/out status of the three vertices.
// NOTE(review): these literals look like 3-digit bit patterns, but a
// C++ literal with a leading zero is OCTAL: 011 == 9 (and 000 == 0),
// while 111/101/110 are plain decimal. Whether each comparison matches
// depends on what checkFace() actually returns — verify against its
// implementation; if it returns decimal 11, `clipFlag==011` can never
// be true.
int clipFlag=checkFace(face);
if(clipFlag!=111) {
// All three vertices clipped away: nothing to draw.
if(clipFlag==000)
return;
// Clip the face; fixFaces fills the globals nFace1 (and nFace2 when
// the clip produces a quad split into two triangles).
fixFaces(face,clipFlag);
if(!cullFace(nFace1,cullFlag)) {
nFace1->calculateClipMatrixInv();
nFace1->calculateNDCVertex();
rasterize2(fb,db,fs,nFace1);
}
// Two-triangle case: exactly one vertex was clipped away.
if(clipFlag==011||clipFlag==101||clipFlag==110) {
if(!cullFace(nFace2,cullFlag)) {
nFace2->calculateClipMatrixInv();
nFace2->calculateNDCVertex();
rasterize2(fb,db,fs,nFace2);
}
}
} else if(clipFlag==111) {
// Fully inside the frustum: rasterize the face as-is.
face->calculateClipMatrixInv();
face->calculateNDCVertex();
rasterize2(fb,db,fs,face);
}
}
这边的函数除了VertexShader和FragmentShader以外都在之前有实现过.

批量渲染面:
// Batch entry point: interprets `buffer` as `count` consecutive
// triangles (3 vertices each) and pushes each one through drawFace.
void drawFaces(FrameBuffer* fb,DepthBuffer* db,VertexShader vs,FragmentShader fs,int cullFlag,Vertex* buffer,int count) {
    for(int faceIdx=0;faceIdx<count;faceIdx++) {
        Vertex* tri=buffer+faceIdx*3;
        Face face(tri[0],tri[1],tri[2]);
        drawFace(fb,db,vs,fs,cullFlag,&face);
    }
}
批量提交顶点组织成三角形面,然后批量渲染.

模拟shader

定义函数类型:
// Shader entry points are plain function pointers so different shaders
// can be plugged into drawFace/drawFaces per draw call.
typedef void (*VertexShader)(Vertex input,VertexOut& output);
typedef void (*FragmentShader)(Fragment input,FragmentOut& output);
定义shader函数的类型让之后的操作变得更容易.

vertex shader:
// Default vertex shader: transforms the input vertex into world, view
// and clip space using the global model/view/projection matrices, and
// passes texture coordinates through unchanged.
void vertexShader(Vertex input,VertexOut& output) {
MATRIX4X4 mv=viewMatrix*modelMatrix;
MATRIX4X4 mvp=projectMatrix*mv;
VECTOR4D modelVert(input.x,input.y,input.z,input.w);
VECTOR4D worldVert=modelMatrix*modelVert;
VECTOR4D viewVert=mv*modelVert;
VECTOR4D clipVert=mvp*modelVert;

// Direction vector: w=0 so the translation column does not apply.
// NOTE(review): the normal is transformed by modelMatrix itself; that
// is only correct for rotations/uniform scale — non-uniform scale
// needs the inverse-transpose. The demo uses scale(50) (uniform), so
// this holds here, but confirm before reusing with other transforms.
VECTOR4D modelNormal(input.nx,input.ny,input.nz,0.0);
VECTOR4D worldNormal=modelMatrix*modelNormal;

// Clip-space position (pre-perspective-divide).
output.x=clipVert.x;
output.y=clipVert.y;
output.z=clipVert.z;
output.w=clipVert.w;

// World-space position (used for lighting / culling).
output.wx=worldVert.x;
output.wy=worldVert.y;
output.wz=worldVert.z;
output.ww=worldVert.w;

// View-space position.
output.vx=viewVert.x;
output.vy=viewVert.y;
output.vz=viewVert.z;
output.vw=viewVert.w;

output.nx=worldNormal.x;
output.ny=worldNormal.y;
output.nz=worldNormal.z;

// Texture coordinates pass through untouched.
output.s=input.s;
output.t=input.t;
}
和glsl的vertex shader并没有太大差别.

fragment shader:
// Simple Lambert fragment shader: ambient + diffuse lighting against a
// constant (untextured) surface color, using the global directional
// light (lightDir) and material/light colors (amb/ambMat, diff/diffMat).
void simpleFragShader(Fragment input,FragmentOut& output) {
VECTOR3D worldNormal(input.nx,input.ny,input.nz);
worldNormal.Normalize();
VECTOR3D worldLightDir(lightDir.x,lightDir.y,lightDir.z);
worldLightDir.Normalize();
float nDotL=worldLightDir.DotProduct(worldNormal);
// Clamp to zero so surfaces facing away receive no diffuse light.
// NOTE(review): `max(float, 0.0)` mixes float and double — this relies
// on a macro-style max (e.g. windows.h) rather than std::max, whose
// template deduction would reject mismatched types. Verify which max
// is in scope.
nDotL=max(nDotL,0.0);

// mul() presumably multiplies component-wise (light * material).
VECTOR4D lightColor=mul(amb,ambMat)+mul(diff,diffMat)*nDotL;
// Constant yellow "texture", brightened 20%; scaleColor saturates
// any channel that exceeds 1.0 later in the pipeline.
VECTOR4D texColor(1,1,0,1);
texColor*=1.2;

output.r=texColor.x*lightColor.x;
output.g=texColor.y*lightColor.y;
output.b=texColor.z*lightColor.z;
output.a=texColor.w*lightColor.w;
}
和glsl的fragment shader亦没有太大差别.

模型数据组织和渲染调用

数据声明:
// Per-object vertex array, allocated by the object's initVerts().
Vertex* verts;

// Draws this object's triangles through the supplied shaders.
void render(FrameBuffer* fb,DepthBuffer* db,VertexShader vs,FragmentShader fs,int cullFlag);


函数示例:
// Fills `verts` with two triangles forming a square in the XZ plane
// (y=0), normal +Y, texture coordinates covering [0,1]x[0,1].
void Square::initVerts() {
    verts=new Vertex[faceNum*3];

    // Per-vertex x, z, s, t; y=0 and the (0,1,0) normal are shared.
    // Triangles: (0,1,2) and (3,4,5); vertices 2/3 and 0/5 coincide.
    static const float quad[6][4]={
        {-1,-1, 0,1},
        {-1, 1, 0,0},
        { 1, 1, 1,0},
        { 1, 1, 1,0},
        { 1,-1, 1,1},
        {-1,-1, 0,1}
    };
    for(int i=0;i<6;i++)
        verts[i]=Vertex(quad[i][0],0,quad[i][1],
                        0,1,0,
                        quad[i][2],quad[i][3]);
}

// Submits the square's faceNum triangles to the pipeline.
void Square::render(FrameBuffer* fb,DepthBuffer* db,VertexShader vs,FragmentShader fs,int cullFlag) {
drawFaces(fb,db,vs,fs,cullFlag,verts,faceNum);
}


渲染调用:
// Draws the ground square: positions/scales it via the global
// modelMatrix, binds its texture, and renders into the front buffer.
void renderSquare() {
// Redundant: modelMatrix is fully overwritten two lines below.
modelMatrix.LoadIdentity();
MATRIX4X4 transMat=translate(0,0,0);
MATRIX4X4 scaleMat=scale(50);
modelMatrix=transMat*scaleMat;
currTexture=texGround->sampler;
// NOTE(review): renders into frontBuffer even though the device is
// double-buffered (draw() also clears frontBuffer) — confirm which
// buffer the platform layer actually presents.
square->render(frontBuffer,depthBuffer,vertexShader,fragmentShader,CULL_BACK);
}
用法和opengl差不多.

类opengl渲染框架

// Camera state (position sx/sy/sz and yaw/pitch xrot/yrot), created in init().
Sight* sight=NULL;
// Rebuilds the global view matrix from the camera: translate the world
// by the camera offset, then yaw (rotY from xrot) and pitch (rotX from
// yrot) — the usual inverse-camera order rotX*rotY*trans.
void buildCamera() {
MATRIX4X4 trans,rotX,rotY;
rotX=rotateX(sight->yrot);
rotY=rotateY(sight->xrot);
trans=translate(sight->sx,sight->sy,sight->sz);
viewMatrix=rotX*rotY*trans;
}

// Per-frame entry point: clear color and depth, rebuild the camera,
// render the shadow map and the scene objects, then swap buffers.
void draw() {
// Clear to the sky color; the fast (memset) variant below only
// supports gray levels, so it is left disabled.
clearScreen(frontBuffer,128,178,204);
//	clearScreenFast(frontBuffer,200);
clearDepth(depthBuffer);

buildCamera();
modelMatrix.LoadIdentity();

// Shadow pass first so the scene passes can sample the shadow map.
renderShadowMap(renderShadow);

renderCube();
renderSquare();
renderSphere();

// Presentation is handled by the platform layer; see next post.
//	flush(frontBuffer);
swapBuffer();
}

// Rebuilds the global projection matrix for the current viewport:
// 60-degree field of view, near plane 1 (also stored in the global
// clipNear for the clipping stage), far plane 100.
void buildProjectMatrix(int w,int h) {
clipNear=1;
float fAspect=(float)w/(float)h;
projectMatrix=perspective(60.0,fAspect,clipNear,100.0);
}

// Loads the wood and ground textures, first from "../texture/", falling
// back to "texture/" when loading fails (sampler stays null on failure).
void initTextures() {
    texWood=new Texture("../texture/cube24.bmp");
    texGround=new Texture("../texture/ground24.bmp");
    // Fall back when EITHER texture failed. The original tested
    // `!texWood->sampler && !texGround->sampler`, which retried only
    // when both were missing and silently kept a broken texture when
    // just one path was wrong.
    if(!texWood->sampler||!texGround->sampler) {
        delete texWood;
        delete texGround;
        texWood=new Texture("texture/cube24.bmp");
        texGround=new Texture("texture/ground24.bmp");
    }
}
// Frees both textures. The global pointers are not nulled here; they
// are only reassigned by initTextures on the next init.
void releaseTextures() {
delete texWood;
delete texGround;
}

// One-time scene setup: pipeline helpers, uniforms, textures, shadow
// map, scene objects, then the camera placed opposite the eye position.
void init() {
initFixFace();
initUniforms();
initTextures();
initShadow(256,256);
initCube();
initSquare();
initSphere();

// The Sight stores the negated eye position (camera translation is the
// inverse of the eye transform in buildCamera).
eyeX=-2; eyeY=2; eyeZ=-4;
sight=new Sight(-eyeX,-eyeY,-eyeZ);
sight->xrot=180;
sight->yrot=-10;
initKeys();
}

// Tears everything down in roughly the reverse order of init(), then
// releases the double-buffered device (created in resize()).
void release() {
releaseKeys();
delete sight;

releaseSphere();
releaseSquare();
releaseCube();
releaseShadow();
releaseTextures();
releaseFixFace();
releaseDevice2Buf(&frameBuffer1,&frameBuffer2,&depthBuffer);
}

// Window-resize handler: recreates the double-buffered device at the
// new size (release is NULL-safe for the very first call) and updates
// the projection for the new aspect ratio.
void resize(int width,int height) {
releaseDevice2Buf(&frameBuffer1,&frameBuffer2,&depthBuffer);
initDevice2Buf(&frameBuffer1,&frameBuffer2,&depthBuffer,width,height);
buildProjectMatrix(width,height);
}
整体结构和函数名字类似于opengl,如果熟悉opengl和directx应该不难明白.

以上就是所有关于我的类opengl渲染库的实现细节,这些都是平台无关的内容,所有的数据格式都是用的c++基本数据类型,具体的windows渲染环境将会在下一篇blog讲解.
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: