Merging r6405 through r6424 from trunk to ogl-es branch

git-svn-id: svn://svn.code.sf.net/p/irrlicht/code/branches/ogl-es@6425 dfc29bdd-3216-0410-991c-e03cc46cb475
cutealien committed 2022-09-22 21:55:03 +00:00
parent ddc14ea87e, commit 07f17647d2
46 changed files with 589 additions and 463 deletions

View File

@@ -25,6 +25,7 @@ all: all_linux
# target specific settings
all_linux all_win32 static_win32: LDFLAGS += -L$(IrrlichtHome)/lib/$(SYSTEM) -lIrrlicht
all_linux: LDFLAGS += -L/usr/X11R6/lib$(LIBSELECT) -lGL -lEGL -lGLESv1_CM -lGLESv2 -lXxf86vm -lXext -lX11 -lXcursor
#all_linux: LDFLAGS += `sdl-config --libs`
all_linux clean_linux: SYSTEM=Linux
all_win32 clean_win32 static_win32: SYSTEM=Win32-gcc
all_win32 clean_win32 static_win32: SUF=.exe

View File

@@ -42,7 +42,7 @@ public:
/*
Always return false by default. If you return true you tell the engine
that you handled this event completely and that Irrlicht should not
process it any further. So for example if you return true for all
EET_KEY_INPUT_EVENT events then Irrlicht would not pass on key-events
to its GUI system.
*/
@@ -54,7 +54,7 @@ public:
{
return KeyIsDown[keyCode];
}
MyEventReceiver()
{
for (u32 i=0; i<KEY_KEY_CODES_COUNT; ++i)
@@ -71,7 +71,7 @@ private:
The event receiver for keeping the pressed keys is ready, the actual responses
will be made inside the render loop, right before drawing the scene. So let's
create an irr::IrrlichtDevice and the scene node we want to move. We also
create some additional scene nodes to show different possibilities to move and
animate scene nodes.
*/
int main()
@@ -82,7 +82,7 @@ int main()
return 1;
/*
Create the event receiver. Take care that the pointer to it has to
stay valid as long as the IrrlichtDevice uses it. Event receivers are not
reference counted.
*/
@@ -139,7 +139,7 @@ int main()
}
/*
The last scene node we add is a b3d model of a walking ninja. It shows the
use of a 'fly straight' animator to move the node between two points.
*/
scene::IAnimatedMeshSceneNode* ninjaNode =
@@ -196,7 +196,7 @@ int main()
core::position2d<s32>(10,20));
/*
Let's draw the scene and also write the current frames per second and the
name of the driver to the caption of the window.
*/
int lastFPS = -1;
@@ -259,7 +259,7 @@ int main()
In the end, delete the Irrlicht device.
*/
device->drop();
return 0;
}
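For context, the OnEvent override that fills the KeyIsDown array referenced by IsKeyDown above looks essentially like this in the example (a minimal sketch; the exact body in this revision may differ):

	virtual bool OnEvent(const SEvent& event)
	{
		// remember whether each keyboard key is currently up or down
		if (event.EventType == irr::EET_KEY_INPUT_EVENT)
			KeyIsDown[event.KeyInput.Key] = event.KeyInput.PressedDown;

		return false; // let Irrlicht pass the event on
	}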

View File

@@ -25,6 +25,7 @@ all: all_linux
# target specific settings
all_linux all_win32 static_win32: LDFLAGS += -L$(IrrlichtHome)/lib/$(SYSTEM) -lIrrlicht
all_linux: LDFLAGS += -L/usr/X11R6/lib$(LIBSELECT) -lGL -lEGL -lGLESv1_CM -lGLESv2 -lXxf86vm -lXext -lX11 -lXcursor
#all_linux: LDFLAGS += `sdl-config --libs`
all_linux clean_linux: SYSTEM=Linux
all_win32 clean_win32 static_win32: SYSTEM=Win32-gcc
all_win32 clean_win32 static_win32: SUF=.exe

View File

@@ -35,8 +35,6 @@ of the objects and camera.
*/
#ifdef _MSC_VER
-// We'll also define this to stop MSVC complaining about sprintf().
-#define _CRT_SECURE_NO_WARNINGS
#pragma comment(lib, "Irrlicht.lib")
#endif
@@ -150,6 +148,7 @@ int main()
*/
smgr->addCameraSceneNode();
int lastFPS = -1;
+u32 lastPrimitives = 0;
u32 timeNow = device->getTimer()->getTime();
bool nodeVisible=true;
@@ -190,16 +189,20 @@ int main()
driver->endScene();
int fps = driver->getFPS();
+u32 numPrimitives = driver->getPrimitiveCountDrawn();
-if (lastFPS != fps)
+if (lastFPS != fps || lastPrimitives != numPrimitives)
{
core::stringw tmp(L"OcclusionQuery Example [");
tmp += driver->getName();
tmp += L"] fps: ";
tmp += fps;
tmp += L" polygons: ";
tmp += numPrimitives;
device->setWindowCaption(tmp.c_str());
lastFPS = fps;
+lastPrimitives = numPrimitives;
}
}
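The lastPrimitives bookkeeping added here plugs into the usual occlusion-query loop of this example. A minimal sketch of that loop, assuming a node and its mesh already created (neither is shown in the hunks above):

	// once, after creating the node: register an occlusion query for it
	driver->addOcclusionQuery(node, mesh);

	// each frame, between beginScene() and endScene():
	driver->runAllOcclusionQueries(false); // draw the query proxies invisibly
	driver->updateAllOcclusionQueries();   // fetch the results back from the GPU
	node->setVisible(driver->getOcclusionQueryResult(node) > 0);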

View File

@@ -1,16 +1,16 @@
/** Example 027 Post Processing
This tutorial shows how to implement post processing for D3D9 and OpenGL with
the engine. In order to do post processing, scene objects are firstly rendered
to render target. With the help of screen quad, the render target texture
is then drawn on the quad with shader-defined effects applied.
This tutorial shows how to create a screen quad. It also shows how to create a
render target texture and associate it with the quad. Effects are defined as
shaders which are applied during rendering the quad with the render target
texture attached to it.
A simple color inverse example is presented in this tutorial. The effect is
written in HLSL and GLSL.
@author Boshen Guan
@@ -29,46 +29,42 @@ using namespace irr;
/*
We write a class derived from IShaderConstantSetCallBack class and implement
OnSetConstants callback interface. In this callback, we will set constants
used by the shader.
In this example, our HLSL shader needs texture size as input in its vertex
shader. Therefore, we set texture size in OnSetConstants callback using
setVertexShaderConstant function.
*/
-IrrlichtDevice* device = 0;
-video::ITexture* rt = 0;
class QuadShaderCallBack : public video::IShaderConstantSetCallBack
{
public:
QuadShaderCallBack() : FirstUpdate(true), TextureSizeID(-1), TextureSamplerID(-1)
{ }
virtual void OnSetConstants(video::IMaterialRendererServices* services,
s32 userData)
{
-core::dimension2d<u32> size = rt->getSize();
-// get texture size array
-f32 textureSize[] =
-{
-(f32)size.Width, (f32)size.Height
-};
-// set texture size to vertex shader
-services->setVertexShaderConstant(TextureSizeID, reinterpret_cast<f32*>(textureSize), 2);
-// set texture for an OpenGL driver
-s32 textureLayer = 0;
-services->setPixelShaderConstant(TextureSamplerID, &textureLayer, 1);
+if ( FirstUpdate )
+{
+FirstUpdate = false;
+TextureSizeID = services->getVertexShaderConstantID("TextureSize");
+TextureSamplerID = services->getPixelShaderConstantID("TextureSampler");
+}
+// get texture size array (for our simple example HLSL just needs that to calculate pixel centers)
+core::dimension2d<u32> size = services->getVideoDriver()->getCurrentRenderTargetSize();
+f32 textureSize[2];
+textureSize[0] = (f32)size.Width;
+textureSize[1] = (f32)size.Height;
+// set texture size to vertex shader
+services->setVertexShaderConstant(TextureSizeID, textureSize, 2);
+// set texture for an OpenGL driver
+s32 textureLayer = 0;
+services->setPixelShaderConstant(TextureSamplerID, &textureLayer, 1);
}
private:
bool FirstUpdate;
@@ -80,102 +76,91 @@ class ScreenQuad : public IReferenceCounted
{
public:
ScreenQuad(video::IVideoDriver* driver)
: Driver(driver)
{
// --------------------------------> u
// |[1](-1, 1)----------[2](1, 1)
// | | ( 0, 0) / | (1, 0)
// | | / |
// | | / |
// | | / |
// | | / |
// | | / |
// | | / |
// | | / |
// | | / |
// |[0](-1, -1)---------[3](1, -1)
// | ( 0, 1) (1, 1)
// V
// v
/*
A screen quad is composed of two adjacent triangles with 4 vertices.
Vertex [0], [1] and [2] create the first triangle and Vertex [0],
[2] and [3] create the second one. To map a texture onto the quad, UV
coordinates are assigned to the vertices. The origin of the UV coordinates
is the top-left corner, and the UV values range from 0 to 1.
*/
// define vertices array
Vertices[0] = irr::video::S3DVertex(-1.0f, -1.0f, 0.0f, 1, 1, 0, irr::video::SColor(0,255,255,255), 0.0f, 1.0f);
Vertices[1] = irr::video::S3DVertex(-1.0f, 1.0f, 0.0f, 1, 1, 0, irr::video::SColor(0,255,255,255), 0.0f, 0.0f);
Vertices[2] = irr::video::S3DVertex( 1.0f, 1.0f, 0.0f, 1, 1, 0, irr::video::SColor(0,255,255,255), 1.0f, 0.0f);
Vertices[3] = irr::video::S3DVertex( 1.0f, -1.0f, 0.0f, 1, 1, 0, irr::video::SColor(0,255,255,255), 1.0f, 1.0f);
// define indices for triangles
Indices[0] = 0;
Indices[1] = 1;
Indices[2] = 2;
Indices[3] = 0;
Indices[4] = 2;
Indices[5] = 3;
// turn off lighting as default
Material.setFlag(video::EMF_LIGHTING, false);
// set texture wrap settings to clamp to edge pixel
for (u32 i = 0; i < video::MATERIAL_MAX_TEXTURES; i++)
{
Material.TextureLayer[i].TextureWrapU = video::ETC_CLAMP_TO_EDGE;
Material.TextureLayer[i].TextureWrapV = video::ETC_CLAMP_TO_EDGE;
}
}
virtual ~ScreenQuad() {}
//! render the screen quad
virtual void render()
{
// set the material of screen quad
Driver->setMaterial(Material);
-// set matrices to fit the quad to full viewport
-Driver->setTransform(video::ETS_WORLD, core::IdentityMatrix);
-Driver->setTransform(video::ETS_VIEW, core::IdentityMatrix);
-Driver->setTransform(video::ETS_PROJECTION, core::IdentityMatrix);
+// set world matrix to fit the quad to full viewport
+Driver->setTransform(video::ETS_WORLD, core::IdentityMatrix);
+// view & projection not used in shader, but matter to burnings driver
+Driver->setTransform(video::ETS_VIEW, core::IdentityMatrix);
+Driver->setTransform(video::ETS_PROJECTION, core::IdentityMatrix);
// draw screen quad
Driver->drawVertexPrimitiveList(Vertices, 4, Indices, 2);
}
-//! sets a flag of material to a new value
-virtual void setMaterialFlag(video::E_MATERIAL_FLAG flag, bool newvalue)
-{
-Material.setFlag(flag, newvalue);
-}
-//! sets the texture of the specified layer in material to the new texture.
-void setMaterialTexture(u32 textureLayer, video::ITexture* texture)
-{
-Material.setTexture(textureLayer, texture);
-}
-//! sets the material type to a new material type.
-virtual void setMaterialType(video::E_MATERIAL_TYPE newType)
-{
-Material.MaterialType = newType;
-}
+//! Access the material
+virtual video::SMaterial& getMaterial()
+{
+return Material;
+}
private:
video::IVideoDriver *Driver;
video::S3DVertex Vertices[4];
u16 Indices[6];
video::SMaterial Material;
};
/*
@@ -184,233 +169,237 @@ according to the driver type.
*/
int main()
{
// ask user for driver
video::E_DRIVER_TYPE driverType=driverChoiceConsole();
if (driverType==video::EDT_COUNT)
return 1;
// create device
-device = createDevice(driverType, core::dimension2d<u32>(640, 480));
+IrrlichtDevice* device = createDevice(driverType, core::dimension2d<u32>(640, 480));
if (device == 0)
return 1; // could not create selected driver.
video::IVideoDriver* driver = device->getVideoDriver();
scene::ISceneManager* smgr = device->getSceneManager();
/*
In this example, high level post processing shaders are loaded for both
Direct3D and OpenGL drivers.
File pp_d3d9.hlsl is for Direct3D 9, and pp_opengl.frag/pp_opengl.vert
are for OpenGL.
*/
const io::path mediaPath = getExampleMediaPath();
io::path vsFileName; // filename for the vertex shader
io::path psFileName; // filename for the pixel shader
switch(driverType)
{
case video::EDT_DIRECT3D9:
psFileName = mediaPath + "pp_d3d9.hlsl";
vsFileName = psFileName; // both shaders are in the same file
break;
case video::EDT_OPENGL:
case video::EDT_BURNINGSVIDEO:
psFileName = mediaPath + "pp_opengl.frag";
vsFileName = mediaPath + "pp_opengl.vert";
break;
}
/*
Check for hardware capability of executing the corresponding shaders
on selected renderer. This is not necessary though.
*/
if (!driver->queryFeature(video::EVDF_PIXEL_SHADER_1_1) &&
!driver->queryFeature(video::EVDF_ARB_FRAGMENT_PROGRAM_1))
{
device->getLogger()->log("WARNING: Pixel shaders disabled "\
"because of missing driver/hardware support.");
psFileName = "";
}
if (!driver->queryFeature(video::EVDF_VERTEX_SHADER_1_1) &&
!driver->queryFeature(video::EVDF_ARB_VERTEX_PROGRAM_1))
{
device->getLogger()->log("WARNING: Vertex shaders disabled "\
"because of missing driver/hardware support.");
vsFileName = "";
}
/*
An animated mesh is loaded to be displayed. As in most examples,
we'll take the fairy md2 model.
*/
// load and display animated fairy mesh
scene::IAnimatedMeshSceneNode* fairy = smgr->addAnimatedMeshSceneNode(
smgr->getMesh(mediaPath + "faerie.md2"));
if (fairy)
{
fairy->setMaterialTexture(0,
driver->getTexture(mediaPath + "faerie2.bmp")); // set diffuse texture
fairy->setMaterialFlag(video::EMF_LIGHTING, false); // disable dynamic lighting
fairy->setPosition(core::vector3df(-10,0,-100));
fairy->setMD2Animation ( scene::EMAT_STAND );
}
// add scene camera
smgr->addCameraSceneNode(0, core::vector3df(10,10,-80),
core::vector3df(-10,10,-100));
/*
We create a render target texture (RTT) with the same size as the frame buffer.
Instead of rendering the scene directly to the frame buffer, we first
render it to this RTT. Post processing is then applied based on this RTT.
The RTT size need not be the same as the frame buffer. In this example,
however, we expect the result of rendering to the RTT to be consistent with
the result of rendering directly to the frame buffer. Therefore, the size of
the RTT is kept the same as the frame buffer.
*/
// create render target
+video::ITexture* rt = 0;
if (driver->queryFeature(video::EVDF_RENDER_TO_TARGET))
{
rt = driver->addRenderTargetTexture(core::dimension2d<u32>(640, 480), "RTT1");
}
else
{
device->getLogger()->log("Your hardware or this renderer is not able to use the "\
"render to texture feature. RTT Disabled.");
}
/*
Post processing is achieved by rendering a screen quad with this RTT (with
the previously rendered result) as a texture on the quad. A screen quad is
a flat plane composed of two adjacent triangles covering the entire area of
the viewport. In this rendering pass, the RTT works just like a normal
texture and is drawn on the quad during rendering. We can then take control
of this rendering process by applying various shader-defined materials to
the quad. In other words, we can achieve different effects by writing
different shaders.
This process is called post processing because it normally does not rely
on scene geometry. The inputs of this process are just textures, or in
other words, just images. With the help of the screen quad, we can draw these
images on the screen with different effects. For example, we can adjust
contrast, convert to grayscale, add noise, do fancier effects such as blur,
bloom, or ghosting, or, just like in this example, invert the color to
produce a negative image.
Note that post processing is not limited to using only one texture. It can
take multiple textures as shader inputs to provide the desired result. In
addition, post processing can also be chained to produce a compound result,
as sketched right after this comment.
*/
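A hypothetical sketch of such chaining with two effects (rtA, rtB and the two material type ids effect1/effect2 are assumptions for illustration, not part of this example):

	// pass 0: render the scene into the first RTT
	driver->setRenderTarget(rtA, true, true, video::SColor(255,0,0,0));
	smgr->drawAll();

	// pass 1: apply the first effect, writing into the second RTT
	driver->setRenderTarget(rtB, true, true, video::SColor(255,0,0,0));
	screenQuad->getMaterial().setTexture(0, rtA);
	screenQuad->getMaterial().MaterialType = (video::E_MATERIAL_TYPE)effect1;
	screenQuad->render();

	// pass 2: apply the second effect, writing to the frame buffer
	driver->setRenderTarget(0, true, true, video::SColor(255,0,0,0));
	screenQuad->getMaterial().setTexture(0, rtB);
	screenQuad->getMaterial().MaterialType = (video::E_MATERIAL_TYPE)effect2;
	screenQuad->render();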
// we create a screen quad
ScreenQuad *screenQuad = new ScreenQuad(driver);
+video::SMaterial& screenQuadMaterial = screenQuad->getMaterial();
-// turn off mip maps and bilinear filter since we do not want interpolated result
-screenQuad->setMaterialFlag(video::EMF_USE_MIP_MAPS, false);
-screenQuad->setMaterialFlag(video::EMF_BILINEAR_FILTER, false);
+// turn off mip maps and bilinear filter since we do not want interpolated results
+screenQuadMaterial.setFlag(video::EMF_USE_MIP_MAPS, false);
+screenQuadMaterial.setFlag(video::EMF_BILINEAR_FILTER, false);
+// turn off depth buffer, because our full screen 2D overlay doesn't process depth
+screenQuadMaterial.setFlag(video::EMF_ZBUFFER, false);
-// set quad texture to RTT we just create
-screenQuad->setMaterialTexture(0, rt);
+// set quad texture to RTT we just create
+screenQuadMaterial.setTexture(0, rt);
/*
Let's create a material for the quad. Like in other examples, we create the
material using IGPUProgrammingServices and call addShaderMaterialFromFiles,
which returns a material type identifier.
*/
// create materials
video::IGPUProgrammingServices* gpu = driver->getGPUProgrammingServices();
s32 ppMaterialType = 0;
if (gpu)
{
// We write a QuadShaderCallBack class that implements OnSetConstants
// callback of IShaderConstantSetCallBack class at the beginning of
// this tutorial. We set shader constants in this callback.
// create an instance of callback class
QuadShaderCallBack* mc = new QuadShaderCallBack();
// create material from post processing shaders
ppMaterialType = gpu->addHighLevelShaderMaterialFromFiles(
vsFileName, "vertexMain", video::EVST_VS_1_1,
psFileName, "pixelMain", video::EPST_PS_1_1, mc);
-// set post processing material type to the quad
-screenQuad->setMaterialType((video::E_MATERIAL_TYPE)ppMaterialType);
mc->drop();
}
+// set post processing material type to the quad
+screenQuadMaterial.MaterialType = (video::E_MATERIAL_TYPE)ppMaterialType;
int lastFPS = -1;
/*
Now draw everything. That's all.
*/
while(device->run())
{
if (device->isWindowActive())
{
driver->beginScene(true, true, video::SColor(255,0,0,0));
if (rt)
{
// draw scene into render target
// set render target to RTT
driver->setRenderTarget(rt, true, true, video::SColor(255,0,0,0));
// draw scene to RTT just like normal rendering
smgr->drawAll();
// after rendering to RTT, we change render target back
driver->setRenderTarget(0, true, true, video::SColor(255,0,0,0));
// render screen quad to apply post processing
screenQuad->render();
}
else
{
// draw scene normally
smgr->drawAll();
}
driver->endScene();
int fps = driver->getFPS();
if (lastFPS != fps)
{
core::stringw str = L"Irrlicht Engine - Post processing example [";
str += driver->getName();
str += "] FPS:";
str += fps;
device->setWindowCaption(str.c_str());
lastFPS = fps;
}
}
}
// do not forget to manually drop the screen quad
screenQuad->drop();
device->drop();
return 0;
}
/*

View File

@@ -316,7 +316,7 @@ private:
Irrlicht internally uses textures with left-top origin and then corrects the texture-matrices in the fixed-function pipeline.
For shader materials it's left to the users to handle those UV-flips for the texture-matrix.
Render target textures (RTT's) in OpenGL are rendered with left-bottom origin and Irrlicht can't change that, so all RTT textures
-in memory are upside-down (unlike all other Irrlicht textures).
+in memory are upside-down (compared to other Irrlicht textures).
In the fixed function pipeline Irrlicht handles this by flipping the RTT's texture matrix once more and for shaders it's again
left to the users to handle it.
Cubemap textures are different from other textures in OpenGL. Each cube side has left-top as the origin. So not flipping Irrlicht textures for those would be fine.
@@ -325,7 +325,7 @@ private:
So... the following 2 defines are two different workarounds I found. Both are ugly, which one is better in reality depends probably on the scene.
Only use one of those:
-CUBEMAP_UPSIDE_DOWN_GL_PROJECTION is relatively fast as it just changes the project matrix. The problem is that changing the projection matrix
+CUBEMAP_UPSIDE_DOWN_GL_PROJECTION is relatively fast as it just changes the projection matrix. The problem is that changing the projection matrix
means changing front/backside culling. So every node rendered has to flip the material flags for those.
CUBEMAP_USPIDE_DOWN_RTT will change the texture memory itself and flip the image upside-down.
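A minimal sketch of what the CUBEMAP_UPSIDE_DOWN_GL_PROJECTION workaround boils down to (the camera variable is an assumption; as described above, the example must also flip culling flags afterwards):

	// flip the Y axis of the camera projection before rendering to the cubemap RTT
	core::matrix4 matProj = camera->getProjectionMatrix();
	matProj(1, 1) *= -1.f; // negate the Y scale of the projection
	camera->setProjectionMatrix(matProj);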
@@ -591,7 +591,7 @@ int main()
}
/* Add some background which will show up in the environment maps.
-For first one we use the same textures as used in the spheres.
+For the first background we use the same textures as used in the spheres.
Note the difference between a skybox and a cubemap is that the skybox really uses 6 different
textures. While the cubemap uses a single texture created from 6 images. */
eventReceiver.BackgroundSkybox = smgr->addSkyBoxSceneNode(
@@ -639,7 +639,7 @@ int main()
#endif
/*
-Add some moving node to show the difference between static/dynamic environment maps
+Add a moving node to show the difference between static/dynamic environment maps
*/
scene::IMeshSceneNode * movingNode = smgr->addCubeSceneNode(30.f);
movingNode->getMaterial(0).Lighting = false;
@@ -692,7 +692,7 @@ int main()
driver->beginScene(true, true, video::SColor(255, 127, 127, 255));
/* Check if we want to update the environment maps.
-Usually not something you'll do every frame, but either once at the star
+Usually not something you'll do every frame, but either once at the start
or maybe updating an environment map once in a while.
*/
int updateCubemaps = eventReceiver.checkCubemapUpdate();
@@ -704,7 +704,7 @@ int main()
{
/*
Flipping projection matrix flips front/backface culling.
-We only have a skybox so in this case this still would be fast, with more objects it's getting more ugly.
+We only have a skybox so in this case it's fast, with more objects it's getting more ugly.
*/
smgr->getSceneNodesFromType(scene::ESNT_ANY, allNodes);
flipCullingFlags(allNodes);
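flipCullingFlags is a helper defined earlier in that example; a sketch of what such a helper does, assuming it simply toggles the culling state of every material (the real implementation may differ in detail):

	void flipCullingFlags(const core::array<scene::ISceneNode*>& nodes)
	{
		for (u32 n = 0; n < nodes.size(); ++n)
		{
			scene::ISceneNode* node = nodes[n];
			for (u32 m = 0; m < node->getMaterialCount(); ++m)
			{
				// toggling both flags compensates for the mirrored winding
				// order caused by the flipped projection matrix
				video::SMaterial& mat = node->getMaterial(m);
				mat.BackfaceCulling = !mat.BackfaceCulling;
				mat.FrontfaceCulling = !mat.FrontfaceCulling;
			}
		}
	}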