void SampleRenderer::render()
{
    // Skip rendering until the first resize() has produced a
    // non-empty frame buffer; a zero-sized launch is pointless.
    if (launchParams.frame.size.x == 0) return;

    // Push the current launch parameters to the device.
    launchParamsBuffer.upload(&launchParams, 1);

    // Kick off the ray-gen program over the whole frame.
    OPTIX_CHECK(optixLaunch(pipeline, stream,
                            /* parameters and SBT */
                            launchParamsBuffer.d_pointer(),
                            launchParamsBuffer.sizeInBytes,
                            &sbt,
                            /* launch dimensions */
                            launchParams.frame.size.x,
                            launchParams.frame.size.y,
                            1));

    // Block until the frame is finished before it gets downloaded and
    // displayed. A high-performance application would use streams and
    // double-buffering instead of a full device sync, but this will
    // do for a simple example.
    CUDA_SYNC_CHECK();
}
void SampleRenderer::setCamera(const Camera &camera)
{
    // Remember the camera so resize() can re-derive the screen basis
    // when the aspect ratio changes.
    lastSetCamera = camera;

    launchParams.camera.position  = camera.from;
    launchParams.camera.direction = normalize(camera.at - camera.from);

    // Fixed screen-plane scale factor (the tutorial's "cosFovy").
    const float cosFovy = 0.66f;
    const float aspect
        = launchParams.frame.size.x / float(launchParams.frame.size.y);

    // Build the screen-space basis vectors from the view direction
    // and the camera's up vector.
    launchParams.camera.horizontal
        = cosFovy * aspect
        * normalize(cross(launchParams.camera.direction, camera.up));
    launchParams.camera.vertical
        = cosFovy
        * normalize(cross(launchParams.camera.horizontal,
                          launchParams.camera.direction));
}
void SampleRenderer::resize(const vec2i &newSize)
{
    // Ignore resize events while the window is minimized (zero area).
    // Use logical || (short-circuit) rather than the original bitwise |:
    // the result is the same here, but || states the intent and avoids
    // evaluating the second comparison needlessly.
    if (newSize.x == 0 || newSize.y == 0) return;

    // Re-allocate the CUDA frame buffer for the new resolution
    // (one 32-bit RGBA value per pixel).
    colorBuffer.resize(newSize.x * newSize.y * sizeof(uint32_t));

    // Update the launch parameters that we'll pass to the optix launch.
    launchParams.frame.size        = newSize;
    launchParams.frame.colorBuffer = (uint32_t*)colorBuffer.d_pointer();

    // Re-set the camera, since the aspect ratio may have changed.
    setCamera(lastSetCamera);
}
上面这段代码就是本文的精华部分了。但说句实话,基本又全是套路式地调用api...所以我也是一知半解。尝试解读一下这段代码,首先我们要将模型的顶点和索引上传到vertex和index两个buffer中,然后定义一个triangleInput,将这两大缓冲区送入注册绑定;同时每个物体要通过sbt对shader进行绑定,不过这个example没有特殊材质shader,所有的三角面最后都绑定一个相同的hit group shader。
extern "C" __global__ void __miss__radiance()
{
    // Fetch the per-ray payload and fill it with the background color.
    vec3f &payload = *(vec3f*)getPRD<vec3f>();
    payload = vec3f(1.f);   // constant white background
}
这个更简单:miss shader 并不“返回”颜色,而是把每条光线的载荷(PRD)直接写成纯白色 (1,1,1),作为背景色。
最后是最复杂的Raygen Shader:
extern "C" __global__ void __raygen__renderFrame()
{
    // Pixel coordinates handled by this launch thread.
    const int pixelX = optixGetLaunchIndex().x;
    const int pixelY = optixGetLaunchIndex().y;
    const auto &camera = optixLaunchParams.camera;

    // Per-ray payload. Its initial value does not matter: either the
    // closest-hit or the miss program will overwrite it.
    vec3f pixelColorPRD = vec3f(0.f);

    // The payload pointer travels through optixTrace as two 32-bit words.
    uint32_t u0, u1;
    packPointer(&pixelColorPRD, u0, u1);

    // Pixel center in normalized screen coordinates, [0,1]^2.
    const vec2f screen = vec2f(pixelX + .5f, pixelY + .5f)
                       / vec2f(optixLaunchParams.frame.size);

    // Primary ray direction through the screen plane.
    const vec3f rayDir
        = normalize(camera.direction
                    + (screen.x - 0.5f) * camera.horizontal
                    + (screen.y - 0.5f) * camera.vertical);

    optixTrace(optixLaunchParams.traversable,
               camera.position,
               rayDir,
               0.f,                            // tmin
               1e20f,                          // tmax
               0.0f,                           // rayTime
               OptixVisibilityMask(255),
               OPTIX_RAY_FLAG_DISABLE_ANYHIT,  // OPTIX_RAY_FLAG_NONE
               SURFACE_RAY_TYPE,               // SBT offset
               RAY_TYPE_COUNT,                 // SBT stride
               SURFACE_RAY_TYPE,               // missSBTIndex
               u0, u1);

    // Quantize the traced color to 8 bits per channel.
    const int r = int(255.99f * pixelColorPRD.x);
    const int g = int(255.99f * pixelColorPRD.y);
    const int b = int(255.99f * pixelColorPRD.z);

    // Pack as 32-bit RGBA; alpha is explicitly set to 0xff
    // (to make stb_image_write happy).
    const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);

    // Write the pixel into the frame buffer.
    const uint32_t fbIndex
        = pixelX + pixelY * optixLaunchParams.frame.size.x;
    optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
extern "C" int main(int ac, char** av)
{
    try {
        // Build a tiny test scene: a thin 10x10 ground plane with a
        // 2x2x2 cube sitting on top of it. (The original comment said
        // "100x100", which contradicted the actual extents below.)
        TriangleMesh model;
        model.addCube(vec3f(0.f, -1.5f, 0.f), vec3f(10.f, .1f, 10.f));
        model.addCube(vec3f(0.f, 0.f, 0.f), vec3f(2.f, 2.f, 2.f));

        Camera camera = { /*from*/ vec3f(-10.f, 2.f, -12.f),
                          /* at */ vec3f(0.f, 0.f, 0.f),
                          /* up */ vec3f(0.f, 1.f, 0.f) };

        // Something approximating the scale of the world, so the
        // camera knows how much to move for any given user interaction.
        const float worldScale = 10.f;

        // Stack-allocate the window (the original leaked a raw `new`);
        // it is destroyed when run() returns.
        SampleWindow window("Optix 7 Course Example",
                            model, camera, worldScale);
        window.run();
    }
    catch (const std::runtime_error& e) {  // catch by const reference
        std::cout << GDT_TERMINAL_RED << "FATAL ERROR: " << e.what()
                  << GDT_TERMINAL_DEFAULT << std::endl;
        exit(1);
    }
    return 0;
}
// ---- SBT hit-group record setup (fragment; the enclosing function
// ---- header lies outside this excerpt) ----
// Builds one hit-group record per mesh. All records pack the same
// program-group header (hitgroupPGs[0]) since every mesh uses the
// same closest-hit code; only the per-record data differs.
int numObjects = (int)meshes.size();
std::vector<HitgroupRecord> hitgroupRecords;
for (int meshID = 0; meshID < numObjects; meshID++) {
HitgroupRecord rec;
// all meshes use the same code, so all same hit group
OPTIX_CHECK(optixSbtRecordPackHeader(hitgroupPGs[0], &rec));
// per-mesh data: a color derived deterministically from meshID, plus
// device pointers to this mesh's vertex and index buffers
rec.data.color = gdt::randomColor(meshID);
rec.data.vertex = (vec3f*)vertexBuffer[meshID].d_pointer();
rec.data.index = (vec3i*)indexBuffer[meshID].d_pointer();
hitgroupRecords.push_back(rec);
}
// upload all records to the device and point the SBT's hit-group
// table at them (contiguous records, fixed stride)
hitgroupRecordsBuffer.alloc_and_upload(hitgroupRecords);
sbt.hitgroupRecordBase = hitgroupRecordsBuffer.d_pointer();
sbt.hitgroupRecordStrideInBytes = sizeof(HitgroupRecord);
sbt.hitgroupRecordCount = (int)hitgroupRecords.size();
Comments | NOTHING