THUSC 2021 Day2 光线追踪渲染 题解
BMP
通过查看逆操作的实现,我们可以得到这道题目的大致流程:填充头信息、使用二进制输出将头信息写入、逐行写入颜色信息。我们只需要注意行末补齐问题即可。
void Image::SaveBitmapToFile(const char *_path) const
{
FILE * fp = fopen(_path, "wb");
if(! fp)
return;
unsigned int lineByte = (width * 24 / 8 + 3) / 4 * 4;
BITMAPFILEHEADER fileHead;
fileHead.bfType = 0x4D42;
fileHead.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + lineByte * height;
fileHead.bfReserved1 = 0;
fileHead.bfReserved2 = 0;
fileHead.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
fwrite(&fileHead, sizeof(BITMAPFILEHEADER), 1, fp);
BITMAPINFOHEADER bmpHead;
bmpHead.biBitCount = 24;
bmpHead.biClrImportant = 0;
bmpHead.biClrUsed = 0;
bmpHead.biCompression=0;
bmpHead.biHeight = height;
bmpHead.biPlanes = 1;
bmpHead.biSize = 40;
bmpHead.biSizeImage = lineByte * height;
bmpHead.biWidth = width;
bmpHead.biXPelsPerMeter = 0;
bmpHead.biYPelsPerMeter = 0;
fwrite(&bmpHead, sizeof(BITMAPINFOHEADER), 1, fp);
BYTE blank = 0;
for(int i = height-1; ~i; i --) {
unsigned int b = width * 3;
fwrite(pixels + i*width, sizeof(Color), width, fp);
while(b != lineByte)
fwrite(&blank, sizeof(BYTE), 1, fp), ++ b;
}
fclose(fp);
return;
}
TRIANGLE
乍一看是一道数学题,但是我们可以从计算几何库中找到一个函数——planeIntersection,可以计算出一个平面和一个射线的交。利用平面的构造函数,加上交点在三角形内的判断,就可以轻松完成这道题目。
double triangleIntersection(const Vector4& v0, const Vector4& v1, const Vector4& v2, const Ray& ray, Vector4* point, Vector4* normal)
{
Vector4 p1;
double dis = planeIntersection(Plane(v0, v1, v2), ray.o, ray.d, &p1, normal);
if(std::isinf(dis))
return INFINITY;
double area = Triangle(v0, v1, v2).Area()
- Triangle(v0, v1, p1).Area()
- Triangle(v1, v2, p1).Area()
- Triangle(v2, v0, p1).Area();
if(point)
*point = p1;
if(!isZero(area))
return INFINITY;
return dis;
}
CORNELLBOX & BALLS
两个问题考察的都是查看文档的能力。实际上,这两道题目的公式在指引中都有出现。其中,CORNELLBOX 的函数可以代入 F 的定义消去一些乘法计算,给代码适当加速。
// Direct contribution of the point light `source` to the surface point
// `vPoint` with normal `vNorm`.  Returns zero when the light lies behind
// (or grazes) the surface, or when the line of sight to the light is
// blocked by `pModel`.
Vector3 SingleSourceContribution(Vector4 vPoint, Vector4 vNorm, Model * pModel, const Source &source)
{
// Vector from the shaded point towards the light.
Vector4 lightDir = source.pos - vPoint;
// Light on the back side of the surface contributes nothing.
if(dot(lightDir, vNorm) <= EPS)
return MakeVector3();
double len = 0;
// Flip and normalize: lightDir now points FROM the light TO the point,
// while `len` keeps the original point-to-light distance.
lightDir = -lightDir / (len = lightDir.len());
Vector4 normal;
Surface surface;
// Shadow test: march from the light towards the point (origin nudged by
// EPS to avoid self-intersection at the light) and find the first hit.
double d = pModel->GetIntersection(Ray(source.pos + EPS * lightDir, lightDir), &normal, &surface);
// Something was hit strictly before reaching the point -> occluded.
if(d < len * (1 - EPS))
return MakeVector3();
// Cosine between the surface normal and the direction to the light; the
// leading minus compensates for lightDir pointing towards the point.
const double cos_theta = - dot(vNorm, lightDir) / vNorm.len() / lightDir.len();
// Lambertian term with inverse-square falloff over the hit distance d
// (d is approximately `len` when the path is unoccluded).
return source.intensity * cos_theta / PI / d / d;
}
// Mirror-reflect the outgoing direction vIn about the normal vNorm and
// return the result as a unit vector.
Vector4 SpecularReflectRay(Vector4 vIn, Vector4 vNorm)
{
    const Vector4 reflected = 2 * dot(vIn, vNorm) * vNorm - vIn;
    return reflected / reflected.len();
}
PANORAMA
我们一般使用以圆柱为基础展开得到的全景视图。如果使用球作为基础展开,得到的图像将会十分扭曲。
我们首先由像素坐标计算出水平、垂直两个角度并定义方向向量,同时对上下两个极点(y = 0 与 y = 1)做特殊处理:
// Map film coordinates (x, y) in [0,1]^2 to a panoramic camera ray:
// x sweeps the full horizontal circle, y the vertical span.  The two
// poles (y == 0 and y == 1) are special-cased as straight up / down.
Ray CameraPanorama::GenerateRay(double x, double y) const
{
    const Vector4 origin = coord * MakeVector4(0, 0, 0, 1);
    if (isZero(y))
        return Ray(origin, coord * MakeVector4(0, 1, 0, 0));
    if (isZero(1 - y))
        return Ray(origin, coord * MakeVector4(0, -1, 0, 0));
    const double azimuth = (x - 0.5) / 0.5 * PI;          // horizontal angle
    const double elevation = -(y - 0.5) / 0.5 / 2 * PI;   // vertical angle
    Vector4 dir = MakeVector4(sin(azimuth), tan(elevation), cos(azimuth), 0);
    dir = dir / dir.len();
    return Ray(origin, coord * dir);
}
TEXTURE
本质上就是上一题的逆操作。我们首先将方向向量归一化,再由其分量反解出水平与垂直两个角度,进而换算成纹理坐标:
// Map a direction vector to a texel of the spherical texture.  The
// direction is normalized, converted to (longitude, latitude) angles, and
// scaled into [0,1]x[0,1] texture coordinates — the inverse of the
// panorama camera's ray generation.
Color ModelSphereTexture::GetColor(Vector4 direction) const
{
    direction = direction / direction.len();
    double x = 0, y = 0;
    if (isZero(direction.x) && isZero(direction.z)) {
        // Pole directions: longitude is undefined, pick column 0.
        // The bottom pole (y < 0) maps to the last row (y = 1), the top
        // pole to the first row (y = 0), matching the theta2 limits below.
        if (direction.y < 0)
            y = 1;
        else
            y = 0; // BUG FIX: was -1, which produced a negative row index
    }
    else {
        // Longitude from the (x, z) components...
        double sn = -direction.x, cs = -direction.z;
        double w = 1.0 / sqrt(sn * sn + cs * cs);
        double theta1 = acos(w * cs);
        if (sn < 0)
            theta1 = -theta1;       // acos only covers [0, PI]
        // ...latitude from the y component.
        double theta2 = atan(direction.y * w);
        x = 0.5 - (theta1 / 2 / PI);  // theta1 in (-PI, PI]      -> x in [0, 1)
        y = 0.5 - (theta2 / PI);      // theta2 in (-PI/2, PI/2)  -> y in (0, 1)
    }
    int w = texture->GetWidth();
    int h = texture->GetHeight();
    return texture->GetColor(std::min(w - 1, (int)(x * w)), std::min(h - 1, (int)(y * h)));
}
PATHTRACING
终于稍微硬核一点的题目。我们参照 RayTrace 的写法,只需要修改其碰到漫反射面的处理规则即可。
根据完全点采样的定义,在碰到漫反射面后,我们枚举每一个光源,在上面随机出一个点,并且将整一个面光源看成这一个点。然后,我们检查这个“点光源”和漫反射面上的点中间是否有阻隔,如果没有的话就根据学习资料最后提到的公式,直接将亮度累加即可。由于此时我们已经计算了光源直接打到这个点带来的贡献,那么我们在随机选择一个反射向量后,就不应当再统计一次经过的光源亮度,否则这个光源就会被计算两次,影响结果。
根据轮盘赌思想,我们需要在准备递归的时候进入一个判断:这个判断有 RANDOM_CANCEL_TRACE(代码中取 0.1)的概率直接终止递归;若继续递归,则把递归结果除以 1 - RANDOM_CANCEL_TRACE,以保证估计的期望不变。
判断两点之间是否有面、计算是否穿过光源等都可以使用相交函数和距离判定等方式实现。
#include "include/LightPathTrace.h"
namespace Light
{
// Scene implementation that renders via path tracing: diffuse bounces
// sample the area lights directly, and recursion is terminated by
// Russian roulette (see moveForward / RANDOM_CANCEL_TRACE below).
class ScenePathTrace: public Scene
{
public:
int nSource;       // number of area light sources in pSource
Source * pSource;  // light source array (never deleted by this class)
Model * pModel;    // scene geometry (never deleted by this class)
ScenePathTrace();
ScenePathTrace(Camera * _pcamara, Film * _pfilm, Model * _pmodel, Source * _psource, int nsource);
virtual Vector3 SampleRay(const Ray & ray, Random::RAND_ENGINE *eng = 0);
// Core recursive tracer.  emitStraightLightIncrease == true means the
// caller already sampled direct light, so hitting a light source with
// this ray must not add its luminance again.
Vector3 SampleRayTracer(const Ray & ray, Random::RAND_ENGINE *eng, bool emitStraightLightIncrease);
};
// Default-construct an empty scene: all component pointers are null and
// there are no light sources.
ScenePathTrace::ScenePathTrace()
{
    pCam = 0;
    pFilm = 0;
    pModel = 0;
    pSource = 0;
    nSource = 0;
}
// Construct a scene from externally owned camera, film, model and lights.
ScenePathTrace::ScenePathTrace(Camera * _pcamara, Film * _pfilm, Model * _pmodel, Source * _psource, int nsource)
{
    pCam = _pcamara;
    pFilm = _pfilm;
    pModel = _pmodel;
    pSource = _psource;
    nSource = nsource;
}
// Factory: heap-allocate a path-tracing scene (the caller owns the result).
Scene* GetPathTraceScene(Camera * _pcamara, Film * _pfilm, Model * _pmodel, Source * _psource, int nsource)
{
    Scene *scene = new ScenePathTrace(_pcamara, _pfilm, _pmodel, _psource, nsource);
    return scene;
}
// Draw a uniformly distributed unit vector on the hemisphere around
// `normal`, using rejection sampling of the unit ball.
inline Vector4 randVector4(Random::RAND_ENGINE *eng, const Vector4& normal){
    Vector4 sample = MakeVector4();
    for (;;) {
        sample.x = 2 * Random::randDouble(eng, 0, 1) - 1;
        sample.y = 2 * Random::randDouble(eng, 0, 1) - 1;
        sample.z = 2 * Random::randDouble(eng, 0, 1) - 1;
        // Accept points inside the unit ball, away from the origin.
        if (sample.len_sq() <= 1 && !isZero(sample.len_sq()))
            break;
    }
    sample = sample / sample.len();
    // Flip into the hemisphere of the surface normal.
    if (dot(sample, normal) < 0)
        return -sample;
    return sample;
}
// Draw a uniformly distributed point inside triangle t: sample the unit
// square and fold samples from the far half back across the diagonal.
inline Vector4 randTrianglePoint(Random::RAND_ENGINE *eng, const Triangle& t){
    const Vector4 edge1 = t.v1 - t.v0;
    const Vector4 edge2 = t.v2 - t.v0;
    double u = Random::randDouble(eng, 0, 1);
    double v = Random::randDouble(eng, 0, 1);
    if (u + v >= 1) {
        u = 1 - u;
        v = 1 - v;
    }
    return t.v0 + u * edge1 + v * edge2;
}
// Defined later in this namespace; declared here for SampleRayTracer.
Vector4 SpecularReflectRay(Vector4 vIn, Vector4 vNorm);
// Russian-roulette termination probability per recursion step.
const double RANDOM_CANCEL_TRACE = 0.1;
// Number of direct-light samples taken per diffuse hit.
const int STRAIGHT_LIGHT_SAMPLE_COUNT = 1;
// Russian roulette: true when the path should keep bouncing, which
// happens with probability 1 - RANDOM_CANCEL_TRACE.
inline bool moveForward(Random::RAND_ENGINE *eng) {
    return Random::randDouble(eng, 0, 1) >= RANDOM_CANCEL_TRACE;
}
// Recursive path tracer for a single ray.
// Diffuse hits sample the area lights directly (next-event estimation);
// specular hits follow the mirror direction.  Russian roulette decides
// whether to recurse, and the recursive term is divided by the survival
// probability to keep the estimator unbiased.
// emitStraightLightIncrease == true means the previous (diffuse) bounce
// already accounted for direct light, so a light source hit by this ray
// must not add its luminance a second time.
Vector3 ScenePathTrace::SampleRayTracer(const Ray & ray, Random::RAND_ENGINE *eng, bool emitStraightLightIncrease)
{
Vector3 res = MakeVector3();
double d;
Vector4 vIn = -ray.d;
Vector4 vNorm;
Vector4 vPoint;
Surface surface;
d = pModel->GetIntersection(ray, &vNorm, &surface);
if (!std::isinf(d))
{
// Make the normal face the incoming ray.
if (dot(vNorm, vIn) < 0) vNorm = -vNorm;
vPoint = ray.o + d*ray.d;
if (surface.type == SURFACE_TYPE_DIFFUSE)
{
// Direct lighting: STRAIGHT_LIGHT_SAMPLE_COUNT samples on every
// area light, each tested for visibility from the shading point.
int cnt = STRAIGHT_LIGHT_SAMPLE_COUNT; while(cnt --)
for (int i = 0; i < nSource; i++)
{
Vector4 forw = randTrianglePoint(eng, pSource[i].triangle);
Vector4 dir = forw - vPoint;
if(dir.len() < EPS)
continue;
dir = dir / dir.len();
Ray r(vPoint, dir);
// Light samples below the surface contribute nothing.
if(dot(vNorm, r.d) < 0)
continue;
// Distance to nearest occluder vs. distance to the light triangle.
double dis = pModel->GetIntersection(r);
Vector4 vp = MakeVector4(), vn = MakeVector4();
double dis2 = triangleIntersection(pSource[i].triangle.v0, pSource[i].triangle.v1, pSource[i].triangle.v2, r, &vp, &vn);
// Accept when the light is hit and nothing blocks the way there.
if(!std::isinf(dis2) && (dis2 < dis + EPS || std::isinf(dis)) && dis2 > EPS){
Vector4 vi = -r.d;
// Orient the light's normal towards the shading point.
if(dot(vn, vi) < 0)
vn = -vn;
// Area-light estimator: L * A * cos(light) * cos(surface) / (PI * r^2),
// with both cosines normalized by the corresponding vector lengths.
res = res + pSource[i].luminance * pSource[i].triangle.Area() * dot(vn, vi) * dot(vNorm, r.d) / (PI * dis2 * dis2 * vn.len() * vNorm.len());
}
}
res = res / STRAIGHT_LIGHT_SAMPLE_COUNT;
// Indirect bounce in a random hemisphere direction, compensated by the
// Russian-roulette survival probability.  Direct light was already
// sampled above, hence the `true` flag on the recursive call.
if(moveForward(eng))
res = res + SampleRayTracer(Ray(vPoint, randVector4(eng, vNorm)), eng, true) / (1.0 - RANDOM_CANCEL_TRACE);
}
else if(surface.type == SURFACE_TYPE_SPECULAR)
{
// Perfect mirror: follow the reflected direction only.
Ray reflect_ray;
reflect_ray.o = vPoint;
reflect_ray.d = SpecularReflectRay(vIn, vNorm);
if(moveForward(eng))
res = res + SampleRayTracer(reflect_ray, eng, false) / (1.0 - RANDOM_CANCEL_TRACE);
}
// Modulate everything gathered at this hit by the surface color.
res = res * surface.color;
}
// Emission: when this ray may still count light sources directly, add the
// luminance of every light the ray reaches before hitting geometry.
if(!emitStraightLightIncrease){
for (int i = 0; i < nSource; i++)
{
Vector4 vp = MakeVector4(), vn = MakeVector4();
double dis = triangleIntersection(pSource[i].triangle.v0, pSource[i].triangle.v1, pSource[i].triangle.v2, ray, &vp, &vn);
if(!std::isinf(dis) && (std::isinf(d) || dis < d + EPS) && dis > EPS)
res = res + pSource[i].luminance;
}
}
return res;
}
// Public entry point: trace one camera ray.  Direct light hits must still
// be counted at the first bounce, so the emission flag starts out false.
Vector3 ScenePathTrace::SampleRay(const Ray & ray, Random::RAND_ENGINE *eng)
{
    return SampleRayTracer(ray, eng, false);
}
// Reflect vIn (pointing away from the surface) about vNorm and return the
// normalized mirror direction.
Vector4 SpecularReflectRay(Vector4 vIn, Vector4 vNorm)
{
    Vector4 mirrored = 2 * dot(vIn, vNorm) * vNorm - vIn;
    mirrored = mirrored / mirrored.len();
    return mirrored;
}
}
ACCELERATE
问题实际为:给出若干个点,以及一些由三个点下标组成的三角形,其构成了一个网格模型,每一次询问 Intersection 的时候,你需要 快速 算出射线和这些三角形的交中最靠近射线端点的一个。
我们注意到还有 BoundingBox 类完全没有被使用,而题目也提示了这个类可以加速计算速度。这个类表示的是一个所有边平行于坐标轴的立方体,特征是可以快速算出和一条射线的交。根据这个性质,我们考虑使用层级结构,对于每一层,事先算出包裹了所有三角形的 BoundingBox,如果射线和它没有交点,就说明了和里面的所有三角形没有交点。
考虑使用 K-D Tree,将一个集合分割到两侧的时候,通过枚举各三角形包围盒在当前维度上的端点作为候选分割位置,并最小化两侧“三角形数量 × 包围盒在该维度上的跨度”之和来选出最优分割。
于是我们就可以得到一棵二叉树,每一次询问只需要在树上不断搜索,通过盒子测交和左右儿子距离等方式进行剪枝,就可以达到一个不错的效率。
实际上,这并不是正统的写法。正统的 K-D Tree + BoundingBox 需要考虑到三角形和盒子的相交情况,相比之下效率反而会比较低。还有一种实现思路是每一次将集合分成相对均匀的两组,具体而言为找到最长的维度并且找到最优分法,而不是和 K-D Tree 一样轮换选择维度,也不会出现一个三角形在多个同层节点中出现的情况,此处不再展开。
#include "LightMeshAccelerate.h"
namespace Light
{
// Forward declaration of the free-function implementation further below.
void Initialize(int nVert, Vector4 *pVert, int nInd, Index* pInd);
// Build the KD-tree acceleration structure over this mesh's triangles.
void MeshAccelerate::Initialize()
{
Light::Initialize(nVert, pVert, nInd, pInd);
}
// Forward declaration of the free-function implementation further below.
double Intersection(const Ray& ray, Vector4* normal);
// Nearest ray/mesh intersection.  Returns the hit distance (INFINITY on a
// miss); when non-null, *point and *normal receive the hit position and
// the hit triangle's normal.  Note: *point is written even on a miss
// (its components become infinite since d is INFINITY).
double MeshAccelerate::Intersection(const Ray& ray, Vector4* point, Vector4* normal) const
{
double d = Light::Intersection(ray, normal);
if (point) *point = ray.o + ray.d*d;
return d;
}
// Mesh data shared by the KD-tree routines below (set by Initialize).
int nVert;      // number of vertices in pVert
Vector4 *pVert; // vertex positions (referenced, not owned)
int nInd;       // number of triangles in pInd
Index* pInd;    // per-triangle vertex index triples (referenced, not owned)
// KD-tree query result: hit distance plus the normal of the hit triangle.
// Ordered by distance so std::min picks the nearer of two hits.
struct IKdyQuery {
double d;  // distance along the ray (INFINITY == miss)
Vector4 v; // normal of the triangle that was hit
IKdyQuery(double d = 0, Vector4 v = MakeVector4())
: d(d), v(v){};
bool operator < (const IKdyQuery& e) const {
return d < e.d;
}
};
// KD-tree node.  Leaves keep their triangle indices in triangleId;
// internal nodes have an empty triangleId and both children set.
// The destructor releases the whole subtree.
struct KD_Tree {
KD_Tree *left, *right;
BoundingBox bbx;             // bounds of every triangle in this subtree
std::vector<int> triangleId; // non-empty only at leaves
KD_Tree() {
triangleId = std::vector<int>();
left = right = NULL;
}
~KD_Tree() {
if(left)
delete left;
if(right)
delete right;
}
// Nearest intersection of `ray` with the triangles in this subtree.
IKdyQuery KDT_Query(const Ray& ray);
} *rootKDT;
// Extent of box `bx` along the split axis for depth `dep`
// (axes cycle x -> y -> z).
double boundingBoxRange(BoundingBox& bx, int dep){
    switch (dep % 3) {
    case 0:
        return bx.x1 - bx.x0;
    case 1:
        return bx.y1 - bx.y0;
    default:
        return bx.z1 - bx.z0;
    }
}
// Lower (idx == 0) or upper (idx != 0) bound of box `bx` along the split
// axis for depth `dep` (axes cycle x -> y -> z).
double boundingBoxValue(BoundingBox& bx, int dep, int idx){
    switch (dep % 3) {
    case 0:
        return (idx == 0) ? bx.x0 : bx.x1;
    case 1:
        return (idx == 0) ? bx.y0 : bx.y1;
    default:
        return (idx == 0) ? bx.z0 : bx.z1;
    }
}
// Recursively build a KD-tree over the triangles listed in `v`.
// The split axis cycles x -> y -> z with `dep`.  Candidate split positions
// are the per-triangle bounding-box endpoints on that axis; the partition
// minimizing count(left)*extent(left) + count(right)*extent(right) is
// chosen — a simplified surface-area heuristic.  A triangle whose box
// straddles the split may be placed in both children.
KD_Tree* createKDT(std::vector<int> v, int dep = 0){
// Per-triangle boxes, plus prefix/suffix merged boxes in endpoint order.
std::vector<BoundingBox> bxs(v.size());
std::vector<BoundingBox> preBxs(v.size());
std::vector<BoundingBox> sufBxs(v.size());
// Sweep events: (coordinate, (local index, -1 = lower / 1 = upper end)).
std::vector<std::pair<double, std::pair<int, int> > > qs;
for(int i = 0; i < (int)v.size(); i ++){
// Bounding box of triangle v[i], built by merging its three vertices.
bxs[i] = BoundingBox(pVert[pInd[v[i]].v0].vec3(), pVert[pInd[v[i]].v0].vec3());
bxs[i] = boundingBoxMerge(bxs[i], BoundingBox(pVert[pInd[v[i]].v1].vec3(), pVert[pInd[v[i]].v1].vec3()));
bxs[i] = boundingBoxMerge(bxs[i], BoundingBox(pVert[pInd[v[i]].v2].vec3(), pVert[pInd[v[i]].v2].vec3()));
qs.emplace_back(boundingBoxValue(bxs[i], dep, 0), std::make_pair(i, -1));
qs.emplace_back(boundingBoxValue(bxs[i], dep, 1), std::make_pair(i, 1));
}
// Bounds of the whole set.
BoundingBox bigBox = bxs[0];
for(int i = 1; i < (int)v.size(); i ++)
bigBox = boundingBoxMerge(bigBox, bxs[i]);
// Few enough triangles: stop splitting and store them in a leaf.
if(v.size() <= 5){
KD_Tree * ret = new KD_Tree();
ret -> bbx = bigBox;
ret -> triangleId = v;
return ret;
}
std::sort(qs.begin(), qs.end());
// preBxs[k] = merged box of the first k+1 triangles in lower-endpoint order.
int sz = 0;
for(int i = 0; i < (int)qs.size(); i ++) if(qs[i].second.second == -1){
preBxs[sz] = bxs[qs[i].second.first];
if(sz)
preBxs[sz] = boundingBoxMerge(preBxs[sz], preBxs[sz-1]);
++ sz;
}
// sufBxs[k] = merged box of the last k+1 triangles in upper-endpoint order.
sz = 0;
for(int i = (int)qs.size() - 1; i + 1; i --) if(qs[i].second.second == 1){
sufBxs[sz] = bxs[qs[i].second.first];
if(sz)
sufBxs[sz] = boundingBoxMerge(sufBxs[sz], sufBxs[sz-1]);
++ sz;
}
// Sweep the sorted events.  L counts triangles entirely left of the sweep
// position, R those entirely right; mn tracks the best (cost, (L, R)).
int L = 0, R = v.size();
std::pair<double, std::pair<int, int> > mn = std::make_pair(boundingBoxRange(preBxs.back(), dep) * R, std::make_pair(L, R));
for(int q = 0; q < (int)qs.size(); q ++){
if(qs[q].second.second == 1)
-- R;
double Lsize = (L == 0 ? 0 : boundingBoxRange(preBxs[L-1], dep));
double Rsize = (R == 0 ? 0 : boundingBoxRange(sufBxs[R-1], dep));
mn = std::min(mn, std::make_pair(L * Lsize + R * Rsize, std::make_pair(L, R)));
if(qs[q].second.second == -1)
++ L;
}
// Materialize the chosen partition: the first mn.second.first triangles in
// lower-endpoint order go left, the last mn.second.second in
// upper-endpoint order go right (a straddling triangle may go to both).
std::vector<int> newL(0), newR(0);
sz = 0;
for(int i = 0; i < (int)qs.size(); i ++) if(qs[i].second.second == -1){
if(sz < mn.second.first)
newL.push_back(v[qs[i].second.first]);
++ sz;
}
sz = 0;
for(int i = (int)qs.size() - 1; i + 1; i --) if(qs[i].second.second == 1){
if(sz < mn.second.second)
newR.push_back(v[qs[i].second.first]);
++ sz;
}
// Degenerate split: fall back to a single leaf with the non-empty side.
if(newL.empty()){
KD_Tree * ret = new KD_Tree();
ret -> bbx = bigBox;
ret -> triangleId = newR;
return ret;
}
if(newR.empty()){
KD_Tree * ret = new KD_Tree();
ret -> bbx = bigBox;
ret -> triangleId = newL;
return ret;
}
// Internal node: recurse on both halves with the next split axis.
KD_Tree * ret = new KD_Tree();
ret -> bbx = bigBox;
ret -> left = createKDT(newL, dep + 1);
ret -> right = createKDT(newR, dep + 1);
return ret;
}
void Initialize(int nVert, Vector4 *pVert, int nInd, Index* pInd)
{
Light::nVert = nVert;
Light::pVert = pVert;
Light::nInd = nInd;
Light::pInd = pInd;
delete rootKDT;
std::vector<int> v(nInd);
for(int i = 0; i < nInd; i ++)
v.push_back(i);
rootKDT = createKDT(v);
}
// Nearest intersection of `ray` with the triangles in this subtree.
// Returns (INFINITY, zero vector) when nothing is hit.
IKdyQuery KD_Tree::KDT_Query(const Ray& ray) {
// Leaf: test every stored triangle and keep the closest hit.
if(triangleId.size()) {
double d = INFINITY;
Vector4 n = MakeVector4(), norm;
for(int i = 0; i < (int)triangleId.size(); i ++){
double pd = triangleIntersection(
pVert[pInd[triangleId[i]].v0],
pVert[pInd[triangleId[i]].v1],
pVert[pInd[triangleId[i]].v2],
ray,
NULL,
&norm
);
// pd > EPS rejects self-intersections right at the ray origin.
if (pd < d && pd > EPS)
{
d = pd;
n = norm;
}
}
return IKdyQuery(d, n);
}
// Internal node: both children exist by construction (createKDT only makes
// an internal node when both halves are non-empty).  Prune any subtree
// whose bounding box the ray misses entirely.
double dl = boundingBoxIntersection(left -> bbx, ray);
double dr = boundingBoxIntersection(right -> bbx, ray);
if(std::isinf(dl) && std::isinf(dr))
return IKdyQuery(INFINITY, MakeVector4());
if(std::isinf(dl))
return right -> KDT_Query(ray);
if(std::isinf(dr))
return left -> KDT_Query(ray);
// Visit the nearer box first; the other child can be skipped when the hit
// already found is closer than that box's entry distance.
if(dl < dr) {
IKdyQuery ret = left -> KDT_Query(ray);
if(ret.d < dr)
return ret;
return std::min(ret, right -> KDT_Query(ray));
}
else {
IKdyQuery ret = right -> KDT_Query(ray);
if(ret.d < dl)
return ret;
return std::min(ret, left -> KDT_Query(ray));
}
}
double Intersection(const Ray& ray, Vector4* normal)
{
IKdyQuery ret = rootKDT -> KDT_Query(ray);
if(normal)
*normal = ret.v;
return ret.d;
}
}
好玩的
- 考场上卡在第三题的主要原因是第二题用了 new 忘了 delete,导致内存爆炸。最近重新找到这套题目,三个小时就把前六题做完了。
- 第七个点死活调不出来,扔到 Linux 上居然过了,最后查出来原因是没有初始化随机向量的第四维度,而长度的计算居然是会算入这个维度的。
- 最后一个点不到一个小时写完了。
- 第五个点一直感觉图不是很对,然后我直接改了 panorama.cpp 里面摄像头的类型,最后才知道我的图片反了。
- 现在打算挂一个晚上电脑,把第七个点的图放大四倍渲染。
- 实际上,渲染兔子比渲染宝石快 10 倍,虽然兔子点数是宝石的 100 倍。