Commit e53955a4 authored by Christian Schulte zu Berge

IpsviRaycaster cleanup and added some documentation.

parent 6306b833
@@ -115,7 +115,7 @@ vec3 calculatePhongShading(in vec3 position, in LightSource light, in vec3 camer
* \param normal Normal
* \param materialColor Material color (used for all shading coefficients)
*/
vec3 calculatePhongShading(in vec3 position, in LightSource light, in vec3 camera, in vec3 normal, in vec3 materialColor) {
vec3 calculatePhongShading(in vec3 position, in vec3 ambientColorOverride, LightSource light, in vec3 camera, in vec3 normal, in vec3 materialColor) {
vec3 N = normalize(normal);
vec3 V = normalize(camera - position);
@@ -124,7 +124,7 @@ vec3 calculatePhongShading(in vec3 position, in LightSource light, in vec3 camer
float d = length(L);
L /= d;
vec3 toReturn = materialColor * light._ambientColor; // ambient term
vec3 toReturn = materialColor * ambientColorOverride; // ambient term
toReturn += materialColor * getDiffuseTerm(light._diffuseColor, N, L);
toReturn += materialColor * getSpecularTerm(light._specularColor, N, L, V, light._shininess);
#ifdef PHONG_APPLY_ATTENUATION
@@ -133,6 +133,19 @@ vec3 calculatePhongShading(in vec3 position, in LightSource light, in vec3 camer
return toReturn;
}
/**
* Computes the phong shading according to the given parameters.
*
* \param position sample position
* \param light LightSource
* \param camera Camera position
* \param normal Normal
* \param materialColor Material color (used for all shading coefficients)
*/
vec3 calculatePhongShading(in vec3 position, in LightSource light, in vec3 camera, in vec3 normal, in vec3 materialColor) {
return calculatePhongShading(position, light._ambientColor, light, camera, normal, materialColor);
}
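// Note: this overload keeps the original behavior by forwarding the light source's own ambient color as the override.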
/**
* Computes the phong shading intensity according to the given parameters.
......
@@ -90,6 +90,9 @@ ivec2 calcIcSamplePosition(vec3 worldPosition) {
return ivec2(round(dot(worldProjected, _icRightVector)), round(dot(worldProjected, _icUpVector)));
}
// the composite function can be used for additional shadow ray integration
// from the current sample to the position of the IC.
// However, it's currently not used for performance reasons
void composite(vec3 startPosition, vec3 endPosition, inout float opacity) {
vec3 direction = endPosition - startPosition;
float t = _samplingStepSize;
@@ -145,11 +148,6 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
// optimization: Only store/load when the icPosition has changed
// otherwise we can reuse the variables from the previous sample
if (icPositionPrev != icPosition) {
// if there is no updated illumination information to be saved,
// carry over the old pixel
//if (! toBeSaved)
// icOut = imageLoad(_icImageIn, icPositionPrev);
// write illumination information
if (toBeSaved)
imageStore(_icImageOut, icPositionPrev, vec4(icOut));
@@ -171,8 +169,7 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
// perform compositing
if (color.a > 0.0) {
// compute gradient (needed for shading and normals)
vec3 gradient = computeGradient(_volume, _volumeTextureParams, samplePosition);
color.rgb = calculatePhongShading(worldPos, _lightSource, _cameraPosition, gradient, color.rgb);
vec3 gradient = computeGradient(_volume, _volumeTextureParams, samplePosition);
// account for variable sampling rates
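// opacity correction: alpha' = 1 - (1 - alpha)^(_samplingStepSize * SAMPLING_BASE_INTERVAL_RCP)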
color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
@@ -186,7 +183,8 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
toBeSaved = true;
// apply shadowing
color.rgb *= (1.0 - icIn * _shadowIntensity);
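// note: the shadow factor below now modulates only the ambient color fed into the Phong shading, instead of scaling the already shaded color as before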
const vec3 ambientColorOverride = _lightSource._ambientColor * (1.0 - icIn * _shadowIntensity);
color.rgb = calculatePhongShading(worldPos, ambientColorOverride, _lightSource, _cameraPosition, gradient, color.rgb);
// front-to-back compositing along view direction
result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
@@ -212,8 +210,6 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
t += _samplingStepSize / len;
}
//if (! toBeSaved)
// icOut = imageLoad(_icImageIn, icPositionPrev);
if (toBeSaved)
imageStore(_icImageOut, icPositionPrev, vec4(icOut));
......
@@ -43,8 +43,7 @@ namespace campvis {
, p_lightId("LightId", "Input Light Source", "lightsource", DataNameProperty::READ)
, p_sweepLineWidth("SweepLineWidth", "Sweep Line Width", 2, 1, 32)
, p_icTextureSize("IcTextureSize", "Illumination Cache Texture Size", cgt::ivec2(512), cgt::ivec2(32), cgt::ivec2(2048))
, p_shadowIntensity("ShadowIntensity", "Shadow Intensity", .9f, 0.f, 1.f)
, p_numLines("NumLines", "Max Number of Lines", 2000, 1, 2000)
, p_shadowIntensity("ShadowIntensity", "Shadow Intensity", .75f, 0.f, 1.f)
, _vhm(nullptr)
{
_icTextures[0] = nullptr;
@@ -54,7 +53,6 @@ namespace campvis {
addProperty(p_sweepLineWidth);
addProperty(p_icTextureSize, INVALID_RESULT | INVALID_IC_TEXTURES);
addProperty(p_shadowIntensity);
addProperty(p_numLines);
setPropertyInvalidationLevel(p_transferFunction, INVALID_BBV | INVALID_RESULT);
setPropertyInvalidationLevel(p_sourceImageID, INVALID_BBV | INVALID_RESULT);
@@ -82,13 +80,13 @@ namespace campvis {
}
void IpsviRaycaster::processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image) {
// (re)create Illumination Cache (IC) textures if needed
if (getInvalidationLevel() & INVALID_IC_TEXTURES) {
delete _icTextures[0];
delete _icTextures[1];
cgt::ivec3 icSize(p_icTextureSize.getValue(), 1);
cgt::TextureUnit icUnit;
icUnit.activate();
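// two single-channel float (GL_R32F) textures forming a ping-pong pair for the illumination cache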
_icTextures[0] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_R32F);
_icTextures[1] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_R32F);
@@ -96,6 +94,7 @@ namespace campvis {
validate(INVALID_IC_TEXTURES);
}
// update VoxelHierarchyMapper's hierarchy
if (getInvalidationLevel() & INVALID_BBV) {
_shader->deactivate();
_vhm->createHierarchy(image, p_transferFunction.getTF());
@@ -116,23 +115,22 @@ namespace campvis {
processDirectional(data, image, *camera, *light);
}
else {
LDEBUG("Could not load light source from DataContainer.");
LWARNING("Could not load all the needed data from the DataContainer.");
}
}
void IpsviRaycaster::processDirectional(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light) {
const cgt::vec3& lightSink = camera.getCamera().getFocus();
const cgt::vec3 lightSource = camera.getCamera().getFocus() + light.getLightPosition();
const cgt::vec3& lightDirection = light.getLightPosition();
const cgt::vec3 lightSource = camera.getCamera().getFocus() - light.getLightPosition();
const cgt::vec3& lightDirection = -light.getLightPosition();
// TODO: This should be a world to NDC space conversion, but it does not work... :(
// transformation matrices for world -> NDC -> viewport
const auto V = camera.getCamera().getViewMatrix();
const auto P = camera.getCamera().getProjectionMatrix();
// calculate viewport matrix for NDC -> viewport conversion
const cgt::vec2 halfViewport = cgt::vec2(getEffectiveViewportSize()) / 2.f;
const cgt::mat4 viewportMatrix = cgt::mat4::createTranslation(cgt::vec3(halfViewport, 0.f)) * cgt::mat4::createScale(cgt::vec3(halfViewport, 1.f));
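// (maps NDC coordinates in [-1,1] to pixel coordinates: p_pixel = (p_ndc + 1) * halfViewport)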
// project light source and direction into pixel space
const cgt::vec4 projectedLight = viewportMatrix*P*V* cgt::vec4(lightSource, 1.f);
const cgt::vec4 projectedOrigin = viewportMatrix*P*V* cgt::vec4(lightSink, 1.f);
cgt::vec2 projectedLightDirection = projectedOrigin.xy()/projectedOrigin.w - projectedLight.xy()/projectedLight.w;
@@ -155,26 +153,26 @@ namespace campvis {
sweepDir = TopToBottom;
}
// START: compute illumination cache (IC) plane/texture
// compute illumination cache (IC) plane
// the plane's normal is the light direction
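// build an orthonormal basis for the plane: pick a provisional up axis that is not (nearly) parallel to the light direction, then orthogonalize it via two cross products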
cgt::vec3 icNormal = cgt::normalize(lightDirection);
cgt::vec3 icUpVector = (std::abs(cgt::dot(icNormal, cgt::vec3(0.f, 0.f, 1.f))) < 0.99) ? cgt::vec3(0.f, 0.f, 1.f) : cgt::vec3(0.f, 1.f, 0.f);
cgt::vec3 icRightVector = cgt::normalize(cgt::cross(icNormal, icUpVector));
icUpVector = cgt::normalize(cgt::cross(icRightVector, icNormal));
// project all 8 corners of the volume onto the IC plane
// project all 8 corners of the volume onto the IC plane to get the volume bounds in viewport space
cgt::Bounds worldBounds = image->getParent()->getWorldBounds();
cgt::Bounds viewportBounds;
cgt::vec3 minPixel(0.f), maxPixel(0.f);
std::vector<cgt::vec3> corners;
corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getLLF().y, worldBounds.getLLF().z));
corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getLLF().y, worldBounds.getURB().z));
corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getURB().y, worldBounds.getLLF().z));
corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getURB().y, worldBounds.getURB().z));
corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getLLF().y, worldBounds.getLLF().z));
corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getLLF().y, worldBounds.getURB().z));
corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getURB().y, worldBounds.getLLF().z));
corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getURB().y, worldBounds.getURB().z));
std::vector<cgt::vec3> corners(8, cgt::vec3(0.f));
corners[0] = cgt::vec3(worldBounds.getLLF().x, worldBounds.getLLF().y, worldBounds.getLLF().z);
corners[1] = cgt::vec3(worldBounds.getLLF().x, worldBounds.getLLF().y, worldBounds.getURB().z);
corners[2] = cgt::vec3(worldBounds.getLLF().x, worldBounds.getURB().y, worldBounds.getLLF().z);
corners[3] = cgt::vec3(worldBounds.getLLF().x, worldBounds.getURB().y, worldBounds.getURB().z);
corners[4] = cgt::vec3(worldBounds.getURB().x, worldBounds.getLLF().y, worldBounds.getLLF().z);
corners[5] = cgt::vec3(worldBounds.getURB().x, worldBounds.getLLF().y, worldBounds.getURB().z);
corners[6] = cgt::vec3(worldBounds.getURB().x, worldBounds.getURB().y, worldBounds.getLLF().z);
corners[7] = cgt::vec3(worldBounds.getURB().x, worldBounds.getURB().y, worldBounds.getURB().z);
for (auto i = 0; i < corners.size(); ++i) {
const cgt::vec3 diag = corners[i];
@@ -195,6 +193,9 @@ namespace campvis {
icRightVector *= float(icSize.x - 1) / (std::ceil(maxPixel.x) - std::floor(minPixel.x)) ;
icUpVector *= float(icSize.y - 1) / (std::ceil(maxPixel.y) - std::floor(minPixel.y));
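// scale the basis vectors so that dot products with projected world positions (cf. calcIcSamplePosition in the fragment shader) address the full IC texture resolution across the projected volume footprint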
// * all preparations done, let's do the rendering *
// bind voxel hierarchy to shader
cgt::TextureUnit xorUnit, bbvUnit;
{
@@ -239,8 +240,10 @@ namespace campvis {
glEnable(GL_DEPTH_TEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
auto viewportSize = getEffectiveViewportSize();
const auto viewportSize = getEffectiveViewportSize();
// set up the helper variables for rendering the sweep lines (according to the sweep direction)
// as an additional optimization, restrict the rendered lines to the volume extent in viewport space
cgt::mat4 projection;
cgt::mat4 viewScale;
cgt::vec3 viewTranslationBase;
@@ -248,54 +251,54 @@ namespace campvis {
int lineMax;
float scale = 1.f;
float bias = 0.f;
switch (sweepDir) {
case LeftToRight:
scale = float(viewportSize.y) / viewportBounds.diagonal().y;
bias = viewportBounds.getLLF().y / float(viewportSize.y) * scale;
projection = cgt::mat4::createOrtho(0, viewportSize.x, scale-bias, -bias, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(p_sweepLineWidth.getValue(), 1.f, 1.f));
viewTranslationBase = cgt::vec3(1.f, 0.f, 0.f);
line += std::max(0, int(viewportBounds.getLLF().x));
lineMax = std::min(viewportSize.x, int(viewportBounds.getURB().x));
break;
case RightToLeft:
scale = float(viewportSize.y) / viewportBounds.diagonal().y;
bias = viewportBounds.getLLF().y / float(viewportSize.y) * scale;
projection = cgt::mat4::createOrtho(viewportSize.x, 0, scale-bias, -bias, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(p_sweepLineWidth.getValue(), 1.f, 1.f));
viewTranslationBase = cgt::vec3(1.f, 0.f, 0.f);
line += std::max(0, viewportSize.x - int(viewportBounds.getURB().x));
lineMax = std::min(viewportSize.x, viewportSize.x - int(viewportBounds.getLLF().x));
break;
case BottomToTop:
scale = float(viewportSize.x) / viewportBounds.diagonal().x;
bias = viewportBounds.getLLF().x / float(viewportSize.x) * scale;
projection = cgt::mat4::createOrtho(-bias, scale-bias, viewportSize.y, 0, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(1.f, p_sweepLineWidth.getValue(), 1.f));
viewTranslationBase = cgt::vec3(0.f, 1.f, 0.f);
line += std::max(0, int(viewportBounds.getLLF().y));
lineMax = std::min(viewportSize.y, int(viewportBounds.getURB().y));
break;
case TopToBottom:
scale = float(viewportSize.x) / viewportBounds.diagonal().x;
bias = viewportBounds.getLLF().x / float(viewportSize.x) * scale;
projection = cgt::mat4::createOrtho(-bias, scale-bias, 0, viewportSize.y, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(1.f, p_sweepLineWidth.getValue(), 1.f));
viewTranslationBase = cgt::vec3(0.f, 1.f, 0.f);
line += std::max(0, viewportSize.y - int(viewportBounds.getURB().y));
lineMax = std::min(viewportSize.y, viewportSize.y - int(viewportBounds.getLLF().y));
break;
case LeftToRight:
scale = float(viewportSize.y) / viewportBounds.diagonal().y;
bias = viewportBounds.getLLF().y / float(viewportSize.y) * scale;
projection = cgt::mat4::createOrtho(0, float(viewportSize.x), scale-bias, -bias, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(float(p_sweepLineWidth.getValue()), 1.f, 1.f));
viewTranslationBase = cgt::vec3(1.f, 0.f, 0.f);
line += std::max(0, int(viewportBounds.getLLF().x));
lineMax = std::min(viewportSize.x, int(viewportBounds.getURB().x));
break;
case RightToLeft:
scale = float(viewportSize.y) / viewportBounds.diagonal().y;
bias = viewportBounds.getLLF().y / float(viewportSize.y) * scale;
projection = cgt::mat4::createOrtho(float(viewportSize.x), 0, scale-bias, -bias, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(float(p_sweepLineWidth.getValue()), 1.f, 1.f));
viewTranslationBase = cgt::vec3(1.f, 0.f, 0.f);
line += std::max(0, viewportSize.x - int(viewportBounds.getURB().x));
lineMax = std::min(viewportSize.x, viewportSize.x - int(viewportBounds.getLLF().x));
break;
case BottomToTop:
scale = float(viewportSize.x) / viewportBounds.diagonal().x;
bias = viewportBounds.getLLF().x / float(viewportSize.x) * scale;
projection = cgt::mat4::createOrtho(-bias, scale-bias, float(viewportSize.y), 0, -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(1.f, float(p_sweepLineWidth.getValue()), 1.f));
viewTranslationBase = cgt::vec3(0.f, 1.f, 0.f);
line += std::max(0, int(viewportBounds.getLLF().y));
lineMax = std::min(viewportSize.y, int(viewportBounds.getURB().y));
break;
case TopToBottom:
scale = float(viewportSize.x) / viewportBounds.diagonal().x;
bias = viewportBounds.getLLF().x / float(viewportSize.x) * scale;
projection = cgt::mat4::createOrtho(-bias, scale-bias, 0, float(viewportSize.y), -1, 1);
viewScale = cgt::mat4::createScale(cgt::vec3(1.f, float(p_sweepLineWidth.getValue()), 1.f));
viewTranslationBase = cgt::vec3(0.f, 1.f, 0.f);
line += std::max(0, viewportSize.y - int(viewportBounds.getURB().y));
lineMax = std::min(viewportSize.y, viewportSize.y - int(viewportBounds.getLLF().y));
break;
}
int evenOdd = 0;
// some more shader setup
_shader->setUniform("_projectionMatrix", projection);
GLint uIcImageIn = _shader->getUniformLocation("_icImageIn");
GLint uIcImageOut = _shader->getUniformLocation("_icImageOut");
GLint uViewMatrix = _shader->getUniformLocation("_viewMatrix");
// finally we can start rendering :)
int evenOdd = 0;
while (line < lineMax) {
// ping-pong buffering to avoid concurrent read-writes
if (evenOdd % 2 == 0) {
@@ -313,9 +316,6 @@ namespace campvis {
line += p_sweepLineWidth.getValue();
++evenOdd;
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
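// make the imageStore() writes of this sweep line visible to the image reads of the next one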
if (evenOdd > p_numLines.getValue())
break;
}
// restore state
@@ -339,6 +339,11 @@ namespace campvis {
cgt::vec4 projectedLight = viewportMatrix * P * V * cgt::vec4(light.getLightPosition(), 1.f);
projectedLight /= projectedLight.w;
// The processing of point light sources is not yet implemented.
// It mostly works like four passes with directional lights. However, the projection
// of the illumination cache is no longer orthogonal, which complicates its definition.
// So far, I have been too lazy to do so - ambient occlusion might be cooler anyway.
cgtAssert(false, "Processing of point light sources not yet implemented.");
}
}
@@ -45,7 +45,11 @@ namespace campvis {
class VoxelHierarchyMapper;
/**
* Performs a simple volume ray casting.
* Raycaster that implements the Image Plane Sweep Volume Illumination algorithm of Sundén et al.
* This raycasting processor supports real-time directional light shadowing and additionally
* uses the VoxelHierarchyMapper for optimized entry-exit points.
*
* Requires OpenGL 4.4!
*/
class CAMPVIS_MODULES_API IpsviRaycaster : public RaycastingProcessor {
public:
@@ -71,7 +75,7 @@ namespace campvis {
/// \see AbstractProcessor::getName()
virtual const std::string getName() const { return getId(); };
/// \see AbstractProcessor::getDescription()
virtual const std::string getDescription() const { return "Performs a simple volume ray casting."; };
virtual const std::string getDescription() const { return "Raycaster that implements the Image Plane Sweep Volume Illumination algorithm of Sundén et al."; };
/// \see AbstractProcessor::getAuthor()
virtual const std::string getAuthor() const { return "Christian Schulte zu Berge <christian.szb@in.tum.de>"; };
/// \see AbstractProcessor::getProcessorState()
@@ -83,22 +87,19 @@ namespace campvis {
virtual void deinit();
DataNameProperty p_lightId; ///< Name/ID for the LightSource to use
IntProperty p_sweepLineWidth;
IVec2Property p_icTextureSize;
FloatProperty p_shadowIntensity;
IntProperty p_numLines;
IntProperty p_sweepLineWidth; ///< Width of the sweep line in pixels
IVec2Property p_icTextureSize; ///< Size of the Illumination Cache texture
FloatProperty p_shadowIntensity; ///< Intensity of the shadowing effect
protected:
/// \see RaycastingProcessor::processImpl()
virtual void processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image);
void processDirectional(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light);
void processPointLight(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light);
VoxelHierarchyMapper* _vhm;
cgt::Texture* _icTextures[2];
VoxelHierarchyMapper* _vhm; ///< for optimizing entry/exit points
cgt::Texture* _icTextures[2]; ///< Illumination cache textures
static const std::string loggerCat_;
};
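For context, a minimal configuration sketch for this processor, using the property names and default values declared above. The enclosing pipeline, the viewport size property passed to the constructor, and the setValue() calls follow the usual CAMPVis property API and are assumptions, not part of this commit:
// hypothetical setup sketch -- viewportSizeProperty and the surrounding pipeline are assumed
campvis::IpsviRaycaster raycaster(&viewportSizeProperty);
raycaster.p_lightId.setValue("lightsource");          // LightSource ID to read from the DataContainer
raycaster.p_sweepLineWidth.setValue(2);               // sweep line width in pixels
raycaster.p_icTextureSize.setValue(cgt::ivec2(512));  // illumination cache resolution
raycaster.p_shadowIntensity.setValue(.75f);           // matches the new default above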
......