Commit 931ccbe0 authored by Christian Schulte zu Berge

Further work on IpsviRaycaster. Directional illumination now works quite well. Most artifacts are fixed.
parent 5777b735
@@ -74,28 +74,34 @@ uniform float _samplingStepSize;
const float SAMPLING_BASE_INTERVAL_RCP = 200.0;
// projects a vector in world coordinates onto the IC
// returns world coordinates
ivec2 calcIcSamplePosition(vec3 worldPosition) {
// project world position onto IC plane
const vec3 diag = worldPosition - _icOrigin;
const float distance = abs(dot(diag, _icNormal));
const vec3 projected = diag - (-distance * _icNormal);
return ivec2(dot(projected, _icRightVector), dot(projected, _icUpVector));
const vec3 worldProjected = diag - (-distance * _icNormal);
// transforms world coordinates (have to be lying on the IC plane) to IC pixel space
return ivec2(round(dot(worldProjected, _icRightVector)), round(dot(worldProjected, _icUpVector)));
}
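For reference, a minimal CPU-side sketch of what calcIcSamplePosition computes, assuming the IC plane is described by an origin, a unit normal and right/up basis vectors scaled to IC pixel units (the small vector type and all names below are illustrative, not the CAMPVis API). The sample is projected onto the plane by removing its component along the normal and is then expressed in the right/up basis; the shader folds the sign of the distance into an abs(), which is equivalent as long as all samples lie on the same side of the plane.

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static float dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Project worldPos onto the IC plane and express the result in IC pixel coordinates.
void calcIcSamplePosition(const Vec3& worldPos, const Vec3& icOrigin, const Vec3& icNormal,
                          const Vec3& icRight, const Vec3& icUp, int& px, int& py) {
    const Vec3 diag = {worldPos.x - icOrigin.x, worldPos.y - icOrigin.y, worldPos.z - icOrigin.z};
    const float distance = dot(diag, icNormal);                    // signed distance to the plane
    const Vec3 projected = {diag.x - distance * icNormal.x,        // remove the normal component
                            diag.y - distance * icNormal.y,
                            diag.z - distance * icNormal.z};
    px = static_cast<int>(std::lround(dot(projected, icRight)));   // coordinates in the plane basis
    py = static_cast<int>(std::lround(dot(projected, icUp)));
}

int main() {
    int px = 0, py = 0;
    calcIcSamplePosition({1.f, 2.f, 3.f}, {0.f, 0.f, 0.f}, {0.f, 0.f, 1.f},
                         {1.f, 0.f, 0.f}, {0.f, 1.f, 0.f}, px, py);
    std::printf("IC pixel: (%d, %d)\n", px, py);                   // (1, 2)
}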
void composite(vec3 startPosition, vec3 endPosition, inout float opacity) {
vec3 direction = endPosition - startPosition;
float t = 0.0;
float tend = length(direction);
float t = _samplingStepSize;
jitterFloat(t, _samplingStepSize); // jitter startpoint to avoid ringing artifacts (not really effective...)
float tend = min(length(direction), 4*_samplingStepSize);
direction = normalize(direction);
while (t < tend) {
// lookup intensity and TF
vec3 samplePosition = startPosition.rgb + t * direction;
vec3 samplePosition = startPosition.xyz + t * direction;
float intensity = texture(_volume, samplePosition).r;
float tfOpacity = lookupTF(_transferFunction, _transferFunctionParams, intensity).a;
opacity = opacity + (1.0 - opacity) * tfOpacity;
t += _samplingStepSize * 2;
t += _samplingStepSize;
}
}
@@ -116,7 +122,9 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
jitterEntryPoint(entryPoint, direction, _samplingStepSize * _jitterStepSizeMultiplier);
ivec2 icPositionPrev = calcIcSamplePosition(textureToWorld(_volumeTextureParams, entryPoint));
vec4 icIn = imageLoad(_icImageIn, icPositionPrev);
vec4 icOut = vec4(0.0);
bool toBeSaved = false;
while (t < tend) {
// compute sample position
@@ -124,11 +132,26 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
vec3 worldPos = textureToWorld(_volumeTextureParams, samplePosition);
ivec2 icPosition = calcIcSamplePosition(worldPos);
vec4 icIn = imageLoad(_icImageIn, icPositionPrev);
// optimization: Only store/load when the icPosition has changed
// otherwise we can reuse the variables from the previous sample
if (icPositionPrev != icPosition) {
// if there is no updated illumination information to be saved,
// carry over the old pixel
if (! toBeSaved)
icOut = imageLoad(_icImageIn, icPositionPrev);
// write illumination information
imageStore(_icImageOut, icPositionPrev, icOut);
toBeSaved = false;
// load illumination information
icIn = imageLoad(_icImageIn, icPosition);
// perform a compositing from samplePosition to the samplePosition of the IC
// Currently disabled since it leads to ringing artifacts...
//if (icIn.xyz != vec3(0.0))
// composite(samplePosition, icIn.xyz, icIn.a);
}
// lookup intensity and TF
float intensity = texture(_volume, samplePosition).r;
@@ -138,7 +161,7 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
if (color.a > 0.0) {
// compute gradient (needed for shading and normals)
vec3 gradient = computeGradient(_volume, _volumeTextureParams, samplePosition);
color.rgb = calculatePhongShading(worldPos.xyz, _lightSource, _cameraPosition, gradient, color.rgb);
color.rgb = calculatePhongShading(worldPos, _lightSource, _cameraPosition, gradient, color.rgb);
// accomodate for variable sampling rates
color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
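The pow() term above is the usual opacity correction for variable sampling rates: an opacity defined for the reference step length (1 / SAMPLING_BASE_INTERVAL_RCP) is rescaled to the current step size so that front-to-back accumulation over a fixed distance yields roughly the same result regardless of how finely the ray is sampled. A small stand-alone check; apart from the shader constant, all values are made up for illustration.

#include <cmath>
#include <cstdio>

int main() {
    const float SAMPLING_BASE_INTERVAL_RCP = 200.0f;    // as in the shader
    const float tfOpacity = 0.02f;                       // opacity from the TF lookup (example value)
    const float rayLength = 0.1f;                        // composite over this distance

    const float stepSizes[] = {1.0f / 200.0f, 1.0f / 400.0f};
    for (float stepSize : stepSizes) {
        // rescale the per-sample opacity to the current step size
        const float corrected = 1.0f - std::pow(1.0f - tfOpacity, stepSize * SAMPLING_BASE_INTERVAL_RCP);
        float accumulated = 0.0f;
        const int numSamples = static_cast<int>(std::lround(rayLength / stepSize));
        for (int i = 0; i < numSamples; ++i)
            accumulated += (1.0f - accumulated) * corrected;   // front-to-back accumulation
        std::printf("step %.5f -> accumulated opacity %.4f\n", stepSize, accumulated);
        // both step sizes end up at roughly the same accumulated opacity (~0.33)
    }
}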
@@ -149,6 +172,7 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
// icOut.rgb = ((1.0 - color.a) * icIn.rgb) + (color.a * color.rgb);
icOut.xyz = samplePosition;
icOut.a = ((1.0 - color.a) * icIn.a) + color.a;
toBeSaved = true;
// apply shadowing
color.rgb *= (1.0 - icIn.a * _shadowIntensity);
@@ -157,8 +181,6 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
result.a = result.a + (1.0 -result.a) * color.a;
// update illumination information
imageStore(_icImageOut, icPosition, icOut);
icPositionPrev = icPosition;
}
@@ -179,6 +201,13 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
t += _samplingStepSize;
}
if (toBeSaved) {
imageStore(_icImageOut, icPositionPrev, icOut);
}
else {
imageStore(_icImageOut, icPositionPrev, imageLoad(_icImageIn, icPositionPrev));
}
// calculate depth value from ray parameter
gl_FragDepth = 1.0;
if (firstHitT >= 0.0) {
......
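The imageLoad/imageStore handling above behaves like a one-entry write-back cache: an IC texel is loaded once, updated locally while consecutive ray samples keep mapping to the same IC position, and only flushed to the output image when the ray moves to a different texel, with the trailing toBeSaved check flushing (or carrying over) the last texel after the loop. A plain-array sketch of that pattern; names and numbers are purely illustrative.

#include <cstdio>

int main() {
    float icIn[4]  = {0.1f, 0.2f, 0.3f, 0.4f};       // "input" IC image
    float icOut[4] = {0.f, 0.f, 0.f, 0.f};           // "output" IC image

    const int positions[] = {0, 0, 0, 1, 1, 1, 3};   // IC texel hit by consecutive ray samples
    int prev = positions[0];
    float cached = icIn[prev];                       // load once
    bool toBeSaved = false;

    for (int pos : positions) {
        if (pos != prev) {
            icOut[prev] = toBeSaved ? cached : icIn[prev];   // flush, or carry over the old value
            toBeSaved = false;
            cached = icIn[pos];                              // load the new texel once
            prev = pos;
        }
        cached += 0.01f;                                     // stand-in for the opacity update
        toBeSaved = true;
    }
    icOut[prev] = toBeSaved ? cached : icIn[prev];           // flush the last texel after the loop

    for (float v : icOut) std::printf("%.2f ", v);           // 0.13 0.23 0.00 0.41
    std::printf("\n");
}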
@@ -67,7 +67,7 @@ namespace campvis {
dvrTF->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.19f, .28f), cgt::col4(89, 89, 89, 155), cgt::col4(89, 89, 89, 155)));
dvrTF->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.41f, .51f), cgt::col4(170, 170, 128, 64), cgt::col4(192, 192, 128, 64)));
static_cast<TransferFunctionProperty*>(_vr.getNestedProperty("RaycasterProps::TransferFunction"))->replaceTF(dvrTF);
static_cast<FloatProperty*>(_vr.getNestedProperty("RaycasterProps::SamplingRate"))->setValue(4.f);
static_cast<FloatProperty*>(_vr.getNestedProperty("RaycasterProps::SamplingRate"))->setValue(2.f);
}
}
\ No newline at end of file
@@ -37,14 +37,18 @@ namespace campvis {
const std::string IpsviRaycaster::loggerCat_ = "CAMPVis.modules.vis.IpsviRaycaster";
IpsviRaycaster::IpsviRaycaster(IVec2Property* viewportSizeProp)
: RaycastingProcessor(viewportSizeProp, "modules/vis/glsl/ipsviraycaster.frag", true, "450")
: RaycastingProcessor(viewportSizeProp, "modules/vis/glsl/ipsviraycaster.frag", true, "440")
, p_lightId("LightId", "Input Light Source", "lightsource", DataNameProperty::READ)
, p_sweepLineWidth("SweepLineWidth", "Sweep Line Width", 2, 1, 8)
, p_sweepLineWidth("SweepLineWidth", "Sweep Line Width", 2, 1, 32)
, p_icTextureSize("IcTextureSize", "Illumination Cache Texture Size", cgt::ivec2(512), cgt::ivec2(32), cgt::ivec2(2048))
, p_shadowIntensity("ShadowIntensity", "Shadow Intensity", .9f, 0.f, 1.f)
, p_numLines("NumLines", "Max Number of Lines", 2000, 1, 2000)
{
addProperty(p_lightId);
addProperty(p_sweepLineWidth);
addProperty(p_icTextureSize);
addProperty(p_shadowIntensity);
addProperty(p_numLines);
addDecorator(new ProcessorDecoratorGradient());
decoratePropertyCollection(this);
@@ -65,19 +69,29 @@ namespace campvis {
ScopedTypedData<CameraData> camera(data, p_camera.getValue());
ScopedTypedData<LightSourceData> light(data, p_lightId.getValue());
if (light != nullptr) {
cgt::vec3 lightDirection = (light->getLightPosition());
if (camera && light) {
processDirectional(data, image, *camera, *light);
}
else {
LDEBUG("Could not load light source from DataContainer.");
}
}
void IpsviRaycaster::processDirectional(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light) {
const cgt::vec3& lightSink = camera.getCamera().getFocus();
const cgt::vec3 lightSource = camera.getCamera().getFocus() + light.getLightPosition();
const cgt::vec3& lightDirection = light.getLightPosition();
// TODO: This should be a world to NDC space conversion, but it does not work... :(
const auto V = camera->getCamera().getViewMatrix();
const auto P = camera->getCamera().getProjectionMatrix();
const auto V = camera.getCamera().getViewMatrix();
const auto P = camera.getCamera().getProjectionMatrix();
// calculate viewport matrix for NDC -> viewport conversion
const cgt::vec2 halfViewport = cgt::vec2(getEffectiveViewportSize()) / 2.f;
const cgt::mat4 viewportMatrix = cgt::mat4::createTranslation(cgt::vec3(halfViewport, 0.f)) * cgt::mat4::createScale(cgt::vec3(halfViewport, 1.f));
const cgt::vec4 projectedLight = viewportMatrix*P*V*cgt::vec4(lightDirection, 1.f);
const cgt::vec4 projectedOrigin = viewportMatrix*P*V*cgt::vec4(0.f, 0.f, 0.f, 1.f);
const cgt::vec4 projectedLight = viewportMatrix*P*V* cgt::vec4(lightSource, 1.f);
const cgt::vec4 projectedOrigin = viewportMatrix*P*V* cgt::vec4(lightSink, 1.f);
cgt::vec2 projectedLightDirection = projectedOrigin.xy()/projectedOrigin.w - projectedLight.xy()/projectedLight.w;
// compute sweep direction (in viewport space)
@@ -85,23 +99,25 @@ namespace campvis {
SweepDirection sweepDir;
if (std::abs(projectedLightDirection.x) > std::abs(projectedLightDirection.y)) {
// horizontal sweep
if (projectedLightDirection.x > 0)
if (projectedLightDirection.x < 0)
sweepDir = LeftToRight;
else
sweepDir = RightToLeft;
}
else {
// vertical sweep
if (projectedLightDirection.y > 0)
if (projectedLightDirection.y < 0)
sweepDir = BottomToTop;
else
sweepDir = TopToBottom;
}
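The branch above derives the sweep direction from the light direction projected into viewport space: the dominant axis selects a horizontal or vertical sweep, and the (now flipped) sign comparison selects the border the sweep starts from. A stand-alone sketch of the same decision, with illustrative names:

#include <cmath>
#include <cstdio>

enum SweepDirection { LeftToRight, RightToLeft, BottomToTop, TopToBottom };

// dx/dy: light direction projected into viewport space (sink minus source).
SweepDirection chooseSweepDirection(float dx, float dy) {
    if (std::abs(dx) > std::abs(dy))
        return (dx < 0.f) ? LeftToRight : RightToLeft;   // horizontal sweep
    else
        return (dy < 0.f) ? BottomToTop : TopToBottom;   // vertical sweep
}

int main() {
    std::printf("%d\n", static_cast<int>(chooseSweepDirection(-3.2f, 1.1f)));   // 0 == LeftToRight
}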
//LINFO(projectedOrigin.xy()/projectedOrigin.w << ", " << projectedLight.xy()/projectedLight.w << ", " << projectedLightDirection << " => " << sweepDir);
// START: compute illumination cache (IC) plane/texture
// the plane is defined by the light direction
cgt::vec3 icNormal = cgt::normalize(lightDirection);
cgt::vec3 icUpVector = (std::abs(cgt::dot(icNormal, cgt::vec3(0.f, 0.f, 1.f))) < 0.01) ? cgt::vec3(0.f, 0.f, 1.f) : cgt::vec3(0.f, 1.f, 0.f);
cgt::vec3 icUpVector = (std::abs(cgt::dot(icNormal, cgt::vec3(0.f, 0.f, 1.f))) < 0.99) ? cgt::vec3(0.f, 0.f, 1.f) : cgt::vec3(0.f, 1.f, 0.f);
cgt::vec3 icRightVector = cgt::normalize(cgt::cross(icNormal, icUpVector));
icUpVector = cgt::normalize(cgt::cross(icRightVector, icNormal));
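The IC plane uses the normalized light direction as its normal and builds a right/up basis from two cross products; with the changed threshold (0.99 instead of 0.01), the code now falls back to the y axis only when the normal is almost parallel to z. A minimal sketch using its own small vector type (illustrative, not the cgt API):

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static float dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static Vec3 cross(const Vec3& a, const Vec3& b) {
    return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x};
}
static Vec3 normalize(const Vec3& v) {
    const float l = std::sqrt(dot(v, v));
    return {v.x / l, v.y / l, v.z / l};
}

// Build the IC plane basis from the light direction.
void buildIcBasis(const Vec3& lightDirection, Vec3& normal, Vec3& right, Vec3& up) {
    normal = normalize(lightDirection);
    // fall back to the y axis only when the normal is (almost) parallel to z
    const Vec3 worldUp = (std::abs(dot(normal, {0.f, 0.f, 1.f})) < 0.99f) ? Vec3{0.f, 0.f, 1.f}
                                                                          : Vec3{0.f, 1.f, 0.f};
    right = normalize(cross(normal, worldUp));
    up    = normalize(cross(right, normal));
}

int main() {
    Vec3 n, r, u;
    buildIcBasis({0.f, 0.f, 2.f}, n, r, u);
    std::printf("right=(%.1f,%.1f,%.1f) up=(%.1f,%.1f,%.1f)\n", r.x, r.y, r.z, u.x, u.y, u.z);
    // prints right=(-1.0,0.0,0.0) up=(0.0,1.0,0.0)
}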
@@ -129,7 +145,7 @@ namespace campvis {
}
cgt::vec3 icOrigin = cgt::floor(minPixel).x * icRightVector + cgt::floor(minPixel).y * icUpVector;
cgt::ivec3 icSize(384, 384, 1);
cgt::ivec3 icSize(p_icTextureSize.getValue(), 1);
icRightVector *= float(icSize.x - 1) / (std::ceil(maxPixel.x) - std::floor(minPixel.x)) ;
icUpVector *= float(icSize.y - 1) / (std::ceil(maxPixel.y) - std::floor(minPixel.y));
@@ -144,20 +160,23 @@ namespace campvis {
LWARNING(pixel);
}
cgt::col4* zeroInit = new cgt::col4[cgt::hmul(icSize)];
memset(zeroInit, 0, sizeof(cgt::col4) * cgt::hmul(icSize));
//cgt::col4* zeroInit = new cgt::col4[cgt::hmul(icSize)];
//memset(zeroInit, 0, sizeof(cgt::col4) * cgt::hmul(icSize));
cgt::col4 zeroInit(0, 0, 0, 0);
cgt::TextureUnit icUnit1, icUnit2;
cgt::Texture* icTextures[2];
icUnit1.activate();
icTextures[0] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_RGBA32F, zeroInit->elem, GL_RGBA, GL_UNSIGNED_BYTE);
icTextures[0] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_RGBA32F);
glClearTexImage(icTextures[0]->getId(), 0, GL_RGBA, GL_UNSIGNED_BYTE, &zeroInit);
icUnit2.activate();
icTextures[1] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_RGBA32F, zeroInit->elem, GL_RGBA, GL_UNSIGNED_BYTE);
icTextures[1] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_RGBA32F);
glClearTexImage(icTextures[1]->getId(), 0, GL_RGBA, GL_UNSIGNED_BYTE, &zeroInit);
glBindImageTexture(0, icTextures[0]->getId(), 0, false, 0, GL_READ_WRITE, GL_RGBA32F);
glBindImageTexture(1, icTextures[1]->getId(), 0, false, 0, GL_READ_WRITE, GL_RGBA32F);
delete [] zeroInit;
//delete [] zeroInit;
_shader->setUniform("_icOrigin", icOrigin);
_shader->setUniform("_icNormal", icNormal);
@@ -175,7 +194,7 @@ namespace campvis {
static const GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 , GL_COLOR_ATTACHMENT2 };
glDrawBuffers(3, buffers);
light->bind(_shader, "_lightSource");
light.bind(_shader, "_lightSource");
glEnable(GL_DEPTH_TEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
@@ -213,27 +232,35 @@ namespace campvis {
break;
}
_shader->setUniform("_projectionMatrix", projection);
int line = 1 - p_sweepLineWidth.getValue();
int evenOdd = 0;
_shader->setUniform("_projectionMatrix", projection);
GLint uIcImageIn = _shader->getUniformLocation("_icImageIn");
GLint uIcImageOut = _shader->getUniformLocation("_icImageOut");
GLint uViewMatrix = _shader->getUniformLocation("_viewMatrix");
while (line < lineMax) {
// ping-pong buffering to avoid concurrent read-writes
if (evenOdd % 2 == 0) {
_shader->setUniform("_icImageIn", 0);
_shader->setUniform("_icImageOut", 1);
_shader->setUniform(uIcImageIn, 0);
_shader->setUniform(uIcImageOut, 1);
}
else {
_shader->setUniform("_icImageIn", 1);
_shader->setUniform("_icImageOut", 0);
_shader->setUniform(uIcImageIn, 1);
_shader->setUniform(uIcImageOut, 0);
}
_shader->setUniform("_viewMatrix", cgt::mat4::createTranslation(viewTranslationBase * float(line)) * viewScale);
_shader->setUniform(uViewMatrix, cgt::mat4::createTranslation(viewTranslationBase * float(line)) * viewScale);
QuadRdr.renderQuad01();
line += p_sweepLineWidth.getValue();
++evenOdd;
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
if (evenOdd > p_numLines.getValue())
break;
}
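The loop above ping-pongs the two IC images between consecutive sweep lines so a line never reads from the image it writes to, inserts a shader-image barrier so the next line sees the previous line's stores, and caches the uniform locations instead of looking them up by name every iteration. A condensed sketch of the scheme; the GL calls match the commit, everything else is illustrative.

#include <GL/glew.h>

// Drive the sweep with ping-pong image bindings: units 0 and 1 swap the roles of
// "_icImageIn" and "_icImageOut" every line. renderSweepLine stands in for setting
// the uniforms and rendering one sweep line (QuadRdr.renderQuad01() in the commit).
void sweep(GLuint icTex0, GLuint icTex1, int lineMax, int sweepLineWidth,
           void (*renderSweepLine)(int icImageIn, int icImageOut, int line)) {
    glBindImageTexture(0, icTex0, 0, GL_FALSE, 0, GL_READ_WRITE, GL_RGBA32F);
    glBindImageTexture(1, icTex1, 0, GL_FALSE, 0, GL_READ_WRITE, GL_RGBA32F);

    int evenOdd = 0;
    for (int line = 1 - sweepLineWidth; line < lineMax; line += sweepLineWidth, ++evenOdd) {
        const int in  = (evenOdd % 2 == 0) ? 0 : 1;            // image unit read this pass
        const int out = 1 - in;                                 // image unit written this pass
        renderSweepLine(in, out, line);
        glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);   // make stores visible to the next line
    }
}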
// restore state
@@ -254,9 +281,18 @@ namespace campvis {
ic->addColorTexture(id2);
data.addData(p_targetImageID.getValue() + ".IC", ic);
}
else {
LDEBUG("Could not load light source from DataContainer.");
}
void IpsviRaycaster::processPointLight(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light) {
// calculate viewport matrix for NDC -> viewport conversion
const auto& V = camera.getCamera().getViewMatrix();
const auto& P = camera.getCamera().getProjectionMatrix();
const cgt::vec2 halfViewport = cgt::vec2(getEffectiveViewportSize()) / 2.f;
const cgt::mat4 viewportMatrix = cgt::mat4::createTranslation(cgt::vec3(halfViewport, 0.f)) * cgt::mat4::createScale(cgt::vec3(halfViewport, 1.f));
// project light position to viewport coordinates
cgt::vec4 projectedLight = viewportMatrix * P * V * cgt::vec4(light.getLightPosition(), 1.f);
projectedLight /= projectedLight.w;
}
}
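Both code paths build the viewport matrix as a translation by half the viewport size times a scale by half the viewport size, which maps NDC coordinates from [-1, 1] to pixel coordinates; applying it in homogeneous coordinates and dividing by w afterwards (as done above) is equivalent to dividing first. A small worked example with made-up numbers:

#include <cstdio>

int main() {
    const float viewportW = 800.f, viewportH = 600.f;
    const float halfW = viewportW / 2.f, halfH = viewportH / 2.f;

    // assume clipPos = P * V * worldPos has already been computed
    const float clipX = 1.2f, clipY = -0.6f, clipW = 2.f;

    const float ndcX = clipX / clipW, ndcY = clipY / clipW;      // perspective divide
    const float vpX = ndcX * halfW + halfW;                      // viewportMatrix = T(half) * S(half)
    const float vpY = ndcY * halfH + halfH;

    std::printf("viewport position: (%.1f, %.1f)\n", vpX, vpY);  // (640.0, 210.0)
}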
@@ -39,6 +39,9 @@ namespace cgt {
}
namespace campvis {
class CameraData;
class LightSourceData;
/**
* Performs a simple volume ray casting.
*/
@@ -74,12 +77,19 @@ namespace campvis {
DataNameProperty p_lightId; ///< Name/ID for the LightSource to use
IntProperty p_sweepLineWidth;
IVec2Property p_icTextureSize;
FloatProperty p_shadowIntensity;
IntProperty p_numLines;
protected:
/// \see RaycastingProcessor::processImpl()
virtual void processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image);
void processDirectional(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light);
void processPointLight(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image, const CameraData& camera, const LightSourceData& light);
static const std::string loggerCat_;
};
}
......