2.12.2021, 9:00 - 11:00: Due to updates GitLab may be unavailable for some minutes between 09:00 and 11:00.

Commit 45fb9320 authored by Christian Schulte zu Berge's avatar Christian Schulte zu Berge
Browse files

Started work on IpsviRaycaster processor implementing...

Started work on IpsviRaycaster processor implementing ImagePlaneSweepVolumeIllumination by Sundén et al.
- Sweep direction setup works for directional light.
- Computation of illumination cache plane works.
- Implemented simplified version of the IPSVI shader, seems to work to some extent.
parent b4b9cfbf
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2015, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
layout(location = 0) out vec4 out_Color; ///< outgoing fragment color
layout(location = 1) out vec4 out_FHP; ///< outgoing fragment first hitpoint
layout(location = 2) out vec4 out_FHN; ///< outgoing fragment first hit normal
#include "tools/gradient.frag"
#include "tools/raycasting.frag"
#include "tools/shading.frag"
#include "tools/texture2d.frag"
#include "tools/texture3d.frag"
#include "tools/transferfunction.frag"
uniform vec2 _viewportSizeRCP;
uniform float _jitterStepSizeMultiplier;
// ray entry points
uniform sampler2D _entryPoints;
uniform sampler2D _entryPointsDepth;
uniform TextureParameters2D _entryParams;
// ray exit points
uniform sampler2D _exitPoints;
uniform sampler2D _exitPointsDepth;
uniform TextureParameters2D _exitParams;
// DRR volume
uniform sampler3D _volume;
uniform TextureParameters3D _volumeTextureParams;
// Transfer function
uniform sampler1D _transferFunction;
uniform TFParameters1D _transferFunctionParams;
// Illumination cache
uniform layout(rgba8) image2D _icImageIn;
uniform layout(rgba8) image2D _icImageOut;
uniform vec3 _icOrigin;
uniform vec3 _icNormal;
uniform vec3 _icRightVector;
uniform vec3 _icUpVector;
uniform float _shadowIntensity = 0.5;
uniform LightSource _lightSource;
uniform vec3 _cameraPosition;
uniform float _samplingStepSize;
const float SAMPLING_BASE_INTERVAL_RCP = 200.0;
/**
 * Maps a world-space position to illumination cache (IC) texel coordinates by
 * projecting it onto the IC plane and decomposing the in-plane offset along the
 * plane's right/up basis vectors (which are pre-scaled on the CPU side so the
 * dot products directly yield texel indices).
 *
 * NOTE(review): the signed point-plane distance is folded through abs() here,
 * mirroring the CPU-side extent computation in IpsviRaycaster::processImpl. A
 * plain orthogonal projection would use the signed distance:
 * projected = diag - dot(diag, _icNormal) * _icNormal. Verify the abs() is
 * intended for positions on the positive side of the plane; if it is changed,
 * the CPU-side computation must be changed in lockstep.
 */
ivec2 calcIcSamplePosition(vec3 worldPosition) {
    // project world position onto IC plane
    const vec3 diag = worldPosition - _icOrigin;
    const float distance = abs(dot(diag, _icNormal));
    const vec3 projected = diag - (-distance * _icNormal);
    // basis vectors encode the texel scale, so this directly yields texel coords
    return ivec2(dot(projected, _icRightVector), dot(projected, _icUpVector));
}
/**
 * Casts a single view ray through the volume and returns the composited
 * fragment color, while simultaneously propagating illumination information
 * through the illumination cache (IC) as in ImagePlaneSweepVolumeIllumination
 * (Sundén et al.).
 *
 * Side effects: writes out_FHP/out_FHN at the first non-transparent sample,
 * sets gl_FragDepth, and stores updated illumination values into _icImageOut.
 *
 * \param   entryPoint  ray entry point in volume texture coordinates
 * \param   exitPoint   ray exit point in volume texture coordinates
 * \param   texCoords   viewport texture coordinates of this fragment
 * \return  the front-to-back composited fragment color
 */
vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords) {
    vec4 result = vec4(0.0);
    // ray parameter of the first non-transparent sample; < 0 means "no hit yet"
    float firstHitT = -1.0;

    // calculate ray parameters
    vec3 direction = exitPoint.rgb - entryPoint.rgb;
    float t = 0.0;
    float tend = length(direction);
    direction = normalize(direction);
    // jitter the entry point along the ray to trade banding artifacts for noise
    jitterEntryPoint(entryPoint, direction, _samplingStepSize * _jitterStepSizeMultiplier);

    // IC texel the previous sample mapped to; incoming illumination is read from there
    ivec2 icPositionPrev = calcIcSamplePosition(textureToWorld(_volumeTextureParams, vec4(entryPoint, 1.0)).xyz);
    vec4 icOut = vec4(0.0);

    while (t < tend) {
        // compute sample position
        vec3 samplePosition = entryPoint.rgb + t * direction;
        vec4 worldPos = textureToWorld(_volumeTextureParams, vec4(samplePosition, 1.0));
        ivec2 icPosition = calcIcSamplePosition(worldPos.xyz / worldPos.w);
        // illumination arriving from the light direction (written by a previous sweep line)
        vec4 icIn = imageLoad(_icImageIn, icPositionPrev);

        // lookup intensity and TF
        float intensity = texture(_volume, samplePosition).r;
        vec4 color = lookupTF(_transferFunction, _transferFunctionParams, intensity);

        // perform compositing
        if (color.a > 0.0) {
            // compute gradient (needed for shading and normals)
            vec3 gradient = computeGradient(_volume, _volumeTextureParams, samplePosition);
            color.rgb = calculatePhongShading(worldPos.xyz / worldPos.w, _lightSource, _cameraPosition, gradient, color.rgb);

            // accomodate for variable sampling rates (opacity correction)
            color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);

            // perform global illumination:
            // back-to-front compositing from light-direction
            icOut.rgb = ((1.0 - color.a) * icIn.rgb) + (color.a * color.rgb);
            icOut.a = ((1.0 - color.a) * icIn.a) + color.a;

            // apply shadowing: attenuate by the opacity accumulated towards the light
            color.rgb *= (1.0 - icOut.a * _shadowIntensity);

            // front-to-back compositing along view direction
            result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
            result.a = result.a + (1.0 -result.a) * color.a;

            // update illumination information for subsequent sweep lines
            imageStore(_icImageOut, icPosition, icOut);
            icPositionPrev = icPosition;
        }

        // save first hit ray parameter for depth value calculation
        if (firstHitT < 0.0 && result.a > 0.0) {
            firstHitT = t;
            out_FHP = vec4(samplePosition, 1.0);
            // NOTE(review): currently outputs the IC sample position for debugging
            // instead of the first-hit normal (the commented-out gradient version).
            out_FHN = vec4(icPosition, 0.0, 0.0);// vec4(normalize(computeGradient(_volume, _volumeTextureParams, samplePosition)), 1.0);
        }

        // early ray termination (disabled!)
        // presumably disabled so every sample still updates the illumination cache
        //if (result.a > 0.975) {
        // result.a = 1.0;
        // t = tend;
        //}

        // advance to the next evaluation point along the ray
        t += _samplingStepSize;
    }

    // calculate depth value from ray parameter
    gl_FragDepth = 1.0;
    if (firstHitT >= 0.0) {
        float depthEntry = texture(_entryPointsDepth, texCoords).z;
        float depthExit = texture(_exitPointsDepth, texCoords).z;
        gl_FragDepth = calculateDepthValue(firstHitT/tend, depthEntry, depthExit);
    }
    return result;
}
/**
 * Main entry point: fetches the ray entry/exit points for this fragment,
 * discards background fragments, and otherwise performs the raycasting.
 */
void main() {
    const vec2 fragTexCoord = gl_FragCoord.xy * _viewportSizeRCP;
    const vec3 rayStart = texture(_entryPoints, fragTexCoord).rgb;
    const vec3 rayEnd = texture(_exitPoints, fragTexCoord).rgb;

    // identical entry and exit points => the ray misses the proxy geometry,
    // so this is a background fragment needing no raycasting
    if (rayStart == rayEnd)
        discard;

    // fragment lies inside the bounding box => cast the ray
    out_Color = performRaycasting(rayStart, rayEnd, fragTexCoord);
}
\ No newline at end of file
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2015, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#include "ipsviraycaster.h"

#include <cmath>
#include <cstring>
#include <vector>

#include <tbb/tbb.h>

#include "core/datastructures/cameradata.h"
#include "core/datastructures/genericimagerepresentationlocal.h"
#include "core/datastructures/lightsourcedata.h"
#include "core/datastructures/renderdata.h"
#include "core/pipeline/processordecoratorgradient.h"
#include "core/tools/quadrenderer.h"
namespace campvis {
/// Logger category used by the LINFO/LWARNING/LDEBUG calls of this processor.
const std::string IpsviRaycaster::loggerCat_ = "CAMPVis.modules.vis.IpsviRaycaster";
/**
 * Constructs a new IpsviRaycaster processor using the IPSVI fragment shader
 * with GLSL version "450".
 * \param   viewportSizeProp    property defining the viewport size, forwarded to RaycastingProcessor
 */
IpsviRaycaster::IpsviRaycaster(IVec2Property* viewportSizeProp)
    : RaycastingProcessor(viewportSizeProp, "modules/vis/glsl/ipsviraycaster.frag", true, "450")
    , p_lightId("LightId", "Input Light Source", "lightsource", DataNameProperty::READ)
    , p_sweepLineWidth("SweepLineWidth", "Sweep Line Width", 2, 1, 8)
    , p_shadowIntensity("ShadowIntensity", "Shadow Intensity", .5f, 0.f, 1.f)
{
    addProperty(p_lightId);
    addProperty(p_sweepLineWidth);
    addProperty(p_shadowIntensity);

    // presumably injects the gradient computation into the shader (the fragment
    // shader calls computeGradient()) — see ProcessorDecoratorGradient
    addDecorator(new ProcessorDecoratorGradient());
    decoratePropertyCollection(this);
}
// Empty destructor — nothing to clean up here.
IpsviRaycaster::~IpsviRaycaster() {
}
// \see AbstractProcessor::init — no initialization beyond the base class needed.
void IpsviRaycaster::init() {
    RaycastingProcessor::init();
}
// \see AbstractProcessor::deinit — no deinitialization beyond the base class needed.
void IpsviRaycaster::deinit() {
    RaycastingProcessor::deinit();
}
/**
 * Performs the IPSVI raycasting pass:
 *  1. Determines the viewport-space sweep direction from the projected light direction.
 *  2. Sets up the illumination cache (IC) plane perpendicular to the light direction and
 *     creates two zero-initialized ping-pong IC textures covering the volume's projected extent.
 *  3. Renders the raycasting sweep-line by sweep-line, alternating the IC images so each
 *     line can read the illumination accumulated by the previously rendered line.
 *
 * \param   data    DataContainer providing camera and light source data, receives the results
 * \param   image   GL representation of the image to render
 */
void IpsviRaycaster::processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image) {
    ScopedTypedData<CameraData> camera(data, p_camera.getValue());
    ScopedTypedData<LightSourceData> light(data, p_lightId.getValue());

    // camera is dereferenced below, so it must be validated together with the light
    // source (previously only the light source was checked => potential null deref).
    if (camera != nullptr && light != nullptr) {
        cgt::vec3 lightDirection = -(light->getLightPosition());

        // TODO: This should be a world to NDC space conversion, but it does not work... :(
        cgt::mat4 tmp = camera->getCamera().getViewMatrix() * camera->getCamera().getProjectionMatrix();
        cgt::vec4 projectedLightDirection = tmp * cgt::vec4(lightDirection, 1.f);
        projectedLightDirection /= projectedLightDirection.w;

        // compute sweep direction (in viewport space) from the dominant axis of the
        // projected light direction
        enum SweepDirection { LeftToRight, RightToLeft, TopToBottom, BottomToTop };
        SweepDirection sweepDir;
        if (std::abs(projectedLightDirection.x) > std::abs(projectedLightDirection.y)) {
            // horizontal sweep
            if (projectedLightDirection.x > 0)
                sweepDir = RightToLeft;
            else
                sweepDir = LeftToRight;
        }
        else {
            // vertical sweep
            if (projectedLightDirection.y > 0)
                sweepDir = TopToBottom;
            else
                sweepDir = BottomToTop;
        }
        LINFO(cgt::normalize(projectedLightDirection.xy()) << " => " << sweepDir);

        // START: compute illumination cache (IC) plane/texture
        // the plane is defined by the light direction; pick the world axis least
        // parallel to the normal as preliminary up vector and orthonormalize
        cgt::vec3 icNormal = cgt::normalize(lightDirection);
        cgt::vec3 icUpVector = (std::abs(cgt::dot(icNormal, cgt::vec3(0.f, 0.f, 1.f))) < 0.01) ? cgt::vec3(0.f, 0.f, 1.f) : cgt::vec3(0.f, 1.f, 0.f);
        cgt::vec3 icRightVector = cgt::normalize(cgt::cross(icNormal, icUpVector));
        icUpVector = cgt::normalize(cgt::cross(icRightVector, icNormal));

        // project all 8 corners of the volume onto the IC plane to determine its extent
        // (this projection is mirrored by calcIcSamplePosition() in the fragment shader
        // and must stay consistent with it)
        cgt::Bounds worldBounds = image->getParent()->getWorldBounds();
        cgt::vec3 minPixel(0.f), maxPixel(0.f);
        std::vector<cgt::vec3> corners;
        corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getLLF().y, worldBounds.getLLF().z));
        corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getLLF().y, worldBounds.getURB().z));
        corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getURB().y, worldBounds.getLLF().z));
        corners.push_back(cgt::vec3(worldBounds.getLLF().x, worldBounds.getURB().y, worldBounds.getURB().z));
        corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getLLF().y, worldBounds.getLLF().z));
        corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getLLF().y, worldBounds.getURB().z));
        corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getURB().y, worldBounds.getLLF().z));
        corners.push_back(cgt::vec3(worldBounds.getURB().x, worldBounds.getURB().y, worldBounds.getURB().z));
        for (size_t i = 0; i < corners.size(); ++i) {
            const cgt::vec3 diag = corners[i];
            const float distance = std::abs(cgt::dot(diag, icNormal));
            const cgt::vec3 projected = diag - (-distance * icNormal);
            const cgt::vec3 pixel(cgt::dot(projected, icRightVector), cgt::dot(projected, icUpVector), 0.f);
            minPixel = cgt::min(minPixel, pixel);
            maxPixel = cgt::max(maxPixel, pixel);
        }

        // map the IC plane extent onto a fixed-size texture: icOrigin corresponds to
        // texel (0, 0); the basis vectors are scaled so that the dot products in
        // calcIcSamplePosition() directly yield texel coordinates
        cgt::vec3 icOrigin = cgt::floor(minPixel).x * icRightVector + cgt::floor(minPixel).y * icUpVector;
        cgt::ivec3 icSize(512, 512, 1);
        icRightVector *= float(icSize.x - 1) / (std::ceil(maxPixel.x) - std::floor(minPixel.x));
        icUpVector *= float(icSize.y - 1) / (std::ceil(maxPixel.y) - std::floor(minPixel.y));

        // just debugging/asserting correctness: every corner should map into the texture
        for (size_t i = 0; i < corners.size(); ++i) {
            const cgt::vec3 diag = corners[i] - icOrigin;
            const float distance = std::abs(cgt::dot(diag, icNormal));
            const cgt::vec3 projected = diag - (-distance * icNormal);
            const cgt::vec3 pixel(cgt::dot(projected, icRightVector), cgt::dot(projected, icUpVector), 0.f);
            if (pixel.x < 0.f || pixel.y < 0.f || pixel.x >= icSize.x || pixel.y >= icSize.y)
                LWARNING(pixel);
        }

        // create the two zero-initialized ping-pong IC textures and bind them as images
        // (std::vector instead of raw new[]/delete[] => exception-safe, no leak)
        std::vector<cgt::col4> zeroInit(cgt::hmul(icSize));
        memset(zeroInit.data(), 0, sizeof(cgt::col4) * zeroInit.size());
        cgt::TextureUnit icUnit1, icUnit2;
        cgt::Texture* icTextures[2];
        icUnit1.activate();
        icTextures[0] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_RGBA8, zeroInit.data()->elem, GL_RGBA, GL_UNSIGNED_BYTE);
        icUnit2.activate();
        icTextures[1] = new cgt::Texture(GL_TEXTURE_2D, icSize, GL_RGBA8, zeroInit.data()->elem, GL_RGBA, GL_UNSIGNED_BYTE);
        glBindImageTexture(0, icTextures[0]->getId(), 0, false, 0, GL_READ_WRITE, GL_RGBA8);
        glBindImageTexture(1, icTextures[1]->getId(), 0, false, 0, GL_READ_WRITE, GL_RGBA8);

        _shader->setUniform("_icOrigin", icOrigin);
        _shader->setUniform("_icNormal", icNormal);
        _shader->setUniform("_icRightVector", icRightVector);
        _shader->setUniform("_icUpVector", icUpVector);
        _shader->setUniform("_shadowIntensity", p_shadowIntensity.getValue());

        // set up the FBO: color, first-hit-point, first-hit-normal and depth attachments
        FramebufferActivationGuard fag(this);
        createAndAttachTexture(GL_RGBA8);
        createAndAttachTexture(GL_RGBA32F);
        createAndAttachTexture(GL_RGBA32F);
        createAndAttachDepthTexture();
        static const GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2 };
        glDrawBuffers(3, buffers);

        light->bind(_shader, "_lightSource");
        glEnable(GL_DEPTH_TEST);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        // set up the per-sweep-direction projection and the view transform template;
        // the scissor-like sweep is realized by translating a thin quad line by line
        auto viewportSize = getEffectiveViewportSize();
        cgt::mat4 projection;
        cgt::mat4 viewScale;
        cgt::vec3 viewTranslationBase;
        int lineMax = 0;    // explicitly initialized in case the switch is ever extended
        switch (sweepDir) {
            case LeftToRight:
                projection = cgt::mat4::createOrtho(0, viewportSize.x, 0, 1, -1, 1);
                viewScale = cgt::mat4::createScale(cgt::vec3(p_sweepLineWidth.getValue(), 1.f, 1.f));
                viewTranslationBase = cgt::vec3(1.f, 0.f, 0.f);
                lineMax = viewportSize.x;
                break;
            case RightToLeft:
                projection = cgt::mat4::createOrtho(viewportSize.x, 0, 0, 1, -1, 1);
                viewScale = cgt::mat4::createScale(cgt::vec3(p_sweepLineWidth.getValue(), 1.f, 1.f));
                viewTranslationBase = cgt::vec3(1.f, 0.f, 0.f);
                lineMax = viewportSize.x;
                break;
            case BottomToTop:
                projection = cgt::mat4::createOrtho(0, 1, viewportSize.y, 0, -1, 1);
                viewScale = cgt::mat4::createScale(cgt::vec3(1.f, p_sweepLineWidth.getValue(), 1.f));
                viewTranslationBase = cgt::vec3(0.f, 1.f, 0.f);
                lineMax = viewportSize.y;
                break;
            case TopToBottom:
                projection = cgt::mat4::createOrtho(0, 1, 0, viewportSize.y, -1, 1);
                viewScale = cgt::mat4::createScale(cgt::vec3(1.f, p_sweepLineWidth.getValue(), 1.f));
                viewTranslationBase = cgt::vec3(0.f, 1.f, 0.f);
                lineMax = viewportSize.y;
                break;
        }
        _shader->setUniform("_projectionMatrix", projection);

        // render sweep line by sweep line, ping-ponging between the two IC images so
        // each line reads the illumination written by the previously rendered line
        int line = 1 - p_sweepLineWidth.getValue();
        int evenOdd = 0;
        while (line < lineMax) {
            if (evenOdd % 2 == 0) {
                _shader->setUniform("_icImageIn", 0);
                _shader->setUniform("_icImageOut", 1);
            }
            else {
                _shader->setUniform("_icImageIn", 1);
                _shader->setUniform("_icImageOut", 0);
            }
            _shader->setUniform("_viewMatrix", cgt::mat4::createTranslation(viewTranslationBase * float(line)) * viewScale);
            QuadRdr.renderQuad01();

            line += p_sweepLineWidth.getValue();
            ++evenOdd;
            // make the IC image stores visible to the next sweep line
            glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
        }

        // restore state (unbind formats now match the GL_RGBA8 formats bound above)
        glDrawBuffers(1, buffers);
        glDisable(GL_DEPTH_TEST);
        glBindImageTexture(0, 0, 0, false, 0, GL_READ_ONLY, GL_RGBA8);
        glBindImageTexture(1, 0, 0, false, 0, GL_READ_ONLY, GL_RGBA8);
        LGL_ERROR;

        data.addData(p_targetImageID.getValue(), new RenderData(_fbo));

        // additionally publish the two IC textures (e.g. for debugging/inspection);
        // ImageRepresentationGL::create takes over the texture pointers
        RenderData* ic = new RenderData();
        ImageData* id1 = new ImageData(2, icSize, 4);
        ImageRepresentationGL::create(id1, icTextures[0]);
        ic->addColorTexture(id1);
        ImageData* id2 = new ImageData(2, icSize, 4);
        ImageRepresentationGL::create(id2, icTextures[1]);
        ic->addColorTexture(id2);
        data.addData(p_targetImageID.getValue() + ".IC", ic);
    }
    else {
        LDEBUG("Could not load light source or camera data from DataContainer.");
    }
}
}
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2015, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#ifndef IPSVIRAYCASTER_H__
#define IPSVIRAYCASTER_H__
#include "core/pipeline/raycastingprocessor.h"
#include "core/properties/floatingpointproperty.h"
#include "core/properties/genericproperty.h"
#include "core/properties/transferfunctionproperty.h"
#include "modules/modulesapi.h"
#include <string>
namespace cgt {
class Shader;
}
namespace campvis {
/**
 * Performs volume ray casting with Image Plane Sweep Volume Illumination
 * (IPSVI) after Sundén et al., adding shadowing effects from a directional
 * light source to the rendering.
 */
class CAMPVIS_MODULES_API IpsviRaycaster : public RaycastingProcessor {
public:
    /**
     * Constructs a new IpsviRaycaster Processor
     **/
    explicit IpsviRaycaster(IVec2Property* viewportSizeProp);

    /**
     * Destructor
     **/
    virtual ~IpsviRaycaster();

    /**
     * To be used in ProcessorFactory static methods
     */
    static const std::string getId() { return "IpsviRaycaster"; }

    /// \see AbstractProcessor::getName()
    virtual const std::string getName() const { return getId(); }
    /// \see AbstractProcessor::getDescription()
    /// (previously claimed "simple volume ray casting", which does not describe this processor)
    virtual const std::string getDescription() const { return "Performs volume ray casting with Image Plane Sweep Volume Illumination (IPSVI)."; }
    /// \see AbstractProcessor::getAuthor()
    virtual const std::string getAuthor() const { return "Christian Schulte zu Berge <christian.szb@in.tum.de>"; }
    /// \see AbstractProcessor::getProcessorState()
    virtual ProcessorState getProcessorState() const { return AbstractProcessor::TESTING; }

    /// \see AbstractProcessor::init
    virtual void init();
    /// \see AbstractProcessor::deinit
    virtual void deinit();

    DataNameProperty p_lightId;         ///< Name/ID for the LightSource to use
    IntProperty p_sweepLineWidth;       ///< Width of one sweep line in viewport pixels (see processImpl)
    FloatProperty p_shadowIntensity;    ///< Intensity of the shadowing effect, in [0, 1]

protected:
    /// \see RaycastingProcessor::processImpl()
    virtual void processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image);

    static const std::string loggerCat_;
};
}
#endif // IPSVIRAYCASTER_H__
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include "modules/vis/processors/drrraycaster.h"
#include "modules/vis/processors/eepgenerator.h"
#include "modules/vis/processors/geometryrenderer.h"
#include "modules/vis/processors/ipsviraycaster.h"
#include "modules/vis/processors/mprrenderer.h"
#include "modules/vis/processors/orientationoverlay.h"
#include "modules/vis/processors/proxygeometrygenerator.h"
...@@ -71,6 +72,7 @@ namespace campvis { ...@@ -71,6 +72,7 @@ namespace campvis {
template class SmartProcessorRegistrar<DRRRaycaster>;
template class SmartProcessorRegistrar<EEPGenerator>;
template class SmartProcessorRegistrar<GeometryRenderer>;
template class SmartProcessorRegistrar<IpsviRaycaster>;
template class SmartProcessorRegistrar<MprRenderer>;
template class SmartProcessorRegistrar<OrientationOverlay>;
template class SmartProcessorRegistrar<ProxyGeometryGenerator>;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment