
Commit 30c89024 authored by Christian Schulte zu Berge

Further work on MultiVolumeRaycaster:

* Included VoxelHierarchyMapper for empty space skipping
* Can now successfully render three volumes at the same time.
parent 0a73c082
@@ -32,6 +32,11 @@ void jitterEntryPoint(inout vec3 position, in vec3 direction, in float stepSize)
position = position + direction * (stepSize * random);
}
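// Jitters a scalar ray parameter by a pseudo-random fraction of the step size,
// using the same fract(sin(...)) hash on gl_FragCoord as above; this trades
// regular-sampling "wood grain" artifacts for less objectionable noise.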
+void jitterFloat(inout float t, in float stepSize) {
+float random = fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453);
+t += (stepSize * random);
+}
/**
* Computes the intersection of the given ray with the given axis-aligned box.
* \param rayOrigin Origin of ray
@@ -60,6 +65,18 @@ float rayBoxIntersection(in vec3 rayOrigin, in vec3 rayDirection, in vec3 boxLlf
return min(min(tMin.x, min(tMin.y, tMin.z)) , min(tMax.x, min(tMax.y, tMax.z)));
}
+// compute the near and far intersections of the cube (stored in the x and y components) using the slab method
+// no intersection means vec.x > vec.y (really tNear > tFar)
+vec2 intersectAABB(in vec3 rayOrigin, in vec3 rayDirection, in vec3 boxLlf, in vec3 boxUrb) {
+vec3 tMin = (boxLlf - rayOrigin) / rayDirection;
+vec3 tMax = (boxUrb - rayOrigin) / rayDirection;
+vec3 t1 = min(tMin, tMax);
+vec3 t2 = max(tMin, tMax);
+float tNear = max(max(t1.x, t1.y), t1.z);
+float tFar = min(min(t2.x, t2.y), t2.z);
+return vec2(tNear, tFar);
+}
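// A minimal usage sketch (not from this commit; rayOrigin, rayDirection and a
// unit box are assumed): the slab result still needs a miss test and clamping
// when the ray starts inside the box.
//
//     vec2 hit = intersectAABB(rayOrigin, rayDirection, vec3(0.0), vec3(1.0));
//     if (hit.x > hit.y)
//         discard;                      // tNear > tFar: the ray misses the box
//     float tEntry = max(hit.x, 0.0);   // origin inside the box => clamp to 0
//     float tExit  = hit.y;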
/**
* Converts a depth value in eye space to the corresponding depth value in viewport space.
* \param depth Depth value in eye space.
@@ -33,6 +33,8 @@ layout(location = 2) out vec4 out_FHN; ///< outgoing fragment first hit normal
#include "tools/texture3d.frag"
#include "tools/transferfunction.frag"
#include "modules/vis/glsl/voxelhierarchy.frag"
uniform vec2 _viewportSizeRCP;
uniform float _jitterStepSizeMultiplier;
@@ -63,12 +65,62 @@ uniform TFParameters1D _transferFunctionParams1;
uniform TFParameters1D _transferFunctionParams2;
uniform TFParameters1D _transferFunctionParams3;
+// Voxel Hierarchy Lookup volumes
+uniform usampler2D _voxelHierarchy1;
+uniform usampler2D _voxelHierarchy2;
+uniform usampler2D _voxelHierarchy3;
+uniform int _vhMaxMipMapLevel1;
+uniform int _vhMaxMipMapLevel2;
+uniform int _vhMaxMipMapLevel3;
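// Presumably each texel of these integer hierarchy textures encodes the
// TF-dependent occupancy of a block of voxels in its bits, with the mipmap
// levels providing coarser aggregates for the traversal in voxelhierarchy.frag;
// see VoxelHierarchyMapper for how they are generated.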
uniform LightSource _lightSource;
uniform vec3 _cameraPosition;
uniform float _samplingStepSize;
const float SAMPLING_BASE_INTERVAL_RCP = 200.0;
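// Clips the ray from entryPoint to exitPoint against one voxel hierarchy:
// the forward pass yields the first potentially visible sample (tNear), and
// the same clip run backwards from the exit point yields the last one (tFar).
// Both are ray parameters in [0, 1] along entryPoint -> exitPoint.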
+vec2 clipVolume(usampler2D vhTexture, int vhMaxMipmapLevel, TextureParameters3D volumeParams, in vec3 entryPoint, in vec3 exitPoint) {
+vec3 startPosTex = worldToTexture(volumeParams, entryPoint).xyz;
+vec3 endPosTex = worldToTexture(volumeParams, exitPoint).xyz;
+vec3 directionTex = endPosTex - startPosTex;
+float tNear = clipFirstHitpoint(vhTexture, vhMaxMipmapLevel, startPosTex, directionTex, 0.0, 1.0);
+float tFar = 1.0 - clipFirstHitpoint(vhTexture, vhMaxMipmapLevel, endPosTex, -directionTex, 0.0, 1.0);
+return vec2(tNear, tFar);
+}
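// One raycasting step for a single volume: rejects samples outside the
// volume's clip range and texture bounds, looks up intensity and TF, shades,
// composites front-to-back, and records the first hit for the FHP/FHN/depth
// outputs. Written as a preprocessor macro so the identical body can be
// instantiated for each volume/TF/parameter triple below.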
+#define RAYCASTING_STEP(worldPosition, CLIP, volume, volumeParams, tf, tfParams, result, firstHitT, tNear) \
+{ \
+if (tNear >= CLIP.x && tNear <= CLIP.y) { \
+vec3 samplePosition = worldToTexture(volumeParams, worldPosition).xyz; \
+if (all(greaterThanEqual(samplePosition, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition, vec3(1.0, 1.0, 1.0)))) { \
+/* lookup intensity and TF */ \
+float intensity = texture(volume, samplePosition).r; \
+vec4 color = lookupTF(tf, tfParams, intensity); \
+\
+/* perform compositing */ \
+if (color.a > 0.0) { \
+/* compute gradient (needed for shading and normals) */ \
+vec3 gradient = computeGradient(volume, volumeParams, samplePosition); \
+color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb); \
+\
+/* accommodate for variable sampling rates */ \
+color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP); \
+result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a); \
+result.a = result.a + (1.0 - result.a) * color.a; \
+\
+/* save first hit ray parameter for depth value calculation */ \
+if (firstHitT < 0.0 && result.a > 0.01) { \
+firstHitT = tNear; \
+out_FHP = vec4(worldPosition, 1.0); \
+out_FHN = vec4(gradient, 1.0); \
+} \
+} \
+} \
+} \
+}
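// The pow() term above is the standard opacity correction for variable step
// sizes: with front-to-back compositing
//     C <- C + (1 - A) * a * c,    A <- A + (1 - A) * a,
// a per-sample opacity a defined at the reference interval
// 1 / SAMPLING_BASE_INTERVAL_RCP must be rescaled to the actual step size dt as
//     a' = 1 - (1 - a)^(dt * SAMPLING_BASE_INTERVAL_RCP)
// so the accumulated absorption stays approximately independent of the
// sampling rate.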
/**
* Performs the raycasting and returns the final fragment color.
*/
@@ -78,104 +130,47 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
// calculate ray parameters
vec3 direction = exitPoint.rgb - entryPoint.rgb;
-float t = 0.0;
-float tend = length(direction);
-direction = normalize(direction);
-jitterEntryPoint(entryPoint, direction, _samplingStepSize * _jitterStepSizeMultiplier);
+OFFSET = (0.25 / (1 << _vhMaxMipMapLevel1)); //< offset value used to avoid self-intersection or previous voxel intersection.
-while (t < tend) {
-// compute sample position
-vec3 worldPosition = entryPoint.rgb + t * direction;
+vec2 clip1 = clipVolume(_voxelHierarchy1, _vhMaxMipMapLevel1, _volumeParams1, entryPoint, exitPoint);
+vec2 clip2 = clipVolume(_voxelHierarchy2, _vhMaxMipMapLevel2, _volumeParams2, entryPoint, exitPoint);
+vec2 clip3 = clipVolume(_voxelHierarchy3, _vhMaxMipMapLevel3, _volumeParams3, entryPoint, exitPoint);
+float tNear = min(clip1.x, min(clip2.x, clip3.x));
+float tFar = max(clip1.y, max(clip2.y, clip3.y));
+jitterFloat(tNear, -_samplingStepSize * _jitterStepSizeMultiplier);
+while (tNear < tFar) {
+// compute sample position
+vec3 worldPosition = entryPoint.rgb + tNear * direction;
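// Note: min over the tNears and max over the tFars makes [tNear, tFar] the
// union of the three per-volume visible ranges; samples outside an individual
// volume's own range are rejected per volume by RAYCASTING_STEP.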
// FIRST volume
-vec3 samplePosition1 = worldToTexture(_volumeParams1, worldPosition).xyz;
-if (all(greaterThanEqual(samplePosition1, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition1, vec3(1.0, 1.0, 1.0)))) {
-// lookup intensity and TF
-float intensity = texture(_volume1, samplePosition1).r;
-vec4 color = lookupTF(_transferFunction1, _transferFunctionParams1, intensity);
-// perform compositing
-if (color.a > 0.0) {
-// compute gradient (needed for shading and normals)
-vec3 gradient = computeGradient(_volume1, _volumeParams1, samplePosition1);
-color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));
-// accomodate for variable sampling rates
-color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
-result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
-result.a = result.a + (1.0 -result.a) * color.a;
-}
-}
+RAYCASTING_STEP(worldPosition, clip1, _volume1, _volumeParams1, _transferFunction1, _transferFunctionParams1, result, firstHitT, tNear);
// SECOND volume
-vec3 samplePosition2 = worldToTexture(_volumeParams2, worldPosition).xyz;
-if (all(greaterThanEqual(samplePosition2, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition2, vec3(1.0, 1.0, 1.0)))) {
-// lookup intensity and TF
-float intensity = texture(_volume2, samplePosition2).r;
-vec4 color = lookupTF(_transferFunction2, _transferFunctionParams2, intensity);
-// perform compositing
-if (color.a > 0.0) {
-// compute gradient (needed for shading and normals)
-vec3 gradient = computeGradient(_volume2, _volumeParams2, samplePosition2);
-color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));
-// accomodate for variable sampling rates
-color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
-result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
-result.a = result.a + (1.0 -result.a) * color.a;
-}
-}
+RAYCASTING_STEP(worldPosition, clip2, _volume2, _volumeParams2, _transferFunction2, _transferFunctionParams2, result, firstHitT, tNear);
// THIRD volume
-vec3 samplePosition3 = worldToTexture(_volumeParams3, worldPosition).xyz;
-if (all(greaterThanEqual(samplePosition3, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition3, vec3(1.0, 1.0, 1.0)))) {
-// lookup intensity and TF
-float intensity = texture(_volume3, samplePosition3).r;
-vec4 color = lookupTF(_transferFunction3, _transferFunctionParams3, intensity);
-// perform compositing
-if (color.a > 0.0) {
-// compute gradient (needed for shading and normals)
-vec3 gradient = computeGradient(_volume3, _volumeParams3, samplePosition3);
-color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));
-// accomodate for variable sampling rates
-color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
-result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
-result.a = result.a + (1.0 -result.a) * color.a;
-}
-}
-// save first hit ray parameter for depth value calculation
-if (firstHitT < 0.0 && result.a > 0.0) {
-firstHitT = t;
-out_FHP = vec4(worldPosition, 1.0);
-out_FHN = vec4(normalize(computeGradient(_volume1, _volumeParams1, worldPosition)), 1.0);
-}
+RAYCASTING_STEP(worldPosition, clip3, _volume3, _volumeParams3, _transferFunction3, _transferFunctionParams3, result, firstHitT, tNear);
// early ray termination
if (result.a > 0.975) {
result.a = 1.0;
-t = tend;
+tNear = tFar;
}
// advance to the next evaluation point along the ray
-t += _samplingStepSize;
+tNear += _samplingStepSize;
}
// calculate depth value from ray parameter
-gl_FragDepth = 1.0;
+gl_FragDepth = 0.98765;
if (firstHitT >= 0.0) {
float depthEntry = texture(_entryPointsDepth, texCoords).z;
float depthExit = texture(_exitPointsDepth, texCoords).z;
-gl_FragDepth = calculateDepthValue(firstHitT/tend, depthEntry, depthExit);
+gl_FragDepth = calculateDepthValue(firstHitT, depthEntry, depthExit);
}
return result;
}
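// For context, a minimal sketch of the surrounding main() (not part of this
// diff; the _entryPoints/_exitPoints samplers and the out_Color output follow
// the usual CAMPVis raycaster layout and are assumptions here):
//
//     void main() {
//         vec2 texCoords = gl_FragCoord.xy * _viewportSizeRCP;
//         vec3 entry = texture(_entryPoints, texCoords).rgb;
//         vec3 exit = texture(_exitPoints, texCoords).rgb;
//         if (entry == exit)
//             discard;
//         out_Color = performRaycasting(entry, exit, texCoords);
//     }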
@@ -62,51 +62,40 @@ namespace campvis {
_tcp.p_image.setValue("ImageGroup");
_renderTargetID.setValue("result");
-_ctReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_CT_CoregT1.am"));
-_ctReader.p_targetImageID.setValue("ct.image");
-_ctReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
_t1Reader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_T1_bet04.GB306.am"));
_t1Reader.p_targetImageID.setValue("t1_tf.image");
+_t1Reader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage1);
_t1Reader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
+_ctReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_CT_CoregT1.am"));
+_ctReader.p_targetImageID.setValue("ct.image");
+_ctReader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage2);
+_ctReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
_petReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_PET-CoregNMI_fl.am"));
_petReader.p_targetImageID.setValue("pet.image");
+_petReader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage3);
_petReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
-Geometry1DTransferFunction* ct_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
-ct_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
-_mvr.p_transferFunction1.replaceTF(ct_tf);
Geometry1DTransferFunction* t1_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.12f, .15f), cgt::col4(85, 0, 0, 128), cgt::col4(255, 0, 0, 128)));
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.19f, .28f), cgt::col4(89, 89, 89, 155), cgt::col4(89, 89, 89, 155)));
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.41f, .51f), cgt::col4(170, 170, 128, 64), cgt::col4(192, 192, 128, 64)));
-_mvr.p_transferFunction2.replaceTF(t1_tf);
+_mvr.p_transferFunction1.replaceTF(t1_tf);
+Geometry1DTransferFunction* ct_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
+ct_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
+_mvr.p_transferFunction2.replaceTF(ct_tf);
Geometry1DTransferFunction* pet_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
pet_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
_mvr.p_transferFunction3.replaceTF(pet_tf);
-_mvr.p_sourceImagesId.setValue("ImageGroup");
_mvr.p_outputImageId.setValue("result");
_mvr.p_samplingRate.setValue(1.f);
}
void NeuroDemo::onReaderValidated(AbstractProcessor* p) {
-ScopedTypedData<ImageData> ctImage(getDataContainer(), _ctReader.p_targetImageID.getValue());
-ScopedTypedData<ImageData> t1Image(getDataContainer(), _t1Reader.p_targetImageID.getValue());
-ScopedTypedData<ImageData> petImage(getDataContainer(), _petReader.p_targetImageID.getValue());
-ImageSeries* is = new ImageSeries();
-if (ctImage)
-is->addImage(ctImage.getDataHandle());
-if (t1Image)
-is->addImage(t1Image.getDataHandle());
-if (petImage)
-is->addImage(petImage.getDataHandle());
-getDataContainer().addData("ImageGroup", is);
}
}
\ No newline at end of file
@@ -48,7 +48,9 @@ namespace neuro {
MultiVolumeRaycaster::MultiVolumeRaycaster(IVec2Property* viewportSizeProp)
: VisualizationProcessor(viewportSizeProp)
-, p_sourceImagesId("SourceImagesId", "Input Image(s)", "", DataNameProperty::READ)
+, p_sourceImage1("SourceImage1", "First Input Image", "", DataNameProperty::READ)
+, p_sourceImage2("SourceImage2", "Second Input Image", "", DataNameProperty::READ)
+, p_sourceImage3("SourceImage3", "Third Input Image", "", DataNameProperty::READ)
, p_geometryImageId("GeometryImageId", "Rendered Geometry to Integrate (optional)", "", DataNameProperty::READ)
, p_camera("Camera", "Camera ID", "camera", DataNameProperty::READ)
, p_lightId("LightId", "Input Light Source", "lightsource", DataNameProperty::READ)
@@ -59,18 +61,21 @@ namespace neuro {
, p_jitterStepSizeMultiplier("jitterStepSizeMultiplier", "Jitter Step Size Multiplier", 1.f, 0.f, 1.f)
, p_samplingRate("SamplingRate", "Sampling Rate", 2.f, 0.1f, 10.f, 0.1f)
, _eepShader(nullptr)
, _rcShader(nullptr)
{
addDecorator(new ProcessorDecoratorGradient());
-addProperty(p_sourceImagesId, INVALID_PROPERTIES | INVALID_RESULT);
+addProperty(p_sourceImage1, INVALID_PROPERTIES | INVALID_RESULT | INVALID_VOXEL_HIERARCHY1);
+addProperty(p_sourceImage2, INVALID_PROPERTIES | INVALID_RESULT | INVALID_VOXEL_HIERARCHY2);
+addProperty(p_sourceImage3, INVALID_PROPERTIES | INVALID_RESULT | INVALID_VOXEL_HIERARCHY3);
addProperty(p_geometryImageId);
addProperty(p_camera);
addProperty(p_lightId);
addProperty(p_outputImageId);
-addProperty(p_transferFunction1);
-addProperty(p_transferFunction2);
-addProperty(p_transferFunction3);
+addProperty(p_transferFunction1, INVALID_RESULT | INVALID_VOXEL_HIERARCHY1);
+addProperty(p_transferFunction2, INVALID_RESULT | INVALID_VOXEL_HIERARCHY2);
+addProperty(p_transferFunction3, INVALID_RESULT | INVALID_VOXEL_HIERARCHY3);
addProperty(p_jitterStepSizeMultiplier);
addProperty(p_samplingRate);
@@ -95,30 +100,59 @@ namespace neuro {
_rcShader->setAttributeLocation(0, "in_Position");
_rcShader->setAttributeLocation(1, "in_TexCoord");
}
+_vhm1 = new VoxelHierarchyMapper();
+_vhm2 = new VoxelHierarchyMapper();
+_vhm3 = new VoxelHierarchyMapper();
}
void MultiVolumeRaycaster::deinit() {
ShdrMgr.dispose(_eepShader);
-_eepShader = 0;
+_eepShader = nullptr;
ShdrMgr.dispose(_rcShader);
_rcShader = nullptr;
+delete _vhm1;
+delete _vhm2;
+delete _vhm3;
VisualizationProcessor::deinit();
}
void MultiVolumeRaycaster::updateResult(DataContainer& dataContainer) {
-ImageRepresentationGL::ScopedRepresentation singleImage(dataContainer, p_sourceImagesId.getValue(), true);
-ScopedTypedData<ImageSeries> imageSeries(dataContainer, p_sourceImagesId.getValue(), true);
+ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
+ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
+ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
ScopedTypedData<CameraData> camera(dataContainer, p_camera.getValue());
ScopedTypedData<RenderData> geometryImage(dataContainer, p_geometryImageId.getValue(), true);
ScopedTypedData<LightSourceData> light(dataContainer, p_lightId.getValue());
std::vector<const ImageRepresentationGL*> images;
-if (singleImage != nullptr)
-images.push_back(singleImage);
-else if (imageSeries != nullptr) {
-for (size_t i = 0; i < imageSeries->getNumImages(); ++i) {
-images.push_back(static_cast<const ImageData*>(imageSeries->getImage(i).getData())->getRepresentation<ImageRepresentationGL>());
-cgtAssert(images.back() != nullptr, "We have a nullptr in our image list, this is WRONG! Did a conversion fail?");
+if (image1) {
+images.push_back(image1);
+if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY1) {
+_vhm1->createHierarchy(image1, p_transferFunction1.getTF());
+validate(INVALID_VOXEL_HIERARCHY1);
+}
+}
+if (image2) {
+images.push_back(image2);
+if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY2) {
+_vhm2->createHierarchy(image2, p_transferFunction2.getTF());
+validate(INVALID_VOXEL_HIERARCHY2);
+}
+}
+if (image3) {
+images.push_back(image3);
+if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY3) {
+_vhm3->createHierarchy(image3, p_transferFunction3.getTF());
+validate(INVALID_VOXEL_HIERARCHY3);
+}
+}
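// The voxel hierarchies are rebuilt lazily: only when the corresponding
// INVALID_VOXEL_HIERARCHYn flag indicates that the image or its transfer
// function changed, and validate() clears the flag again afterwards.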
if (images.size() >= 3 && camera != nullptr) {
auto eepp = computeEntryExitPoints(images, camera, geometryImage);
@@ -138,22 +172,24 @@ namespace neuro {
}
void MultiVolumeRaycaster::updateProperties(DataContainer& dataContainer) {
-ScopedTypedData<ImageData> img(dataContainer, p_sourceImagesId.getValue(), true);
-ScopedTypedData<ImageSeries> is(dataContainer, p_sourceImagesId.getValue(), true);
+ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
+ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
+ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
-if (img != nullptr) {
-p_transferFunction1.setImageHandle(img.getDataHandle());
-}
-else if (is != nullptr && is->getNumImages() == 3) {
-p_transferFunction1.setImageHandle(is->getImage(0));
-p_transferFunction2.setImageHandle(is->getImage(1));
-p_transferFunction3.setImageHandle(is->getImage(2));
-}
-else {
+if (image1)
+p_transferFunction1.setImageHandle(image1.getDataHandle());
+else
+p_transferFunction1.setImageHandle(DataHandle(nullptr));
+if (image2)
+p_transferFunction2.setImageHandle(image2.getDataHandle());
+else
+p_transferFunction2.setImageHandle(DataHandle(nullptr));
+if (image3)
+p_transferFunction3.setImageHandle(image3.getDataHandle());
+else
+p_transferFunction3.setImageHandle(DataHandle(nullptr));
-}
}
void MultiVolumeRaycaster::updateShader() {
@@ -255,17 +291,6 @@ namespace neuro {
RenderData* MultiVolumeRaycaster::performRaycasting(DataContainer& dataContainer, const std::vector<const ImageRepresentationGL*>& images, const CameraData* camera, const RenderData* entrypoints, const RenderData* exitpoints, const LightSourceData* light) {
cgtAssert(_rcShader != nullptr, "RC shader must not be 0.");
-// little hack to support LOD texture lookup for the gradients:
-// if texture does not yet have mipmaps, create them.
-const cgt::Texture* tex = images.front()->getTexture();
-if (tex->getFilter() != cgt::Texture::MIPMAP) {
-const_cast<cgt::Texture*>(tex)->setFilter(cgt::Texture::MIPMAP);
-glGenerateMipmap(GL_TEXTURE_3D);
-glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
-LGL_ERROR;
-}
_rcShader->activate();
decorateRenderProlog(dataContainer, _rcShader);
@@ -273,7 +298,9 @@ namespace neuro {
_rcShader->setUniform("_jitterStepSizeMultiplier", p_jitterStepSizeMultiplier.getValue());
// compute sampling step size relative to volume size
-float samplingStepSize = .1f / p_samplingRate.getValue();
+float samplingStepSize = .001f / p_samplingRate.getValue();
+if (p_lqMode.getValue())
+samplingStepSize /= 4.f;
_rcShader->setUniform("_samplingStepSize", samplingStepSize);
// compute and set camera parameters
@@ -287,10 +314,10 @@ namespace neuro {
_rcShader->setUniform("const_to_z_w_2", 0.5f*((f+n)/(f-n))+0.5f);
// bind input textures
-cgt::TextureUnit volumeUnit, entryUnit, entryUnitDepth, exitUnit, exitUnitDepth, tf1Unit, tf2Unit, tf3Unit;
-images[0]->bind(_rcShader, volumeUnit, "_volume1", "_volumeParams1");
-images[1]->bind(_rcShader, volumeUnit, "_volume2", "_volumeParams2");
-images[2]->bind(_rcShader, volumeUnit, "_volume3", "_volumeParams3");
+cgt::TextureUnit volumeUnit1, volumeUnit2, volumeUnit3, entryUnit, entryUnitDepth, exitUnit, exitUnitDepth, tf1Unit, tf2Unit, tf3Unit;
+images[0]->bind(_rcShader, volumeUnit1, "_volume1", "_volumeParams1");
+images[1]->bind(_rcShader, volumeUnit2, "_volume2", "_volumeParams2");
+images[2]->bind(_rcShader, volumeUnit3, "_volume3", "_volumeParams3");
p_transferFunction1.getTF()->bind(_rcShader, tf1Unit, "_transferFunction1", "_transferFunctionParams1");
p_transferFunction2.getTF()->bind(_rcShader, tf2Unit, "_transferFunction2", "_transferFunctionParams2");
p_transferFunction3.getTF()->bind(_rcShader, tf3Unit, "_transferFunction3", "_transferFunctionParams3");
@@ -298,6 +325,31 @@ namespace neuro {
exitpoints->bind(_rcShader, exitUnit, exitUnitDepth, "_exitPoints", "_exitPointsDepth", "_exitParams");
light->bind(_rcShader, "_lightSource");
+// bind voxel hierarchies
+cgt::TextureUnit xorUnit, vhUnit1, vhUnit2, vhUnit3;
+xorUnit.activate();
+_vhm1->getXorBitmaskTexture()->bind();
+_rcShader->setUniform("_xorBitmask", xorUnit.getUnitNumber());
+vhUnit1.activate();
+_vhm1->getHierarchyTexture()->bind();
+_rcShader->setUniform("_voxelHierarchy1", vhUnit1.getUnitNumber());
+_rcShader->setUniform("_vhMaxMipMapLevel1", static_cast<int>(_vhm1->getMaxMipmapLevel()));
+if (_vhm2) {
+vhUnit2.activate();
+_vhm2->getHierarchyTexture()->bind();
+_rcShader->setUniform("_voxelHierarchy2", vhUnit2.getUnitNumber());
+_rcShader->setUniform("_vhMaxMipMapLevel2", static_cast<int>(_vhm2->getMaxMipmapLevel()));
+}
+if (_vhm3) {
+vhUnit3.activate();
+_vhm3->getHierarchyTexture()->bind();
+_rcShader->setUniform("_voxelHierarchy3", vhUnit3.getUnitNumber());
+_rcShader->setUniform("_vhMaxMipMapLevel3", static_cast<int>(_vhm3->getMaxMipmapLevel()));
+}
FramebufferActivationGuard fag(this);
createAndAttachTexture(GL_RGBA8);
createAndAttachTexture(GL_RGBA32F);
@@ -33,6 +33,7 @@
#include "core/properties/allproperties.h"
#include "modules/modulesapi.h"
#include "modules/vis/tools/voxelhierarchymapper.h"
namespace cgt {
class Shader;
@@ -52,6 +53,12 @@ namespace neuro {
*/
class CAMPVIS_MODULES_API MultiVolumeRaycaster : public VisualizationProcessor, public HasProcessorDecorators {
public:
+enum AdditionalInvalidationLevels {
+INVALID_VOXEL_HIERARCHY1 = AbstractProcessor::FIRST_FREE_TO_USE_INVALIDATION_LEVEL,
+INVALID_VOXEL_HIERARCHY2 = AbstractProcessor::FIRST_FREE_TO_USE_INVALIDATION_LEVEL << 1,
+INVALID_VOXEL_HIERARCHY3 = AbstractProcessor::FIRST_FREE_TO_USE_INVALIDATION_LEVEL << 2
+};
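// Each flag occupies its own bit above FIRST_FREE_TO_USE_INVALIDATION_LEVEL,
// so the three hierarchy flags can be set, tested (getInvalidationLevel() &
// flag) and validated independently per volume.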
/**
* Constructs a new MultiVolumeRaycaster Processor
**/
@@ -77,7 +84,9 @@ namespace neuro {
/// \see AbstractProcessor::getProcessorState()
virtual ProcessorState getProcessorState() const { return AbstractProcessor::TESTING; };
-DataNameProperty p_sourceImagesId; ///< ID for input images (either single image or image series)
+DataNameProperty p_sourceImage1; ///< ID for the first input image
+DataNameProperty p_sourceImage2; ///< ID for the second input image
+DataNameProperty p_sourceImage3; ///< ID for the third input image
DataNameProperty p_geometryImageId; ///< image ID for the optional rendered geometry to integrate into the EEP
DataNameProperty p_camera; ///< input camera
DataNameProperty p_lightId; ///< input light source
@@ -120,6 +129,12 @@ namespace neuro {
const RenderData* exitpoints,
const LightSourceData* light);
+VoxelHierarchyMapper* _vhm1;
+VoxelHierarchyMapper* _vhm2;
+VoxelHierarchyMapper* _vhm3;
static const std::string loggerCat_;
};
@@ -57,6 +57,10 @@ uniform TextureParameters3D _volumeTextureParams;
uniform sampler1D _transferFunction;
uniform TFParameters1D _transferFunctionParams;
+// Voxel Hierarchy Lookup volume
+uniform usampler2D _voxelHierarchy;
+uniform int _vhMaxMipMapLevel;
uniform LightSource _lightSource;
uniform vec3 _cameraPosition;
@@ -81,8 +85,8 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
float firstHitT = -1.0f;
-float tNear = clipFirstHitpoint(entryPoint, direction, 0.0, 1.0);
-float tFar = 1.0 - clipFirstHitpoint(exitPoint, -direction, 0.0, 1.0);
+float tNear = clipFirstHitpoint(_voxelHierarchy, _vhMaxMipMapLevel, entryPoint, direction, 0.0, 1.0);
+float tFar = 1.0 - clipFirstHitpoint(_voxelHierarchy, _vhMaxMipMapLevel, exitPoint, -direction, 0.0, 1.0);
// compute sample position
vec3 samplePosition = entryPoint.rgb + tNear * direction;