Commit 30c89024 authored by Christian Schulte zu Berge

Further work on MultiVolumeRaycaster:

* Included VoxelHierarchyMapper for empty-space skipping.
* The raycaster can now successfully render three volumes at the same time.
parent 0a73c082
@@ -32,6 +32,11 @@ void jitterEntryPoint(inout vec3 position, in vec3 direction, in float stepSize)
     position = position + direction * (stepSize * random);
 }
 
+void jitterFloat(inout float t, in float stepSize) {
+    float random = fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453);
+    t += (stepSize * random);
+}
+
 /**
  * Computes the intersection of the given ray with the given axis-aligned box.
  * \param rayOrigin     Origin of ray
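The new jitterFloat mirrors jitterEntryPoint, but offsets a scalar ray parameter instead of a position. It uses the classic fract(sin(...)) screen-space hash so that each fragment starts sampling at a different sub-step offset, which turns the wood-grain banding of uniform sampling into less objectionable noise. A minimal CPU-side sketch of the same idea (hash constants copied from the shader; the function names here are illustrative, not part of the codebase):

#include <cmath>
#include <cstdio>

// CPU analogue of the GLSL hash: fract(sin(x*12.9898 + y*78.233) * 43758.5453)
static float screenHash(float x, float y) {
    float v = std::sin(x * 12.9898f + y * 78.233f) * 43758.5453f;
    return v - std::floor(v);   // fract()
}

int main() {
    const float stepSize = 0.01f;
    // Each "fragment" starts its ray at a different sub-step offset in [0, stepSize).
    for (int px = 0; px < 4; ++px) {
        float t = 0.0f;
        t += stepSize * screenHash(float(px) + 0.5f, 0.5f);  // like jitterFloat(t, stepSize)
        std::printf("pixel %d starts sampling at t = %f\n", px, t);
    }
}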
@@ -60,6 +65,18 @@ float rayBoxIntersection(in vec3 rayOrigin, in vec3 rayDirection, in vec3 boxLlf
     return min(min(tMin.x, min(tMin.y, tMin.z)), min(tMax.x, min(tMax.y, tMax.z)));
 }
 
+// compute the near and far intersections of the cube (stored in the x and y components) using the slab method
+// no intersection means vec.x > vec.y (i.e. tNear > tFar)
+vec2 intersectAABB(vec3 rayOrigin, vec3 rayDirection, in vec3 boxLlf, in vec3 boxUrb) {
+    vec3 tMin = (boxLlf - rayOrigin) / rayDirection;
+    vec3 tMax = (boxUrb - rayOrigin) / rayDirection;
+    vec3 t1 = min(tMin, tMax);
+    vec3 t2 = max(tMin, tMax);
+    float tNear = max(max(t1.x, t1.y), t1.z);
+    float tFar = min(min(t2.x, t2.y), t2.z);
+    return vec2(tNear, tFar);
+}
+
 /**
  * Converts a depth value in eye space to the corresponding depth value in viewport space.
  * \param depth     Depth value in eye space.
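intersectAABB is the standard slab method: per axis, the ray enters and leaves a slab at two parameter values, and the box interval is the intersection of the three per-axis intervals. Division by a zero direction component yields ±infinity under IEEE arithmetic, which the min/max logic absorbs naturally. A hedged C++ transcription for testing the logic (vector type hand-rolled here, not from the codebase):

#include <algorithm>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Slab-method ray/AABB test, mirroring the GLSL intersectAABB above.
// Returns tNear/tFar; tNear > tFar means the ray misses the box.
static void intersectAABB(Vec3 o, Vec3 d, Vec3 llf, Vec3 urb, float& tNear, float& tFar) {
    float tMinX = (llf.x - o.x) / d.x, tMaxX = (urb.x - o.x) / d.x;
    float tMinY = (llf.y - o.y) / d.y, tMaxY = (urb.y - o.y) / d.y;
    float tMinZ = (llf.z - o.z) / d.z, tMaxZ = (urb.z - o.z) / d.z;
    float t1x = std::min(tMinX, tMaxX), t2x = std::max(tMinX, tMaxX);
    float t1y = std::min(tMinY, tMaxY), t2y = std::max(tMinY, tMaxY);
    float t1z = std::min(tMinZ, tMaxZ), t2z = std::max(tMinZ, tMaxZ);
    tNear = std::max(t1x, std::max(t1y, t1z));   // last slab entry
    tFar  = std::min(t2x, std::min(t2y, t2z));   // first slab exit
}

int main() {
    float tn, tf;
    intersectAABB({-1.f, 0.5f, 0.5f}, {1.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {1.f, 1.f, 1.f}, tn, tf);
    std::printf("hit:  tNear=%f tFar=%f\n", tn, tf);   // 1 and 2
    intersectAABB({-1.f, 2.0f, 0.5f}, {1.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {1.f, 1.f, 1.f}, tn, tf);
    std::printf("miss: tNear=%f tFar=%f\n", tn, tf);   // tNear > tFar
}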
...
@@ -33,6 +33,8 @@ layout(location = 2) out vec4 out_FHN;    ///< outgoing fragment first hit normal
 #include "tools/texture3d.frag"
 #include "tools/transferfunction.frag"
 
+#include "modules/vis/glsl/voxelhierarchy.frag"
+
 uniform vec2 _viewportSizeRCP;
 uniform float _jitterStepSizeMultiplier;
@@ -63,12 +65,62 @@ uniform TFParameters1D _transferFunctionParams1;
 uniform TFParameters1D _transferFunctionParams2;
 uniform TFParameters1D _transferFunctionParams3;
 
+// Voxel Hierarchy Lookup volumes
+uniform usampler2D _voxelHierarchy1;
+uniform usampler2D _voxelHierarchy2;
+uniform usampler2D _voxelHierarchy3;
+uniform int _vhMaxMipMapLevel1;
+uniform int _vhMaxMipMapLevel2;
+uniform int _vhMaxMipMapLevel3;
+
 uniform LightSource _lightSource;
 uniform vec3 _cameraPosition;
 uniform float _samplingStepSize;
 
 const float SAMPLING_BASE_INTERVAL_RCP = 200.0;
 
+vec2 clipVolume(usampler2D vhTexture, int vhMaxMipmapLevel, TextureParameters3D volumeParams, in vec3 entryPoint, in vec3 exitPoint) {
+    vec3 startPosTex = worldToTexture(volumeParams, entryPoint).xyz;
+    vec3 endPosTex = worldToTexture(volumeParams, exitPoint).xyz;
+    vec3 directionTex = endPosTex - startPosTex;
+
+    float tNear = clipFirstHitpoint(vhTexture, vhMaxMipmapLevel, startPosTex, directionTex, 0.0, 1.0);
+    float tFar = 1.0 - clipFirstHitpoint(vhTexture, vhMaxMipmapLevel, endPosTex, -directionTex, 0.0, 1.0);
+    return vec2(tNear, tFar);
+}
+
+// Performs one raycasting step for a single volume and composites it front-to-back into result.
+#define RAYCASTING_STEP(worldPosition, CLIP, volume, volumeParams, tf, tfParams, result, firstHitT, tNear) \
+{ \
+    if (tNear >= CLIP.x && tNear <= CLIP.y) { \
+        vec3 samplePosition = worldToTexture(volumeParams, worldPosition).xyz; \
+        if (all(greaterThanEqual(samplePosition, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition, vec3(1.0, 1.0, 1.0)))) { \
+            /* lookup intensity and TF */ \
+            float intensity = texture(volume, samplePosition).r; \
+            vec4 color = lookupTF(tf, tfParams, intensity); \
+            \
+            /* perform compositing */ \
+            if (color.a > 0.0) { \
+                /* compute gradient (needed for shading and normals) */ \
+                vec3 gradient = computeGradient(volume, volumeParams, samplePosition); \
+                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb); \
+                \
+                /* accommodate for variable sampling rates */ \
+                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP); \
+                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a); \
+                result.a = result.a + (1.0 - result.a) * color.a; \
+                \
+                /* save first hit ray parameter for depth value calculation */ \
+                if (firstHitT < 0.0 && result.a > 0.01) { \
+                    firstHitT = tNear; \
+                    out_FHP = vec4(worldPosition, 1.0); \
+                    out_FHN = vec4(gradient, 1.0); \
+                } \
+            } \
+        } \
+    } \
+}
+
 /**
  * Performs the raycasting and returns the final fragment color.
  */
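The compositing inside RAYCASTING_STEP is standard front-to-back alpha blending with opacity correction: a transfer-function opacity is defined for a reference sampling interval, so for a step of size s it is rescaled via alpha' = 1 - (1 - alpha)^(s * SAMPLING_BASE_INTERVAL_RCP). A small CPU sketch showing that, for a homogeneous medium, two half-size steps composite to the same result as one full step (values are illustrative):

#include <cmath>
#include <cstdio>

const float SAMPLING_BASE_INTERVAL_RCP = 200.0f;  // same constant as the shader

// Opacity correction for a variable step size, as in RAYCASTING_STEP.
static float correctAlpha(float alpha, float stepSize) {
    return 1.0f - std::pow(1.0f - alpha, stepSize * SAMPLING_BASE_INTERVAL_RCP);
}

// One front-to-back compositing step (associated colors, single gray channel).
static void composite(float& accumRgb, float& accumA, float rgb, float a) {
    accumRgb += rgb * a * (1.0f - accumA);
    accumA   += (1.0f - accumA) * a;
}

int main() {
    const float tfAlpha = 0.4f, tfGray = 0.8f;  // constant medium

    // One step of size 0.01 ...
    float rgb1 = 0.0f, a1 = 0.0f;
    composite(rgb1, a1, tfGray, correctAlpha(tfAlpha, 0.01f));

    // ... versus two steps of size 0.005 through the same medium.
    float rgb2 = 0.0f, a2 = 0.0f;
    composite(rgb2, a2, tfGray, correctAlpha(tfAlpha, 0.005f));
    composite(rgb2, a2, tfGray, correctAlpha(tfAlpha, 0.005f));

    std::printf("1 step : a=%f rgb=%f\n", a1, rgb1);   // a=0.64, rgb=0.512
    std::printf("2 steps: a=%f rgb=%f\n", a2, rgb2);   // identical for a homogeneous medium
}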
@@ -78,104 +130,47 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
     // calculate ray parameters
     vec3 direction = exitPoint.rgb - entryPoint.rgb;
-    float t = 0.0;
-    float tend = length(direction);
-    direction = normalize(direction);
 
-    jitterEntryPoint(entryPoint, direction, _samplingStepSize * _jitterStepSizeMultiplier);
+    OFFSET = (0.25 / (1 << _vhMaxMipMapLevel1)); //< offset value used to avoid self-intersection or previous voxel intersection.
+    vec2 clip1 = clipVolume(_voxelHierarchy1, _vhMaxMipMapLevel1, _volumeParams1, entryPoint, exitPoint);
+    vec2 clip2 = clipVolume(_voxelHierarchy2, _vhMaxMipMapLevel2, _volumeParams2, entryPoint, exitPoint);
+    vec2 clip3 = clipVolume(_voxelHierarchy3, _vhMaxMipMapLevel3, _volumeParams3, entryPoint, exitPoint);
+    float tNear = min(clip1.x, min(clip2.x, clip3.x));
+    float tFar = max(clip1.y, max(clip2.y, clip3.y));
 
-    while (t < tend) {
+    jitterFloat(tNear, -_samplingStepSize * _jitterStepSizeMultiplier);
+
+    while (tNear < tFar) {
         // compute sample position
-        vec3 worldPosition = entryPoint.rgb + t * direction;
+        vec3 worldPosition = entryPoint.rgb + tNear * direction;
 
         // FIRST volume
-        vec3 samplePosition1 = worldToTexture(_volumeParams1, worldPosition).xyz;
-        if (all(greaterThanEqual(samplePosition1, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition1, vec3(1.0, 1.0, 1.0)))) {
-            // lookup intensity and TF
-            float intensity = texture(_volume1, samplePosition1).r;
-            vec4 color = lookupTF(_transferFunction1, _transferFunctionParams1, intensity);
-
-            // perform compositing
-            if (color.a > 0.0) {
-                // compute gradient (needed for shading and normals)
-                vec3 gradient = computeGradient(_volume1, _volumeParams1, samplePosition1);
-                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));
-
-                // accommodate for variable sampling rates
-                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
-                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
-                result.a = result.a + (1.0 - result.a) * color.a;
-            }
-        }
+        RAYCASTING_STEP(worldPosition, clip1, _volume1, _volumeParams1, _transferFunction1, _transferFunctionParams1, result, firstHitT, tNear);
 
         // SECOND volume
-        vec3 samplePosition2 = worldToTexture(_volumeParams2, worldPosition).xyz;
-        if (all(greaterThanEqual(samplePosition2, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition2, vec3(1.0, 1.0, 1.0)))) {
-            // lookup intensity and TF
-            float intensity = texture(_volume2, samplePosition2).r;
-            vec4 color = lookupTF(_transferFunction2, _transferFunctionParams2, intensity);
-
-            // perform compositing
-            if (color.a > 0.0) {
-                // compute gradient (needed for shading and normals)
-                vec3 gradient = computeGradient(_volume2, _volumeParams2, samplePosition2);
-                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));
-
-                // accommodate for variable sampling rates
-                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
-                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
-                result.a = result.a + (1.0 - result.a) * color.a;
-            }
-        }
+        RAYCASTING_STEP(worldPosition, clip2, _volume2, _volumeParams2, _transferFunction2, _transferFunctionParams2, result, firstHitT, tNear);
 
         // THIRD volume
-        vec3 samplePosition3 = worldToTexture(_volumeParams3, worldPosition).xyz;
-        if (all(greaterThanEqual(samplePosition3, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition3, vec3(1.0, 1.0, 1.0)))) {
-            // lookup intensity and TF
-            float intensity = texture(_volume3, samplePosition3).r;
-            vec4 color = lookupTF(_transferFunction3, _transferFunctionParams3, intensity);
-
-            // perform compositing
-            if (color.a > 0.0) {
-                // compute gradient (needed for shading and normals)
-                vec3 gradient = computeGradient(_volume3, _volumeParams3, samplePosition3);
-                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));
-
-                // accommodate for variable sampling rates
-                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
-                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
-                result.a = result.a + (1.0 - result.a) * color.a;
-            }
-        }
+        RAYCASTING_STEP(worldPosition, clip3, _volume3, _volumeParams3, _transferFunction3, _transferFunctionParams3, result, firstHitT, tNear);
 
-        // save first hit ray parameter for depth value calculation
-        if (firstHitT < 0.0 && result.a > 0.0) {
-            firstHitT = t;
-            out_FHP = vec4(worldPosition, 1.0);
-            out_FHN = vec4(normalize(computeGradient(_volume1, _volumeParams1, worldPosition)), 1.0);
-        }
-
         // early ray termination
         if (result.a > 0.975) {
             result.a = 1.0;
-            t = tend;
+            tNear = tFar;
         }
 
         // advance to the next evaluation point along the ray
-        t += _samplingStepSize;
+        tNear += _samplingStepSize;
     }
 
     // calculate depth value from ray parameter
-    gl_FragDepth = 1.0;
+    gl_FragDepth = 0.98765;
     if (firstHitT >= 0.0) {
         float depthEntry = texture(_entryPointsDepth, texCoords).z;
         float depthExit = texture(_exitPointsDepth, texCoords).z;
-        gl_FragDepth = calculateDepthValue(firstHitT/tend, depthEntry, depthExit);
+        gl_FragDepth = calculateDepthValue(firstHitT, depthEntry, depthExit);
     }
     return result;
 }
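performRaycasting now restricts sampling to the part of the ray that can contribute: clipVolume clips the segment against each volume's voxel hierarchy from both ends (the far clip reuses clipFirstHitpoint on the reversed ray, hence the 1.0 - ...), the three per-volume intervals are merged with min/max so the loop covers the union of all non-empty regions, and the CLIP test inside RAYCASTING_STEP skips samples outside each volume's own interval. A minimal sketch of the interval bookkeeping (the interval values are made up):

#include <algorithm>
#include <cstdio>

struct Interval { float tNear, tFar; };  // in [0,1] along the entry->exit segment

int main() {
    // Hypothetical per-volume clip intervals, as clipVolume() would return them.
    Interval clip1{0.30f, 0.70f}, clip2{0.10f, 0.40f}, clip3{0.55f, 0.90f};

    // Union of the non-empty regions: earliest near, latest far.
    float tNear = std::min({clip1.tNear, clip2.tNear, clip3.tNear});  // 0.10
    float tFar  = std::max({clip1.tFar,  clip2.tFar,  clip3.tFar});   // 0.90

    int steps = 0, samples = 0;
    for (float t = tNear; t < tFar; t += 0.01f) {
        ++steps;
        // A volume is only sampled while t lies inside its own interval,
        // mirroring the "tNear >= CLIP.x && tNear <= CLIP.y" test in the macro.
        if (t >= clip1.tNear && t <= clip1.tFar) ++samples;
        if (t >= clip2.tNear && t <= clip2.tFar) ++samples;
        if (t >= clip3.tNear && t <= clip3.tFar) ++samples;
    }
    std::printf("loop steps: %d, volume samples: %d (vs. %d without skipping)\n",
                steps, samples, 3 * 100);
}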
...
@@ -62,51 +62,40 @@ namespace campvis {
         _tcp.p_image.setValue("ImageGroup");
         _renderTargetID.setValue("result");
 
-        _ctReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_CT_CoregT1.am"));
-        _ctReader.p_targetImageID.setValue("ct.image");
-        _ctReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
-
         _t1Reader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_T1_bet04.GB306.am"));
         _t1Reader.p_targetImageID.setValue("t1_tf.image");
+        _t1Reader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage1);
         _t1Reader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
 
+        _ctReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_CT_CoregT1.am"));
+        _ctReader.p_targetImageID.setValue("ct.image");
+        _ctReader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage2);
+        _ctReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
+
         _petReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_PET-CoregNMI_fl.am"));
         _petReader.p_targetImageID.setValue("pet.image");
+        _petReader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage3);
         _petReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
 
-        Geometry1DTransferFunction* ct_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
-        ct_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
-        _mvr.p_transferFunction1.replaceTF(ct_tf);
-
         Geometry1DTransferFunction* t1_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
         t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.12f, .15f), cgt::col4(85, 0, 0, 128), cgt::col4(255, 0, 0, 128)));
         t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.19f, .28f), cgt::col4(89, 89, 89, 155), cgt::col4(89, 89, 89, 155)));
         t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.41f, .51f), cgt::col4(170, 170, 128, 64), cgt::col4(192, 192, 128, 64)));
-        _mvr.p_transferFunction2.replaceTF(t1_tf);
+        _mvr.p_transferFunction1.replaceTF(t1_tf);
+
+        Geometry1DTransferFunction* ct_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
+        ct_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
+        _mvr.p_transferFunction2.replaceTF(ct_tf);
 
         Geometry1DTransferFunction* pet_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
         pet_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
         _mvr.p_transferFunction3.replaceTF(pet_tf);
 
-        _mvr.p_sourceImagesId.setValue("ImageGroup");
         _mvr.p_outputImageId.setValue("result");
         _mvr.p_samplingRate.setValue(1.f);
     }
 
     void NeuroDemo::onReaderValidated(AbstractProcessor* p) {
-        ScopedTypedData<ImageData> ctImage(getDataContainer(), _ctReader.p_targetImageID.getValue());
-        ScopedTypedData<ImageData> t1Image(getDataContainer(), _t1Reader.p_targetImageID.getValue());
-        ScopedTypedData<ImageData> petImage(getDataContainer(), _petReader.p_targetImageID.getValue());
-
-        ImageSeries* is = new ImageSeries();
-        if (ctImage)
-            is->addImage(ctImage.getDataHandle());
-        if (t1Image)
-            is->addImage(t1Image.getDataHandle());
-        if (petImage)
-            is->addImage(petImage.getDataHandle());
-
-        getDataContainer().addData("ImageGroup", is);
     }
 }
\ No newline at end of file
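The demo no longer assembles an ImageSeries in onReaderValidated; each reader's target image ID is instead shared directly with the matching raycaster input via addSharedProperty, so the downstream property follows the reader's value automatically. A toy stand-in for that pattern (the class and member names here are simplified placeholders, not the CAMPVis API):

#include <cstdio>
#include <string>
#include <vector>

// Toy stand-in for DataNameProperty sharing: setting the source property
// propagates the value to every shared (downstream) property.
struct NameProperty {
    std::string value;
    std::vector<NameProperty*> shared;

    void addSharedProperty(NameProperty* p) { shared.push_back(p); }
    void setValue(const std::string& v) {
        value = v;
        for (NameProperty* p : shared)
            p->setValue(v);   // keep downstream inputs in sync
    }
};

int main() {
    NameProperty readerTargetId;   // e.g. _t1Reader.p_targetImageID
    NameProperty raycasterInput;   // e.g. _mvr.p_sourceImage1

    readerTargetId.addSharedProperty(&raycasterInput);
    readerTargetId.setValue("t1_tf.image");
    std::printf("raycaster reads: %s\n", raycasterInput.value.c_str());
}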
@@ -48,7 +48,9 @@ namespace neuro {
     MultiVolumeRaycaster::MultiVolumeRaycaster(IVec2Property* viewportSizeProp)
         : VisualizationProcessor(viewportSizeProp)
-        , p_sourceImagesId("SourceImagesId", "Input Image(s)", "", DataNameProperty::READ)
+        , p_sourceImage1("SourceImage1", "First Input Image", "", DataNameProperty::READ)
+        , p_sourceImage2("SourceImage2", "Second Input Image", "", DataNameProperty::READ)
+        , p_sourceImage3("SourceImage3", "Third Input Image", "", DataNameProperty::READ)
         , p_geometryImageId("GeometryImageId", "Rendered Geometry to Integrate (optional)", "", DataNameProperty::READ)
         , p_camera("Camera", "Camera ID", "camera", DataNameProperty::READ)
         , p_lightId("LightId", "Input Light Source", "lightsource", DataNameProperty::READ)
@@ -59,18 +61,21 @@ namespace neuro {
         , p_jitterStepSizeMultiplier("jitterStepSizeMultiplier", "Jitter Step Size Multiplier", 1.f, 0.f, 1.f)
         , p_samplingRate("SamplingRate", "Sampling Rate", 2.f, 0.1f, 10.f, 0.1f)
         , _eepShader(nullptr)
+        , _rcShader(nullptr)
     {
         addDecorator(new ProcessorDecoratorGradient());
 
-        addProperty(p_sourceImagesId, INVALID_PROPERTIES | INVALID_RESULT);
+        addProperty(p_sourceImage1, INVALID_PROPERTIES | INVALID_RESULT | INVALID_VOXEL_HIERARCHY1);
+        addProperty(p_sourceImage2, INVALID_PROPERTIES | INVALID_RESULT | INVALID_VOXEL_HIERARCHY2);
+        addProperty(p_sourceImage3, INVALID_PROPERTIES | INVALID_RESULT | INVALID_VOXEL_HIERARCHY3);
         addProperty(p_geometryImageId);
         addProperty(p_camera);
         addProperty(p_lightId);
         addProperty(p_outputImageId);
-        addProperty(p_transferFunction1);
-        addProperty(p_transferFunction2);
-        addProperty(p_transferFunction3);
+        addProperty(p_transferFunction1, INVALID_RESULT | INVALID_VOXEL_HIERARCHY1);
+        addProperty(p_transferFunction2, INVALID_RESULT | INVALID_VOXEL_HIERARCHY2);
+        addProperty(p_transferFunction3, INVALID_RESULT | INVALID_VOXEL_HIERARCHY3);
         addProperty(p_jitterStepSizeMultiplier);
         addProperty(p_samplingRate);
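The new INVALID_VOXEL_HIERARCHY1..3 levels let a change to one input image or transfer function trigger a rebuild of only that volume's hierarchy. In CAMPVis such levels are bit flags combined with the standard ones; a hedged sketch of how the composition works (the concrete enum values in the real header may differ):

#include <cstdio>

// Invalidation levels as bit flags, in the spirit of AbstractProcessor.
// Values are illustrative; only the bit-wise composition matters here.
enum InvalidationLevel : unsigned {
    VALID                    = 0,
    INVALID_RESULT           = 1u << 0,
    INVALID_SHADER           = 1u << 1,
    INVALID_PROPERTIES       = 1u << 2,
    INVALID_VOXEL_HIERARCHY1 = 1u << 3,   // first custom level
    INVALID_VOXEL_HIERARCHY2 = 1u << 4,
    INVALID_VOXEL_HIERARCHY3 = 1u << 5,
};

int main() {
    // Changing p_transferFunction2 invalidates the result and hierarchy 2 only.
    unsigned level = INVALID_RESULT | INVALID_VOXEL_HIERARCHY2;

    if (level & INVALID_VOXEL_HIERARCHY1) std::printf("rebuild hierarchy 1\n");
    if (level & INVALID_VOXEL_HIERARCHY2) std::printf("rebuild hierarchy 2\n");

    level &= ~INVALID_VOXEL_HIERARCHY2;   // like validate(INVALID_VOXEL_HIERARCHY2)
    std::printf("remaining level: 0x%x\n", level);
}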
@@ -95,30 +100,59 @@ namespace neuro {
             _rcShader->setAttributeLocation(0, "in_Position");
             _rcShader->setAttributeLocation(1, "in_TexCoord");
         }
+
+        _vhm1 = new VoxelHierarchyMapper();
+        _vhm2 = new VoxelHierarchyMapper();
+        _vhm3 = new VoxelHierarchyMapper();
     }
 
     void MultiVolumeRaycaster::deinit() {
         ShdrMgr.dispose(_eepShader);
-        _eepShader = 0;
+        _eepShader = nullptr;
+        ShdrMgr.dispose(_rcShader);
+        _rcShader = nullptr;
+
+        delete _vhm1;
+        delete _vhm2;
+        delete _vhm3;
 
         VisualizationProcessor::deinit();
     }
 
     void MultiVolumeRaycaster::updateResult(DataContainer& dataContainer) {
-        ImageRepresentationGL::ScopedRepresentation singleImage(dataContainer, p_sourceImagesId.getValue(), true);
-        ScopedTypedData<ImageSeries> imageSeries(dataContainer, p_sourceImagesId.getValue(), true);
+        ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
+        ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
+        ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
         ScopedTypedData<CameraData> camera(dataContainer, p_camera.getValue());
         ScopedTypedData<RenderData> geometryImage(dataContainer, p_geometryImageId.getValue(), true);
         ScopedTypedData<LightSourceData> light(dataContainer, p_lightId.getValue());
 
         std::vector<const ImageRepresentationGL*> images;
-        if (singleImage != nullptr)
-            images.push_back(singleImage);
-        else if (imageSeries != nullptr) {
-            for (size_t i = 0; i < imageSeries->getNumImages(); ++i) {
-                images.push_back(static_cast<const ImageData*>(imageSeries->getImage(i).getData())->getRepresentation<ImageRepresentationGL>());
-                cgtAssert(images.back() != nullptr, "We have a nullptr in our image list, this is WRONG! Did a conversion fail?");
-            }
-        }
+        if (image1) {
+            images.push_back(image1);
+
+            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY1) {
+                _vhm1->createHierarchy(image1, p_transferFunction1.getTF());
+                validate(INVALID_VOXEL_HIERARCHY1);
+            }
+        }
+        if (image2) {
+            images.push_back(image2);
+
+            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY2) {
+                _vhm2->createHierarchy(image2, p_transferFunction2.getTF());
+                validate(INVALID_VOXEL_HIERARCHY2);
+            }
+        }
+        if (image3) {
+            images.push_back(image3);
+
+            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY3) {
+                _vhm3->createHierarchy(image3, p_transferFunction3.getTF());
+                validate(INVALID_VOXEL_HIERARCHY3);
+            }
+        }
 
         if (images.size() >= 3 && camera != nullptr) {
             auto eepp = computeEntryExitPoints(images, camera, geometryImage);
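updateResult rebuilds each VoxelHierarchyMapper lazily: a hierarchy is regenerated only while its invalidation flag is set, and the flag is cleared afterwards, so subsequent frames reuse the cached hierarchy. The same rebuild-on-demand pattern in miniature (names are placeholders, not the CAMPVis API):

#include <cstdio>

// Rebuild-on-demand caching, as updateResult() does per volume hierarchy.
struct HierarchyCache {
    bool dirty = true;   // corresponds to INVALID_VOXEL_HIERARCHYn being set
    int  version = 0;    // stands in for the cached hierarchy texture

    void ensureUpToDate(int inputVersion) {
        if (dirty) {
            version = inputVersion;   // expensive: createHierarchy(...)
            dirty = false;            // like validate(INVALID_VOXEL_HIERARCHYn)
            std::printf("rebuilt hierarchy for input v%d\n", inputVersion);
        }
    }
};

int main() {
    HierarchyCache vhm1;
    vhm1.ensureUpToDate(1);   // first frame: rebuilds
    vhm1.ensureUpToDate(1);   // later frames: cache hit, no rebuild
    vhm1.dirty = true;        // e.g. the transfer function was edited
    vhm1.ensureUpToDate(2);   // rebuilds exactly once more
}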
@@ -138,22 +172,24 @@ namespace neuro {
     }
 
     void MultiVolumeRaycaster::updateProperties(DataContainer& dataContainer) {
-        ScopedTypedData<ImageData> img(dataContainer, p_sourceImagesId.getValue(), true);
-        ScopedTypedData<ImageSeries> is(dataContainer, p_sourceImagesId.getValue(), true);
+        ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
+        ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
+        ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
 
-        if (img != nullptr) {
-            p_transferFunction1.setImageHandle(img.getDataHandle());
-        }
-        else if (is != nullptr && is->getNumImages() == 3) {
-            p_transferFunction1.setImageHandle(is->getImage(0));
-            p_transferFunction2.setImageHandle(is->getImage(1));
-            p_transferFunction3.setImageHandle(is->getImage(2));
-        }
-        else {
-            p_transferFunction1.setImageHandle(DataHandle(nullptr));
-            p_transferFunction2.setImageHandle(DataHandle(nullptr));
-            p_transferFunction3.setImageHandle(DataHandle(nullptr));
-        }
+        if (image1)
+            p_transferFunction1.setImageHandle(image1.getDataHandle());
+        else
+            p_transferFunction1.setImageHandle(DataHandle(nullptr));
+
+        if (image2)
+            p_transferFunction2.setImageHandle(image2.getDataHandle());
+        else
+            p_transferFunction2.setImageHandle(DataHandle(nullptr));
+
+        if (image3)
+            p_transferFunction3.setImageHandle(image3.getDataHandle());
+        else
+            p_transferFunction3.setImageHandle(DataHandle(nullptr));
     }
 
     void MultiVolumeRaycaster::updateShader() {
@@ -255,17 +291,6 @@ namespace neuro {
     RenderData* MultiVolumeRaycaster::performRaycasting(DataContainer& dataContainer, const std::vector<const ImageRepresentationGL*>& images, const CameraData* camera, const RenderData* entrypoints, const RenderData* exitpoints, const LightSourceData* light) {
         cgtAssert(_rcShader != nullptr, "RC shader must not be 0.");
 
-        // little hack to support LOD texture lookup for the gradients:
-        // if texture does not yet have mipmaps, create them.
-        const cgt::Texture* tex = images.front()->getTexture();
-        if (tex->getFilter() != cgt::Texture::MIPMAP) {
-            const_cast<cgt::Texture*>(tex)->setFilter(cgt::Texture::MIPMAP);
-            glGenerateMipmap(GL_TEXTURE_3D);
-            glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-            glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
-            LGL_ERROR;
-        }
-
         _rcShader->activate();
         decorateRenderProlog(dataContainer, _rcShader);
@@ -273,7 +298,9 @@ namespace neuro {
         _rcShader->setUniform("_jitterStepSizeMultiplier", p_jitterStepSizeMultiplier.getValue());
 
         // compute sampling step size relative to volume size
-        float samplingStepSize = .1f / p_samplingRate.getValue();
+        float samplingStepSize = .001f / p_samplingRate.getValue();
+        if (p_lqMode.getValue())
+            samplingStepSize /= 4.f;
         _rcShader->setUniform("_samplingStepSize", samplingStepSize);
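The step size is now expressed in the normalized ray parameter used by the new loop (tNear runs from 0 to 1 over the entry-exit segment), so .001f / samplingRate bounds the per-ray step count. A quick sanity check of the arithmetic (the p_lqMode handling is copied as committed; dividing the step there increases the sample count):

#include <cstdio>

int main() {
    float samplingRate = 2.0f;           // p_samplingRate
    float step = 0.001f / samplingRate;  // 0.0005 in ray-parameter units

    // tNear..tFar spans at most [0,1], so the worst-case step count is:
    std::printf("step=%f -> up to %d steps per ray\n", step, (int)(1.0f / step));  // 2000

    bool lqMode = true;
    if (lqMode)
        step /= 4.f;                     // as in the committed code
    std::printf("lq step=%f -> up to %d steps\n", step, (int)(1.0f / step));       // 8000
}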