Starting from 2021-07-01, all LRZ GitLab users will be required to explicitly accept the GitLab Terms of Service. Please see the detailed information at https://doku.lrz.de/display/PUBLIC/GitLab and make sure that your projects conform to the requirements.

Commit baebd055 authored by Christian Schulte zu Berge's avatar Christian Schulte zu Berge
Browse files

Introducing MultiVolumeMprRenderer processor, rendering a slice through up to...

Introducing MultiVolumeMprRenderer processor, rendering a slice through up to 3 co-registered images at the same time. Updated NeuroDemo pipeline and implemented picking of MPR plane.
parent a440d1c7
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
// World-space position of this fragment on the MPR plane (the slice quad is
// rendered with its world positions doubling as texture coordinates).
in vec3 ex_TexCoord;
// Final fragment color.
out vec4 out_Color;
#include "tools/texture3d.frag"
#include "tools/transferfunction.frag"
// Input volumes: up to three co-registered images sampled on the same plane.
uniform sampler3D _volume1;
uniform sampler3D _volume2;
uniform sampler3D _volume3;
uniform TextureParameters3D _volumeParams1;
uniform TextureParameters3D _volumeParams2;
uniform TextureParameters3D _volumeParams3;
// One 1D transfer function per input volume.
uniform sampler1D _transferFunction1;
uniform sampler1D _transferFunction2;
uniform sampler1D _transferFunction3;
uniform TFParameters1D _transferFunctionParams1;
uniform TFParameters1D _transferFunctionParams2;
uniform TFParameters1D _transferFunctionParams3;
/**
 * Samples the given volume at worldPosition and classifies the intensity
 * through the given 1D transfer function.
 * Returns vec4(0.0) when worldPosition falls outside the volume's texture
 * bounds, so out-of-range volumes contribute nothing to the blend.
 */
vec4 lookupTexture(vec3 worldPosition, sampler3D volume, TextureParameters3D volumeParams, sampler1D tf, TFParameters1D tfParams) {
    vec3 texCoord = worldToTexture(volumeParams, worldPosition).xyz;
    if (all(greaterThanEqual(texCoord, vec3(0.0))) && all(lessThan(texCoord, vec3(1.0)))) {
        vec4 color = texture(volume, texCoord);
        return lookupTF(tf, tfParams, color.r);
    }
    // Bug fix: the original fell off the end of a non-void function here,
    // which yields an undefined return value in GLSL and corrupts the
    // additive blend performed in main(). Return transparent black instead.
    return vec4(0.0);
}
// Blends the classified samples of all three volumes at this fragment's
// plane position and composites the result against a black background.
void main() {
    vec4 accumulated = lookupTexture(ex_TexCoord, _volume1, _volumeParams1, _transferFunction1, _transferFunctionParams1)
                     + lookupTexture(ex_TexCoord, _volume2, _volumeParams2, _transferFunction2, _transferFunctionParams2)
                     + lookupTexture(ex_TexCoord, _volume3, _volumeParams3, _transferFunction3, _transferFunctionParams3);

    // Renormalize if the summed alpha exceeds 1.
    if (accumulated.w > 1.0)
        accumulated /= accumulated.w;

    // Blend toward black by alpha and emit a fully opaque fragment.
    out_Color = vec4(mix(accumulated.rgb, vec3(0.0, 0.0, 0.0), accumulated.a), 1.0);
}
......@@ -166,7 +166,7 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
}
// calculate depth value from ray parameter
gl_FragDepth = 0.98765;
gl_FragDepth = 1.0;
if (firstHitT >= 0.0) {
float depthEntry = texture(_entryPointsDepth, texCoords).z;
float depthExit = texture(_exitPointsDepth, texCoords).z;
......
......@@ -25,8 +25,12 @@
#include "neurodemo.h"
#include "cgt/event/keyevent.h"
#include "cgt/event/mouseevent.h"
#include "core/datastructures/imagedata.h"
#include "core/datastructures/imagerepresentationlocal.h"
#include "core/datastructures/imageseries.h"
#include "core/datastructures/renderdata.h"
#include "core/classification/geometry1dtransferfunction.h"
#include "core/classification/tfgeometry1d.h"
......@@ -40,7 +44,9 @@ namespace campvis {
, _ctReader()
, _t1Reader()
, _petReader()
, _mvmpr(&_canvasSize)
, _mvr(&_canvasSize)
, _rtc(&_canvasSize)
{
_tcp.addLqModeProcessor(&_mvr);
addEventListenerToBack(&_tcp);
......@@ -50,7 +56,9 @@ namespace campvis {
addProcessor(&_ctReader);
addProcessor(&_t1Reader);
addProcessor(&_petReader);
addProcessor(&_mvmpr);
addProcessor(&_mvr);
addProcessor(&_rtc);
}
NeuroDemo::~NeuroDemo() {
......@@ -59,43 +67,107 @@ namespace campvis {
// Initializes the pipeline: wires the three image readers into the MPR
// renderer (_mvmpr), the raycaster (_mvr) and the compositor (_rtc), and
// builds transfer functions for the three co-registered modalities
// (T1 MRI, CT, PET).
// NOTE(review): several statements below appear twice in old/new pairs
// (two _tcp.p_image.setValue() calls, two _renderTargetID.setValue() calls,
// replaceTF() invoked twice with the same raw pointer) — this looks like
// interleaved old/new lines from a merged diff. Verify this sequence against
// version control; calling replaceTF(t1_tf) and later replaceTF(t1_tf->clone())
// on different properties is only safe if each property takes ownership.
void NeuroDemo::init() {
AutoEvaluationPipeline::init();
_tcp.p_image.setValue("ImageGroup");
_renderTargetID.setValue("result");
_tcp.p_image.setValue("ct.image");
_renderTargetID.setValue("composed");
// T1 reader feeds the first source-image slot of both renderers.
_t1Reader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_T1_bet04.GB306.am"));
_t1Reader.p_targetImageID.setValue("t1_tf.image");
_t1Reader.p_targetImageID.addSharedProperty(&_mvmpr.p_sourceImage1);
_t1Reader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage1);
_t1Reader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
// CT reader feeds the second source-image slot.
_ctReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_CT_CoregT1.am"));
_ctReader.p_targetImageID.setValue("ct.image");
_ctReader.p_targetImageID.addSharedProperty(&_mvmpr.p_sourceImage2);
_ctReader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage2);
_ctReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
// PET reader feeds the third source-image slot.
_petReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_Data/K_PET-CoregNMI_fl.am"));
_petReader.p_targetImageID.setValue("pet.image");
_petReader.p_targetImageID.addSharedProperty(&_mvmpr.p_sourceImage3);
_petReader.p_targetImageID.addSharedProperty(&_mvr.p_sourceImage3);
_petReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);
// Transfer function for the T1 image (intensity window [0, .05]).
Geometry1DTransferFunction* t1_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.12f, .15f), cgt::col4(85, 0, 0, 128), cgt::col4(255, 0, 0, 128)));
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.19f, .28f), cgt::col4(89, 89, 89, 155), cgt::col4(89, 89, 89, 155)));
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.41f, .51f), cgt::col4(170, 170, 128, 64), cgt::col4(192, 192, 128, 64)));
_mvr.p_transferFunction1.replaceTF(t1_tf);
t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.25f, .65f), cgt::col4(170, 170, 128, 64), cgt::col4(192, 192, 128, 64)));
_mvmpr.p_transferFunction1.replaceTF(t1_tf);
_mvr.p_transferFunction1.replaceTF(t1_tf->clone());
// Transfer function for the CT image.
Geometry1DTransferFunction* ct_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
ct_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
_mvr.p_transferFunction2.replaceTF(ct_tf);
ct_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.8f, 1.f), cgt::col4(0, 150, 225, 192), cgt::col4(0, 192, 255, 255)));
_mvmpr.p_transferFunction2.replaceTF(ct_tf);
_mvr.p_transferFunction2.replaceTF(ct_tf->clone());
// Transfer function for the PET image.
Geometry1DTransferFunction* pet_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
pet_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
_mvr.p_transferFunction3.replaceTF(pet_tf);
_mvr.p_outputImageId.setValue("result");
auto g = TFGeometry1D::createQuad(cgt::vec2(.8f, 1.0f), cgt::col4(255, 255, 0, 48), cgt::col4(255, 32, 192, 72));
g->addKeyPoint(.9f, cgt::col4(255, 32, 0, 72));
pet_tf->addGeometry(g);
_mvmpr.p_transferFunction3.replaceTF(pet_tf);
_mvr.p_transferFunction3.replaceTF(pet_tf->clone());
// MPR plane setup: absolute placement, 3D projection, 150 units wide.
_mvmpr.p_relativeToImageCenter.setValue(false);
_mvmpr.p_use2DProjection.setValue(false);
_mvmpr.p_planeSize.setValue(150.f);
// MPR output is both the compositor's first input and the raycaster's
// proxy geometry.
_mvmpr.p_outputImageId.setValue("result.mpr");
_mvmpr.p_outputImageId.addSharedProperty(&_rtc.p_firstImageId);
_mvmpr.p_outputImageId.addSharedProperty(&_mvr.p_geometryImageId);
// Raycaster output is the compositor's second input.
_mvr.p_outputImageId.setValue("result.rc");
_mvr.p_outputImageId.addSharedProperty(&_rtc.p_secondImageId);
_mvr.p_samplingRate.setValue(1.f);
// Depth-based compositing of MPR slice and raycast rendering.
_rtc.p_compositingMethod.selectByOption(RenderTargetCompositor::CompositingModeDepth);
_rtc.p_targetImageId.setValue("composed");
}
// Slot connected to the readers' s_validated signals; intentionally empty.
void NeuroDemo::onReaderValidated(AbstractProcessor* p) {
}
// Handles Ctrl+click picking of the MPR plane: unprojects the mouse position
// through the raycaster's first-hit-point (FHP) texture to a world position,
// collects picked points, and once three points exist, fits a plane through
// the last three and pushes its normal/distance into the MPR renderer.
// Shift keeps previously picked points; a plain Ctrl+click restarts the set.
void NeuroDemo::onEvent(cgt::Event* e) {
if (typeid(*e) == typeid(cgt::MouseEvent)) {
cgt::MouseEvent* me = static_cast<cgt::MouseEvent*>(e);
if (me->action() == cgt::MouseEvent::PRESSED && me->modifiers() & cgt::Event::CTRL) {
// unproject mouse click through FirstHitPoint texture of raycaster
ScopedTypedData<RenderData> rd(getDataContainer(), _mvr.p_outputImageId.getValue());
// Expect color, FHP and (presumably) last-hit-point attachments — TODO confirm layout.
if (rd != nullptr && rd->getNumColorTextures() == 3) {
const ImageRepresentationLocal* FHP = rd->getColorTexture(1)->getRepresentation<ImageRepresentationLocal>();
// Flip y: mouse coordinates are top-left based, the texture is bottom-left based.
cgt::svec3 lookupPosition(me->x(), me->viewport().y - me->y(), 0);
if (cgt::hand(cgt::lessThan(lookupPosition, FHP->getSize()))) {
LINFO("Lookup Position: " << lookupPosition);
// Read the world position channel-by-channel from the FHP texture.
cgt::vec3 worldPosition(0.f);
for (size_t i = 0; i < 3; ++i)
worldPosition[i] = FHP->getElementNormalized(lookupPosition, i);
LINFO("World Position: " << worldPosition);
// add to base points
if (! (me->modifiers() & cgt::Event::SHIFT))
_mprBasePoints.clear();
_mprBasePoints.push_back(worldPosition);
// With at least three points, derive the MPR plane from the last three.
if (_mprBasePoints.size() > 2) {
const cgt::vec3& a = _mprBasePoints[_mprBasePoints.size() - 3];
const cgt::vec3& b = _mprBasePoints[_mprBasePoints.size() - 2];
const cgt::vec3& c = _mprBasePoints[_mprBasePoints.size() - 1];
// Hesse normal form: n = normalized cross of the two edge vectors,
// d = distance of the plane from the origin along n.
cgt::vec3 n = cgt::normalize(cgt::cross(b-a, c-a));
float d = cgt::dot(a, n);
_mvmpr.p_planeNormal.setValue(n);
_mvmpr.p_planeDistance.setValue(-d);
}
}
}
me->accept();
}
}
// Always forward to the base pipeline (even for accepted events).
//if (e->isAccepted())
AutoEvaluationPipeline::onEvent(e);
}
}
\ No newline at end of file
......@@ -33,7 +33,9 @@
#include "modules/base/processors/lightsourceprovider.h"
#include "modules/base/processors/trackballcameraprovider.h"
#include "modules/io/processors/genericimagereader.h"
#include "modules/neuro/processors/multivolumemprrenderer.h"
#include "modules/neuro/processors/multivolumeraycaster.h"
#include "modules/vis/processors/rendertargetcompositor.h"
namespace campvis {
class CAMPVIS_MODULES_API NeuroDemo : public AutoEvaluationPipeline {
......@@ -60,12 +62,20 @@ namespace campvis {
void onReaderValidated(AbstractProcessor* p);
void onEvent(cgt::Event* e);
LightSourceProvider _lsp;
TrackballCameraProvider _tcp;
GenericImageReader _ctReader;
GenericImageReader _t1Reader;
GenericImageReader _petReader;
neuro::MultiVolumeMprRenderer _mvmpr;
neuro::MultiVolumeRaycaster _mvr;
RenderTargetCompositor _rtc;
private:
std::vector<cgt::vec3> _mprBasePoints;
};
// Instantiate template to register the pipelines.
......
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#include "multivolumemprrenderer.h"

#include <cmath>

#include "cgt/glmath.h"
#include "cgt/logmanager.h"
#include "cgt/shadermanager.h"
#include "cgt/textureunit.h"

#include "core/classification/simpletransferfunction.h"
#include "core/datastructures/cameradata.h"
#include "core/datastructures/geometrydatafactory.h"
#include "core/datastructures/imagedata.h"
#include "core/datastructures/imagerepresentationgl.h"
#include "core/datastructures/imageseries.h"
#include "core/datastructures/lightsourcedata.h"
#include "core/datastructures/meshgeometry.h"
#include "core/datastructures/renderdata.h"
#include "core/pipeline/processordecoratorgradient.h"
#include "core/tools/quadrenderer.h"
namespace campvis {
namespace neuro {
const std::string MultiVolumeMprRenderer::loggerCat_ = "CAMPVis.modules.vis.MultiVolumeMprRenderer";
/**
 * Constructs the MPR renderer processor and registers all of its properties.
 * \param viewportSizeProp  Viewport size property this visualization processor renders into.
 */
MultiVolumeMprRenderer::MultiVolumeMprRenderer(IVec2Property* viewportSizeProp)
: VisualizationProcessor(viewportSizeProp)
, p_sourceImage1("SourceImage1", "First Input Image", "", DataNameProperty::READ)
, p_sourceImage2("SourceImage2", "Second Input Image", "", DataNameProperty::READ)
, p_sourceImage3("SourceImage3", "Third Input Image", "", DataNameProperty::READ)
, p_camera("Camera", "Camera ID", "camera", DataNameProperty::READ)
, p_outputImageId("OutputImageId", "Output Image ID", "MultiVolumeMprRenderer.output", DataNameProperty::WRITE)
, p_transferFunction1("TransferFunction1", "Transfer Function for First image", new SimpleTransferFunction(256))
, p_transferFunction2("TransferFunction2", "Transfer Function for Second image", new SimpleTransferFunction(256))
, p_transferFunction3("TransferFunction3", "Transfer Function for Third image", new SimpleTransferFunction(256))
, p_planeNormal("PlaneNormal", "Clipping Plane Normal", cgt::vec3(0.f, 0.f, 1.f), cgt::vec3(-1.f), cgt::vec3(1.f), cgt::vec3(.1f), cgt::ivec3(2))
, p_planeDistance("PlaneDistance", "Clipping Plane Distance", 0.f, -1000.f, 1000.f, 1.f, 1)
, p_planeSize("PlaneSize", "Clipping Plane Size", 100.f, 0.f, 1000.f, 1.f, 1)
// NOTE(review): property id "Use3dRendering"/caption "Use 3D Rendering
// instead of 2D" read inverted relative to the member name
// p_use2DProjection — confirm which polarity is intended.
, p_use2DProjection("Use3dRendering", "Use 3D Rendering instead of 2D", true)
, p_relativeToImageCenter("RelativeToImageCenter", "Construct Plane Relative to Image Center", true)
, _shader(nullptr)
{
// Source images also trigger INVALID_PROPERTIES so updateProperties() can
// refresh the transfer functions' image handles when inputs change.
addProperty(p_sourceImage1, INVALID_PROPERTIES | INVALID_RESULT);
addProperty(p_sourceImage2, INVALID_PROPERTIES | INVALID_RESULT);
addProperty(p_sourceImage3, INVALID_PROPERTIES | INVALID_RESULT);
addProperty(p_camera);
addProperty(p_outputImageId);
addProperty(p_transferFunction1);
addProperty(p_transferFunction2);
addProperty(p_transferFunction3);
addProperty(p_planeNormal);
addProperty(p_planeDistance);
addProperty(p_planeSize);
addProperty(p_use2DProjection, INVALID_RESULT | INVALID_PROPERTIES);
addProperty(p_relativeToImageCenter);
}
// Destructor; shader cleanup happens in deinit(), not here.
MultiVolumeMprRenderer::~MultiVolumeMprRenderer() {
}
/**
 * Initializes the processor: loads the MPR fragment shader with GLSL
 * version 400 and configures its vertex attribute locations.
 */
void MultiVolumeMprRenderer::init() {
    VisualizationProcessor::init();

    _shader = ShdrMgr.loadWithCustomGlslVersion("core/glsl/passthrough.vert", "", "modules/neuro/glsl/multivolumemprrenderer.frag", generateHeader(), "400");
    if (_shader == nullptr)
        return;

    // Attribute layout expected by the pass-through vertex shader.
    _shader->setAttributeLocation(0, "in_Position");
    _shader->setAttributeLocation(1, "in_TexCoord");
}
// Disposes the shader and deinitializes the base visualization processor.
void MultiVolumeMprRenderer::deinit() {
ShdrMgr.dispose(_shader);
_shader = nullptr;
VisualizationProcessor::deinit();
}
/**
 * Renders a single MPR slice through up to three co-registered volumes.
 *
 * Builds a quad for the plane defined by p_planeNormal/p_planeDistance/
 * p_planeSize in world coordinates (optionally centered on the combined
 * bounds of the inputs), binds all three volumes and transfer functions,
 * and renders the quad either with a synthetic 2D camera or the provided
 * camera data.
 *
 * \param dataContainer  Container to read the input images/camera from and
 *                       to write the resulting RenderData to.
 */
void MultiVolumeMprRenderer::updateResult(DataContainer& dataContainer) {
    ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
    ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
    ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
    ScopedTypedData<CameraData> camera(dataContainer, p_camera.getValue());

    if (image1 && image2 && image3 && camera) {
        // NOTE(review): camera is already non-null here due to the outer
        // condition, so this check is redundant. If 2D projection is meant to
        // work without camera data, the `&& camera` above should go instead —
        // confirm intent before restructuring.
        if (p_use2DProjection.getValue() || camera != nullptr) {
            // Construct the clipping plane in world coordinates.
            cgt::vec3 n = cgt::normalize(p_planeNormal.getValue());

            // Pick a helper axis that is not (nearly) parallel to n so the
            // cross products below stay well-defined.
            cgt::vec3 temp(1.0, 0.0, 0.0);
            // Bug fix: unqualified abs() picks the integer overload in C++
            // and truncates the float dot product; use std::abs from <cmath>.
            if (std::abs(cgt::dot(temp, n)) > 0.9)
                temp = cgt::vec3(0.0, 1.0, 0.0);

            // Two orthogonal in-plane half-extent vectors.
            cgt::vec3 inPlaneA = cgt::normalize(cgt::cross(n, temp)) * 0.5f * p_planeSize.getValue();
            cgt::vec3 inPlaneB = cgt::normalize(cgt::cross(n, inPlaneA)) * 0.5f * p_planeSize.getValue();
            cgt::vec3 base = (n * -p_planeDistance.getValue());

            // move to image center if wanted
            if (p_relativeToImageCenter.getValue()) {
                cgt::Bounds b;
                if (image1)
                    b.addVolume(image1->getParent()->getWorldBounds());
                if (image2)
                    b.addVolume(image2->getParent()->getWorldBounds());
                if (image3)
                    b.addVolume(image3->getParent()->getWorldBounds());
                base += b.center();
            }

            // Construct the four corners; world positions double as 3D
            // texture lookup coordinates (cf. the fragment shader).
            std::vector<cgt::vec3> texCoords;
            texCoords.push_back(base + inPlaneA + inPlaneB);
            texCoords.push_back(base - inPlaneA + inPlaneB);
            texCoords.push_back(base - inPlaneA - inPlaneB);
            texCoords.push_back(base + inPlaneA - inPlaneB);
            FaceGeometry slice(texCoords, texCoords);

            // perform the rendering
            glEnable(GL_DEPTH_TEST);
            _shader->activate();
            cgt::Shader::IgnoreUniformLocationErrorGuard guard(_shader);

            if (p_use2DProjection.getValue()) {
                // generate a camera position that simulates 2D rendering
                // this way it is easier to achieve the correct aspect ratio in all cases
                cgt::vec3 camPosition = base - p_planeSize.getValue() * n;
                float ratio = static_cast<float>(getEffectiveViewportSize().x) / getEffectiveViewportSize().y;

                // experimentally discovered:
                // if the camera distance is half as big as the plane size, a field of view of
                // 54 allows to see everything
                float fovy = 54.f;
                cgt::Camera c(camPosition, base, inPlaneA, fovy, ratio, 0.1f, 10000.f);
                _shader->setUniform("_projectionMatrix", c.getProjectionMatrix());
                _shader->setUniform("_viewMatrix", c.getViewMatrix());
            }
            else {
                _shader->setUniform("_projectionMatrix", camera->getCamera().getProjectionMatrix());
                _shader->setUniform("_viewMatrix", camera->getCamera().getViewMatrix());
            }

            // Bind the three volumes and their transfer functions.
            cgt::TextureUnit volumeUnit1, volumeUnit2, volumeUnit3, tf1Unit, tf2Unit, tf3Unit;
            image1->bind(_shader, volumeUnit1, "_volume1", "_volumeParams1");
            image2->bind(_shader, volumeUnit2, "_volume2", "_volumeParams2");
            image3->bind(_shader, volumeUnit3, "_volume3", "_volumeParams3");
            p_transferFunction1.getTF()->bind(_shader, tf1Unit, "_transferFunction1", "_transferFunctionParams1");
            p_transferFunction2.getTF()->bind(_shader, tf2Unit, "_transferFunction2", "_transferFunctionParams2");
            p_transferFunction3.getTF()->bind(_shader, tf3Unit, "_transferFunction3", "_transferFunctionParams3");

            FramebufferActivationGuard fag(this);
            createAndAttachColorTexture();
            createAndAttachDepthTexture();
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

            slice.render(GL_TRIANGLE_FAN);

            _shader->deactivate();
            cgt::TextureUnit::setZeroUnit();
            glDisable(GL_DEPTH_TEST);

            dataContainer.addData(p_outputImageId.getValue(), new RenderData(_fbo));
        }
    }
    else {
        LDEBUG("No suitable input data found!");
    }
}
/**
 * Generates the GLSL preprocessor header for the fragment shader.
 * Currently there are no conditional defines, so this returns an empty string.
 */
std::string MultiVolumeMprRenderer::generateHeader() const {
return "";
}
/**
 * Refreshes the transfer function properties' image handles so they reflect
 * the currently connected input images (or an empty handle when an input is
 * missing).
 */
void MultiVolumeMprRenderer::updateProperties(DataContainer& dataContainer) {
    ImageRepresentationGL::ScopedRepresentation rep1(dataContainer, p_sourceImage1.getValue());
    ImageRepresentationGL::ScopedRepresentation rep2(dataContainer, p_sourceImage2.getValue());
    ImageRepresentationGL::ScopedRepresentation rep3(dataContainer, p_sourceImage3.getValue());

    // Hand each transfer function the matching image, or clear it.
    p_transferFunction1.setImageHandle(rep1 ? rep1.getDataHandle() : DataHandle(nullptr));
    p_transferFunction2.setImageHandle(rep2 ? rep2.getDataHandle() : DataHandle(nullptr));
    p_transferFunction3.setImageHandle(rep3 ? rep3.getDataHandle() : DataHandle(nullptr));
}
/**
 * Rebuilds the shader with the current header defines.
 */
void MultiVolumeMprRenderer::updateShader() {
    // Bug fix: init() tolerates a failed shader load (_shader stays nullptr),
    // but this method dereferenced it unconditionally — guard against that.
    if (_shader == nullptr)
        return;
    _shader->setHeaders(generateHeader());
    _shader->rebuild();
}
}
}
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#ifndef MULTIVOLUMEMPRRENDERER_H__
#define MULTIVOLUMEMPRRENDERER_H__
#include <string>
#include "core/classification/abstracttransferfunction.h"
#include "core/pipeline/visualizationprocessor.h"
#include "core/pipeline/abstractprocessordecorator.h"
#include "core/properties/allproperties.h"
#include "modules/modulesapi.h"
#include "modules/vis/tools/voxelhierarchymapper.h"
namespace cgt {
class Shader;
}
namespace campvis {
class CameraData;
class ImageRepresentationGL;
class ImageSeries;
class LightSourceData;
class RenderData;
namespace neuro {
/**
* Performs an MPR rendering of multiple images at the same time.
*/
class CAMPVIS_MODULES_API MultiVolumeMprRenderer : public VisualizationProcessor {
public:
/**
* Constructs a new MultiVolumeMprRenderer Processor
**/
MultiVolumeMprRenderer(IVec2Property* viewportSizeProp);
/**
* Destructor
**/
virtual ~MultiVolumeMprRenderer();
/// \see AbstractProcessor::init
virtual void init();
/// \see AbstractProcessor::deinit
virtual void deinit();
/// \see AbstractProcessor::getName()
virtual const std::string getName() const { return "MultiVolumeMprRenderer"; };
/// \see AbstractProcessor::getDescription()
virtual const std::string getDescription() const { return "Performs an MPR rendering of multiple images at the same time."; };
/// \see AbstractProcessor::getAuthor()
virtual const std::string getAuthor() const { return "Christian Schulte zu Berge <christian.szb@in.tum.de>"; };
/// \see AbstractProcessor::getProcessorState()
virtual ProcessorState getProcessorState() const { return AbstractProcessor::TESTING; };
DataNameProperty p_sourceImage1; ///< ID for first input images
DataNameProperty p_sourceImage2; ///< ID for second input images
DataNameProperty p_sourceImage3; ///< ID for third input images
DataNameProperty p_camera; ///< input camra
DataNameProperty p_outputImageId; ///< ID for output image
TransferFunctionProperty p_transferFunction1; ///< Transfer function for first image
TransferFunctionProperty p_transferFunction2; ///< Transfer function for second image
TransferFunctionProperty p_transferFunction3; ///< Transfer function for third image
Vec3Property p_planeNormal; ///< Clipping plane normal
FloatProperty p_planeDistance; ///< Clipping plane distance
FloatProperty p_planeSize; ///< Size of clipping plane
BoolProperty p_use2DProjection; ///< Use 3D Rendering instead of 2D
BoolProperty p_relativeToImageCenter; ///< Flag whether to construct image plane relative to image center