
Commit 7f1a3689 authored by Christian Schulte zu Berge

Started work on neuro project

parent 45a42e7b
@@ -88,13 +88,11 @@ namespace campvis {
     }
 
     cgt::Bounds ImageData::getWorldBounds() const {
-        return cgt::Bounds(_mappingInformation.getOffset(), _mappingInformation.getOffset() + (cgt::vec3(_size) * _mappingInformation.getVoxelSize()));
+        return getWorldBounds(cgt::svec3(0, 0, 0), cgt::svec3(_size));
     }
 
     cgt::Bounds ImageData::getWorldBounds(const cgt::svec3& llf, const cgt::svec3& urb) const {
-        return cgt::Bounds(
-            _mappingInformation.getOffset() + (cgt::vec3(llf) * _mappingInformation.getVoxelSize()),
-            _mappingInformation.getOffset() + (cgt::vec3(urb) * _mappingInformation.getVoxelSize()));
+        return cgt::Bounds(_mappingInformation.getVoxelToWorldMatrix() * cgt::vec3(llf), _mappingInformation.getVoxelToWorldMatrix() * cgt::vec3(urb));
     }
 
     size_t ImageData::getNumElements() const {
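The rewritten bounds computation sends the two voxel-space corners through getVoxelToWorldMatrix(). When that matrix is a pure translation-plus-scale built from the offset and voxel spacing, this reproduces the old offset + corner * spacing arithmetic exactly; once the matrix may also rotate (e.g., a NIfTI qform, as in the reader change further below), mapping only llf and urb no longer yields a tight axis-aligned box. A minimal C++ sketch of the conservative alternative, transforming all eight corners — Vec3, transform() and the row-major 3x4 matrix layout are hypothetical stand-ins, not CAMPVis API:

#include <algorithm>
#include <array>

struct Vec3 { float x, y, z; };

// hypothetical affine transform: out = M * v, with M given as a row-major 3x4 matrix
static Vec3 transform(const std::array<float, 12>& m, Vec3 v) {
    return { m[0]*v.x + m[1]*v.y + m[2]*v.z  + m[3],
             m[4]*v.x + m[5]*v.y + m[6]*v.z  + m[7],
             m[8]*v.x + m[9]*v.y + m[10]*v.z + m[11] };
}

// Axis-aligned bounds of a voxel box under an affine map: transform all eight
// corners, not just llf and urb, and take the component-wise min/max.
static void worldBounds(const std::array<float, 12>& voxelToWorld,
                        Vec3 llf, Vec3 urb, Vec3& outMin, Vec3& outMax) {
    outMin = {  1e30f,  1e30f,  1e30f };
    outMax = { -1e30f, -1e30f, -1e30f };
    for (int i = 0; i < 8; ++i) {
        Vec3 c = { (i & 1) ? urb.x : llf.x,
                   (i & 2) ? urb.y : llf.y,
                   (i & 4) ? urb.z : llf.z };
        Vec3 w = transform(voxelToWorld, c);
        outMin = { std::min(outMin.x, w.x), std::min(outMin.y, w.y), std::min(outMin.z, w.z) };
        outMax = { std::max(outMax.x, w.x), std::max(outMax.y, w.y), std::max(outMax.z, w.z) };
    }
}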
@@ -82,5 +82,13 @@ namespace campvis {
         return "Image Series";
     }
 
+    cgt::Bounds ImageSeries::getWorldBounds() const {
+        cgt::Bounds b;
+        for (size_t i = 0; i < _images.size(); ++i) {
+            b.addVolume(static_cast<const ImageData*>(_images[i].getData())->getWorldBounds());
+        }
+        return b;
+    }
+
 }
\ No newline at end of file
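ImageSeries::getWorldBounds() folds the member images' boxes together via cgt::Bounds::addVolume(); the static_cast is safe as long as only ImageData handles ever enter _images, which addImage() presumably guarantees. A minimal sketch of the union semantics assumed here — Box is a hypothetical stand-in for cgt::Bounds:

#include <algorithm>

struct Box {
    float min[3] = {  1e30f,  1e30f,  1e30f };   // starts empty (inverted box)
    float max[3] = { -1e30f, -1e30f, -1e30f };

    // grow this box to enclose another box: component-wise min/max
    void addVolume(const Box& o) {
        for (int i = 0; i < 3; ++i) {
            min[i] = std::min(min[i], o.min[i]);
            max[i] = std::max(max[i], o.max[i]);
        }
    }
};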
@@ -35,7 +35,7 @@ namespace campvis {
     /**
      * Class encapsulating a series of images.
      */
-    class CAMPVIS_CORE_API ImageSeries : public AbstractData {
+    class CAMPVIS_CORE_API ImageSeries : public AbstractData, public IHasWorldBounds {
    public:
        /**
         * Constructor
@@ -69,6 +69,12 @@ namespace campvis {
        /// \see AbstractData::getTypeAsString()
        virtual std::string getTypeAsString() const;
 
+        /**
+         * Returns the data extent in world coordinates.
+         * \return  The data extent in world coordinates.
+         */
+        virtual cgt::Bounds getWorldBounds() const;
+
        /**
         * Appends the image \a image to the series.
         * \param   image   Image to be added.
@@ -166,7 +166,7 @@ namespace campvis {
    void TrackballCameraProvider::updateProperties(DataContainer& data) {
        // convert data
-        ScopedTypedData<ImageData> img(data, p_image.getValue());
+        ScopedTypedData<IHasWorldBounds> img(data, p_image.getValue());
        if (img != 0) {
            reinitializeCamera(img->getWorldBounds());
        }
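Requesting the data as IHasWorldBounds instead of ImageData lets the trackball work with anything that can report world bounds, including the new ImageSeries produced by the demo pipeline. A hedged sketch of the lookup pattern presumably happening inside ScopedTypedData (the actual implementation is not shown in this commit; boundsForId() and the registry are illustrative only):

#include <map>
#include <string>

struct AbstractData { virtual ~AbstractData() = default; };
struct Bounds {};
struct IHasWorldBounds {
    virtual ~IHasWorldBounds() = default;
    virtual Bounds getWorldBounds() const = 0;
};

// hypothetical stand-in for the DataContainer lookup
std::map<std::string, AbstractData*> g_container;

Bounds boundsForId(const std::string& id, bool& found) {
    found = false;
    auto it = g_container.find(id);
    if (it == g_container.end())
        return Bounds{};
    // the interface cast that ScopedTypedData<IHasWorldBounds> presumably performs
    if (const IHasWorldBounds* b = dynamic_cast<const IHasWorldBounds*>(it->second)) {
        found = true;
        return b->getWorldBounds();
    }
    return Bounds{};
}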
@@ -709,16 +709,16 @@ namespace campvis {
            0.0f, 0.0f, 0.0f, 1.0f);
 
        float qfac = header.pixdim[0];
-        if(fabs(qfac) < 0.1f)
+        if (fabs(qfac) < 0.1f)
            qfac = 1.0f;
        cgt::mat4 sc = cgt::mat4::createScale(cgt::vec3(1.0f, 1.0f, qfac));
 
-        cgt::mat4 os = cgt::mat4::createTranslation(cgt::vec3(header.qoffset_x, header.qoffset_y, header.qoffset_z));
-        pToW = os * rot2 * sc;
+        //cgt::mat4 os = cgt::mat4::createTranslation(cgt::vec3(header.qoffset_x, header.qoffset_y, header.qoffset_z));
+        pToW = rot2 * sc; //os * rot2 * sc;
    }
 
    // Nifti transformations give us the center of the first voxel, we translate to correct:
-    pToW = pToW * cgt::mat4::createTranslation(-spacing * 0.5f);
+    // pToW = pToW * cgt::mat4::createTranslation(-spacing * 0.5f);
    // TODO: implement arbitrary transformations into ImageMappingInformation
 
    cgt::svec3 imageSize(dimensions);
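For reference, NIfTI-1 stores qfac in pixdim[0]; it is defined to be +1 or -1 and flips the handedness of the quaternion rotation along z, which is what the createScale(1, 1, qfac) factor applies, and the fabs(qfac) < 0.1 guard implements the spec's advice to treat an unset 0 as +1. With the qoffset translation and the half-voxel centering commented out, every volume is for now anchored with its first voxel at the world origin, pending the TODO above. A small sketch of the qfac normalization — normalizeQfac is a hypothetical helper name:

#include <cmath>

// NIfTI-1: pixdim[0] carries qfac, which must be +1 or -1;
// readers are told to treat a zero (unset) value as +1.
float normalizeQfac(float pixdim0) {
    return (std::fabs(pixdim0) < 0.1f) ? 1.0f : pixdim0;
}

// The qform voxel-to-world map is then R * diag(pixdim[1], pixdim[2], qfac * pixdim[3]),
// followed by the (qoffset_x, qoffset_y, qoffset_z) translation.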
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
in vec3 ex_TexCoord; ///< incoming texture coordinate
out vec4 out_Color; ///< outgoing fragment color
#include "tools/texture2d.frag"
uniform vec2 _viewportSizeRCP;
uniform bool _integrateGeometry; ///< flag whether to integrate geometry into the EEP
uniform bool _isEntrypoint; ///< true if current run is for entrypoints, false if current run is for exitpoints
uniform sampler2D _entryDepthTexture; ///< depth texture of the entrypoints (only used in exitpoints run)
uniform TextureParameters2D _entryDepthTexParams;
uniform sampler2D _geometryDepthTexture; ///< depth texture of rendered geometry
uniform TextureParameters2D _geometryDepthTexParams;
uniform float _near;
uniform float _far;
uniform mat4 _inverseViewMatrix; ///< inverse camera view matrix
uniform mat4 _inverseProjectionMatrix; ///< inverse camera projection matrix
uniform mat4 _volumeWorldToTexture; ///< world-to-texture matrix of volume
void main() {
    vec2 fragCoordNormalized = gl_FragCoord.xy * _viewportSizeRCP;
    float fragDepth = gl_FragCoord.z;

    if (_integrateGeometry) {
        float geometryDepth = texture(_geometryDepthTexture, fragCoordNormalized).r;

        if (_isEntrypoint) {
            // integrating geometry into Entrypoints
            float entryDepth = gl_FragCoord.z;

            if (geometryDepth <= entryDepth) {
                // geometry before Entrypoint
                out_Color = vec4(0.0);
                fragDepth = geometryDepth;
            }
            else {
                // geometry behind Entrypoint
                out_Color = vec4(ex_TexCoord, 1.0);
                fragDepth = entryDepth;
            }
        }
        else {
            // integrating geometry into Exitpoints
            float entryDepth = texture(_entryDepthTexture, fragCoordNormalized).r;
            float exitDepth = gl_FragCoord.z;

            if (geometryDepth <= entryDepth) {
                // geometry before Entrypoint
                out_Color = vec4(0.0);
                fragDepth = geometryDepth;
            }
            else if (geometryDepth <= exitDepth) {
                // geometry between entrypoint and exitpoint:
                // transform viewport coordinates to [-1, 1] NDC
                vec4 result = vec4(fragCoordNormalized, geometryDepth, 1.0);
                result = 2.0 * result - 1.0;

                // reverse perspective division by w (which is equal to the camera-space z)
                float origZG = (2.0 * _far * _near) / ((_far + _near) - result.z * (_far - _near));
                result *= origZG;

                // unproject and reverse camera-transform
                result = vec4((_inverseViewMatrix * (_inverseProjectionMatrix * result)).xyz, 1.0);
                out_Color = vec4(result.xyz, 1.0);
                fragDepth = geometryDepth;
            }
            else {
                // geometry behind exitpoint
                out_Color = vec4(ex_TexCoord, 1.0);
                fragDepth = exitDepth;
            }
        }
    }
    else {
        out_Color = vec4(ex_TexCoord, 1.0);
    }

    gl_FragDepth = fragDepth;
}
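The middle branch reconstructs the world-space position of the geometry fragment from its depth value: after mapping to NDC, multiplying by origZG undoes the perspective division, because for a standard OpenGL projection the clip-space w equals the eye-space distance 2*far*near / ((far + near) - z_ndc*(far - near)). A small C++ check of that term under the same assumptions (standard GL depth range [0, 1]):

#include <cstdio>

// Recover the eye-space distance behind an OpenGL depth-buffer value,
// mirroring the shader's origZG term (depth in [0,1], z_ndc in [-1,1]).
float linearizeDepth(float depth, float zNear, float zFar) {
    float zNdc = 2.0f * depth - 1.0f;
    return (2.0f * zFar * zNear) / ((zFar + zNear) - zNdc * (zFar - zNear));
}

int main() {
    // sanity check: depth 0 maps to the near plane (0.1), depth 1 to the far plane (100)
    std::printf("%f %f\n", linearizeDepth(0.0f, 0.1f, 100.0f),
                           linearizeDepth(1.0f, 0.1f, 100.0f));
}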
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
layout(location = 0) out vec4 out_Color; ///< outgoing fragment color
layout(location = 1) out vec4 out_FHP; ///< outgoing fragment first hitpoint
layout(location = 2) out vec4 out_FHN; ///< outgoing fragment first hit normal
#include "tools/gradient.frag"
#include "tools/raycasting.frag"
#include "tools/shading.frag"
#include "tools/texture2d.frag"
#include "tools/texture3d.frag"
#include "tools/transferfunction.frag"
uniform vec2 _viewportSizeRCP;
uniform float _jitterStepSizeMultiplier;
// ray entry points
uniform sampler2D _entryPoints;
uniform sampler2D _entryPointsDepth;
uniform TextureParameters2D _entryParams;
// ray exit points
uniform sampler2D _exitPoints;
uniform sampler2D _exitPointsDepth;
uniform TextureParameters2D _exitParams;
// Input volumes
uniform sampler3D _volume1;
uniform sampler3D _volume2;
uniform sampler3D _volume3;
uniform TextureParameters3D _volumeParams1;
uniform TextureParameters3D _volumeParams2;
uniform TextureParameters3D _volumeParams3;
// Transfer function
uniform sampler1D _transferFunction1;
uniform sampler1D _transferFunction2;
uniform sampler1D _transferFunction3;
uniform TFParameters1D _transferFunctionParams1;
uniform TFParameters1D _transferFunctionParams2;
uniform TFParameters1D _transferFunctionParams3;
uniform LightSource _lightSource;
uniform vec3 _cameraPosition;
uniform float _samplingStepSize;
const float SAMPLING_BASE_INTERVAL_RCP = 200.0;
/**
* Performs the raycasting and returns the final fragment color.
*/
vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords) {
    vec4 result = vec4(0.0);
    float firstHitT = -1.0;

    // calculate ray parameters
    vec3 direction = exitPoint.rgb - entryPoint.rgb;
    float t = 0.0;
    float tend = length(direction);
    direction = normalize(direction);

    jitterEntryPoint(entryPoint, direction, _samplingStepSize * _jitterStepSizeMultiplier);

    while (t < tend) {
        // compute sample position
        vec3 worldPosition = entryPoint.rgb + t * direction;

        // FIRST volume
        vec3 samplePosition1 = worldToTexture(_volumeParams1, worldPosition).xyz;
        if (all(greaterThanEqual(samplePosition1, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition1, vec3(1.0, 1.0, 1.0)))) {
            // lookup intensity and TF
            float intensity = texture(_volume1, samplePosition1).r;
            vec4 color = lookupTF(_transferFunction1, _transferFunctionParams1, intensity);

            // perform compositing
            if (color.a > 0.0) {
                // compute gradient (needed for shading and normals)
                vec3 gradient = computeGradient(_volume1, _volumeParams1, samplePosition1);
                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));

                // accommodate for variable sampling rates
                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
                result.a = result.a + (1.0 - result.a) * color.a;
            }
        }

        // SECOND volume
        vec3 samplePosition2 = worldToTexture(_volumeParams2, worldPosition).xyz;
        if (all(greaterThanEqual(samplePosition2, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition2, vec3(1.0, 1.0, 1.0)))) {
            // lookup intensity and TF
            float intensity = texture(_volume2, samplePosition2).r;
            vec4 color = lookupTF(_transferFunction2, _transferFunctionParams2, intensity);

            // perform compositing
            if (color.a > 0.0) {
                // compute gradient (needed for shading and normals)
                vec3 gradient = computeGradient(_volume2, _volumeParams2, samplePosition2);
                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));

                // accommodate for variable sampling rates
                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
                result.a = result.a + (1.0 - result.a) * color.a;
            }
        }

        // THIRD volume
        vec3 samplePosition3 = worldToTexture(_volumeParams3, worldPosition).xyz;
        if (all(greaterThanEqual(samplePosition3, vec3(0.0, 0.0, 0.0))) && all(lessThanEqual(samplePosition3, vec3(1.0, 1.0, 1.0)))) {
            // lookup intensity and TF
            float intensity = texture(_volume3, samplePosition3).r;
            vec4 color = lookupTF(_transferFunction3, _transferFunctionParams3, intensity);

            // perform compositing
            if (color.a > 0.0) {
                // compute gradient (needed for shading and normals)
                vec3 gradient = computeGradient(_volume3, _volumeParams3, samplePosition3);
                color.rgb = calculatePhongShading(worldPosition, _lightSource, _cameraPosition, gradient, color.rgb, color.rgb, vec3(1.0, 1.0, 1.0));

                // accommodate for variable sampling rates
                color.a = 1.0 - pow(1.0 - color.a, _samplingStepSize * SAMPLING_BASE_INTERVAL_RCP);
                result.rgb = result.rgb + color.rgb * color.a * (1.0 - result.a);
                result.a = result.a + (1.0 - result.a) * color.a;
            }
        }

        // save first hit ray parameter for depth value calculation
        if (firstHitT < 0.0 && result.a > 0.0) {
            firstHitT = t;
            out_FHP = vec4(worldPosition, 1.0);
            // gradient lookup expects texture coordinates, not world coordinates
            out_FHN = vec4(normalize(computeGradient(_volume1, _volumeParams1, worldToTexture(_volumeParams1, worldPosition).xyz)), 1.0);
        }

        // early ray termination
        if (result.a > 0.975) {
            result.a = 1.0;
            t = tend;
        }

        // advance to the next evaluation point along the ray
        t += _samplingStepSize;
    }

    // calculate depth value from ray parameter
    gl_FragDepth = 1.0;
    if (firstHitT >= 0.0) {
        float depthEntry = texture(_entryPointsDepth, texCoords).z;
        float depthExit = texture(_exitPointsDepth, texCoords).z;
        gl_FragDepth = calculateDepthValue(firstHitT/tend, depthEntry, depthExit);
    }
    return result;
}
/***
* The main method.
***/
void main() {
    vec2 p = gl_FragCoord.xy * _viewportSizeRCP;
    vec3 frontPos = texture(_entryPoints, p).rgb;
    vec3 backPos = texture(_exitPoints, p).rgb;

    // determine whether the ray has to be cast
    if (frontPos == backPos) {
        // background needs no raycasting
        discard;
    } else {
        // fragCoords lie inside the bounding box
        out_Color = performRaycasting(frontPos, backPos, p);
    }
}
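Two recurring pieces of raycasting arithmetic above are worth spelling out: the pow() term rescales a transfer-function alpha from the reference sampling interval (1/200 here, per SAMPLING_BASE_INTERVAL_RCP) to the actual step size, and the result.rgb/result.a updates are standard front-to-back alpha compositing. A C++ sketch of both (function names are illustrative, not CAMPVis API):

#include <cmath>

// Opacity correction: a TF alpha is defined relative to a reference sampling
// interval; resampling at a different step size requires
// alpha' = 1 - (1 - alpha)^(stepSize / referenceInterval).
// In the shader, stepSize / referenceInterval == stepSize * SAMPLING_BASE_INTERVAL_RCP.
float correctOpacity(float alpha, float stepSize, float referenceIntervalRcp = 200.0f) {
    return 1.0f - std::pow(1.0f - alpha, stepSize * referenceIntervalRcp);
}

struct Rgba { float r, g, b, a; };

// Front-to-back compositing of one sample into the accumulated result,
// matching the three accumulation lines in performRaycasting().
void compositeFrontToBack(Rgba& dst, const Rgba& src) {
    dst.r += src.r * src.a * (1.0f - dst.a);
    dst.g += src.g * src.a * (1.0f - dst.a);
    dst.b += src.b * src.a * (1.0f - dst.a);
    dst.a += (1.0f - dst.a) * src.a;
}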
# CMake file for neuro module
IF(${ModuleEnabled})
    # Source files:
    FILE(GLOB ThisModSources RELATIVE ${ModulesDir}
        modules/neuro/pipelines/*.cpp
        modules/neuro/processors/*.cpp
        modules/neuro/tools/*.cpp
    )

    # Header files (including GLSL files so that they'll appear in VS projects)
    FILE(GLOB ThisModHeaders RELATIVE ${ModulesDir}
        modules/neuro/glsl/*.frag
        modules/neuro/glsl/*.geom
        modules/neuro/glsl/*.vert
        modules/neuro/pipelines/*.h
        modules/neuro/processors/*.h
        modules/neuro/tools/*.h
    )

    # Shader and sample data directories (a single SET, as a second SET would overwrite the first):
    SET(ThisModShaderDirectories "modules/neuro/glsl" "modules/neuro/sampledata")

    SET(ThisModDependencies base io preprocessing vis)
ENDIF(${ModuleEnabled})
SET(ThisModStatus TESTING)
SET(ThisModExternalDependencies FALSE)
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#include "neurodemo.h"
#include "cgt/event/keyevent.h"
#include "core/datastructures/imagedata.h"
#include "core/datastructures/imageseries.h"
#include "core/classification/geometry1dtransferfunction.h"
#include "core/classification/tfgeometry1d.h"
namespace campvis {

    NeuroDemo::NeuroDemo(DataContainer* dc)
        : AutoEvaluationPipeline(dc)
        , _lsp()
        , _tcp(&_canvasSize)
        , _ctReader()
        , _t1Reader()
        , _petReader()
        , _mvr(&_canvasSize)
    {
        _tcp.addLqModeProcessor(&_mvr);
        addEventListenerToBack(&_tcp);

        addProcessor(&_lsp);
        addProcessor(&_tcp);
        addProcessor(&_ctReader);
        addProcessor(&_t1Reader);
        addProcessor(&_petReader);
        addProcessor(&_mvr);
    }

    NeuroDemo::~NeuroDemo() {
    }

    void NeuroDemo::init() {
        AutoEvaluationPipeline::init();

        _tcp.p_image.setValue("ImageGroup");
        _renderTargetID.setValue("result");

        _ctReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_CT.nii"));
        _ctReader.p_targetImageID.setValue("ct.image");
        _ctReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);

        _t1Reader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_T1_bet.nii"));
        _t1Reader.p_targetImageID.setValue("t1_tf.image");
        _t1Reader.s_validated.connect(this, &NeuroDemo::onReaderValidated);

        _petReader.p_url.setValue(ShdrMgr.completePath("D:/Medical Data/K_export/K_PET.nii"));
        _petReader.p_targetImageID.setValue("pet.image");
        _petReader.s_validated.connect(this, &NeuroDemo::onReaderValidated);

        Geometry1DTransferFunction* ct_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
        ct_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
        _mvr.p_transferFunction1.replaceTF(ct_tf);

        Geometry1DTransferFunction* t1_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
        t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.12f, .15f), cgt::col4(85, 0, 0, 128), cgt::col4(255, 0, 0, 128)));
        t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.19f, .28f), cgt::col4(89, 89, 89, 155), cgt::col4(89, 89, 89, 155)));
        t1_tf->addGeometry(TFGeometry1D::createQuad(cgt::vec2(.41f, .51f), cgt::col4(170, 170, 128, 64), cgt::col4(192, 192, 128, 64)));
        _mvr.p_transferFunction2.replaceTF(t1_tf);

        Geometry1DTransferFunction* pet_tf = new Geometry1DTransferFunction(128, cgt::vec2(0.f, .05f));
        pet_tf->addGeometry(TFGeometry1D::crateRamp(cgt::vec2(.5f, .9f), cgt::col4(255, 255, 255, 255)));
        _mvr.p_transferFunction3.replaceTF(pet_tf);

        _mvr.p_sourceImagesId.setValue("ImageGroup");
        _mvr.p_outputImageId.setValue("result");
        _mvr.p_samplingRate.setValue(1.f);
    }

    void NeuroDemo::onReaderValidated(AbstractProcessor* p) {
        ScopedTypedData<ImageData> ctImage(getDataContainer(), _ctReader.p_targetImageID.getValue());
        ScopedTypedData<ImageData> t1Image(getDataContainer(), _t1Reader.p_targetImageID.getValue());
        ScopedTypedData<ImageData> petImage(getDataContainer(), _petReader.p_targetImageID.getValue());

        ImageSeries* is = new ImageSeries();
        if (ctImage)
            is->addImage(ctImage.getDataHandle());
        if (t1Image)
            is->addImage(t1Image.getDataHandle());
        if (petImage)
            is->addImage(petImage.getDataHandle());

        getDataContainer().addData("ImageGroup", is);
    }

}
\ No newline at end of file
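All three transfer functions above are created over the intensity domain vec2(0.f, .05f), i.e. only the lowest 5% of the normalized intensity range carries TF geometry; plausibly this targets 12-bit CT/MR/PET values stored in 16-bit textures, where the populated range is about 4096/65536 ≈ 0.0625. A hedged sketch of how such a domain window presumably remaps an intensity before the 1D table lookup (the actual lookupTF()/TFParameters1D behavior may differ):

#include <algorithm>

// Map a normalized voxel intensity into the TF's intensity domain, e.g.
// (0.0, 0.05), yielding the texture coordinate for the 1D TF lookup.
float mapToTfDomain(float intensity, float domainLo, float domainHi) {
    float t = (intensity - domainLo) / (domainHi - domainLo);
    return std::clamp(t, 0.0f, 1.0f);
}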
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2014, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universitaet Muenchen
// Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#ifndef NEURODEMO_H__
#define NEURODEMO_H__
#include "core/pipeline/autoevaluationpipeline.h"
#include "modules/modulesapi.h"
#include "modules/pipelinefactory.h"
#include "modules/base/processors/lightsourceprovider.h"
#include "modules/base/processors/trackballcameraprovider.h"
#include "modules/io/processors/genericimagereader.h"
#include "modules/neuro/processors/multivolumeraycaster.h"
namespace campvis {

    class CAMPVIS_MODULES_API NeuroDemo : public AutoEvaluationPipeline {
    public:
        /**
         * Creates an AutoEvaluationPipeline.
         */
        NeuroDemo(DataContainer* dc);

        /**
         * Virtual Destructor
         **/
        virtual ~NeuroDemo();

        /// \see AutoEvaluationPipeline::init()
        virtual void init();