tensordemo.cpp
// ================================================================================================
// 
// This file is part of the CAMPVis Software Framework.
// 
// If not explicitly stated otherwise: Copyright (C) 2012-2013, all rights reserved,
//      Christian Schulte zu Berge <christian.szb@in.tum.de>
//      Chair for Computer Aided Medical Procedures
//      Technische Universität München
//      Boltzmannstr. 3, 85748 Garching b. München, Germany
// 
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
// 
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 
// except in compliance with the License. You may obtain a copy of the License at
// 
// http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software distributed under the 
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 
// either express or implied. See the License for the specific language governing permissions 
// and limitations under the License.
// 
// ================================================================================================

#include "tensordemo.h"

#include "tgt/event/keyevent.h"

#include "core/classification/geometry1dtransferfunction.h"
#include "core/classification/tfgeometry1d.h"

namespace campvis {

    TensorDemo::TensorDemo(DataContainer* dc)
        : AutoEvaluationPipeline(dc)
        , _lsp()
        , _imageReader()
        , _ta()
        , _glyphRenderer(&_canvasSize)
        , _sliceRenderer(&_canvasSize)
        , _rtc(&_canvasSize)
        , p_camera("Camera", "Camera", tgt::Camera())
        , p_sliceNumber("SliceNumber", "Slice Number", 0, 0, 256)
        , _trackballEH(0)
    {
        addProperty(p_camera);
        addProperty(p_sliceNumber);

        // Attach trackball navigation to the shared camera property.
        _trackballEH = new TrackballNavigationEventListener(&p_camera, &_canvasSize);
        addEventListenerToBack(_trackballEH);

        // Register all processors with the pipeline.
        addProcessor(&_lsp);
        addProcessor(&_imageReader);
        addProcessor(&_ta);
        addProcessor(&_glyphRenderer);
        addProcessor(&_sliceRenderer);
        addProcessor(&_rtc);
    }

    TensorDemo::~TensorDemo() {
        // Free the trackball event listener allocated in the constructor.
        delete _trackballEH;
    }

    void TensorDemo::init() {
        AutoEvaluationPipeline::init();

        // Wire up shared properties so camera and slice number stay in sync across renderers.
        p_camera.addSharedProperty(&_glyphRenderer.p_camera);
        p_camera.addSharedProperty(&_sliceRenderer.p_camera);

        p_sliceNumber.addSharedProperty(&_glyphRenderer.p_sliceNumber);
        p_sliceNumber.addSharedProperty(&_sliceRenderer.p_sliceNumber);

        // Load the sample tensor data set and feed it into the tensor analysis processor.
        _imageReader.p_url.setValue(CAMPVIS_SOURCE_DIR "/modules/tensor/sampledata/planar_tensor.mhd");
        _imageReader.p_targetImageID.setValue("reader.output");
        _imageReader.p_targetImageID.addSharedProperty(&_ta.p_inputImage);

        // The analysis provides the trace image for the slice renderer as well as the
        // eigenvalue/eigenvector images for the glyph renderer.
        _ta.p_outputProperties[0]->_imageId.addSharedProperty(&_sliceRenderer.p_sourceImageID);
        _ta.p_outputProperties[0]->_imageType.selectById("Trace");
        _ta.p_evalsImage.addSharedProperty(&_glyphRenderer.p_inputEigenvalues);
        _ta.p_evecsImage.addSharedProperty(&_glyphRenderer.p_inputEigenvectors);
        _ta.s_validated.connect(this, &TensorDemo::onProcessorValidated);

        _glyphRenderer.p_renderOutput.setValue("glyphs");
        _glyphRenderer.p_renderOutput.addSharedProperty(&_rtc.p_firstImageId);

        // Grayscale ramp transfer function (black to white over [0, 1]) for the slice renderer.
        Geometry1DTransferFunction* tf = new Geometry1DTransferFunction(128, tgt::vec2(0.f, 1.f));
        tf->addGeometry(TFGeometry1D::createQuad(tgt::vec2(0.f, 1.f), tgt::col4(0, 0, 0, 255), tgt::col4(255, 255, 255, 255)));
        _sliceRenderer.p_transferFunction.replaceTF(tf);
        _sliceRenderer.p_targetImageID.setValue("slice");
        _sliceRenderer.p_targetImageID.addSharedProperty(&_rtc.p_secondImageId);

        // Depth-composite the glyph and slice renderings into the final render target.
        _rtc.p_compositingMethod.selectById("depth");
        _rtc.p_targetImageId.setValue("composed");

        _renderTargetID.setValue("composed");
    }

    void TensorDemo::onProcessorValidated(AbstractProcessor* processor) {
        if (processor == &_ta) {
            // Refit the camera to the world bounds of the freshly computed eigenvalue image.
            ScopedTypedData<IHasWorldBounds> img(*_data, _ta.p_evalsImage.getValue());
            if (img) {
                _trackballEH->reinitializeCamera(img);
            }
        }
    }

}