Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

Commit b6a82f71 authored by Christian Schulte zu Berge's avatar Christian Schulte zu Berge
Browse files

Started implementation for tensor data support:

 * Introducing campvis::Tensor2<T> type
 * Updated MhdImageReader to support TensorImages
parent 287bdc91
......@@ -227,6 +227,7 @@ namespace campvis {
else DISPATCH_DISK_TO_GENERIC_LOCAL_CONVERSION(2)
else DISPATCH_DISK_TO_GENERIC_LOCAL_CONVERSION(3)
else DISPATCH_DISK_TO_GENERIC_LOCAL_CONVERSION(4)
else DISPATCH_DISK_TO_GENERIC_LOCAL_CONVERSION(6)
else {
tgtAssert(false, "Should not reach this - wrong number of channel!");
return 0;
......
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012-2013, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universität München
// Boltzmannstr. 3, 85748 Garching b. München, Germany
//
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
// ================================================================================================
#ifndef TENSOR_H__
#define TENSOR_H__
#include "tgt/matrix.h"
namespace campvis {
/**
* Second order tensor of base type T
*
* A second order tensor is a symmetric, positive definite 3x3 matrix, though
* can be represented by 6 values. To save memory only these 6 values are stored,
* use according accessor functions to get a Matrix3\<T\> representation.
*
* In this implementation the 6 tensor values are stored in row order as upper
* diagonal matrix, meaning
* Dxx Dxy Dxz
* elem = Dyy Dyz = [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]
* Dzz
*
* If you have differently organized data use one of the order transforming
* factory methods.
**/
template<class T>
struct Tensor2 {
    typedef T ElemType;

    enum {
        size = 6        ///< number of independently stored tensor components
    };

    union {
        struct {
            T Dxx;      ///< tensor element (0, 0)
            T Dxy;      ///< tensor element (0, 1), equals (1, 0) by symmetry
            T Dxz;      ///< tensor element (0, 2), equals (2, 0) by symmetry
            T Dyy;      ///< tensor element (1, 1)
            T Dyz;      ///< tensor element (1, 2), equals (2, 1) by symmetry
            T Dzz;      ///< tensor element (2, 2)
        };
        T elem[size];   ///< array view onto the same six components (upper diagonal row order)
    };

    /// Default constructor, leaves all elements uninitialized.
    Tensor2() {}

    /// Initializes all six elements with the same value \a v.
    explicit Tensor2(T v) {
        for (size_t i = 0; i < size; ++i)
            elem[i] = v;
    }

    /// Initializes the elements from the first six values of the array \a v (upper diagonal row order).
    explicit Tensor2(const T* v) {
        for (size_t i = 0; i < size; ++i)
            elem[i] = v[i];
    }

    /// Component-wise initialization, values given in upper diagonal row order.
    Tensor2(T Dxx, T Dxy, T Dxz, T Dyy, T Dyz, T Dzz) {
        elem[0] = Dxx;
        elem[1] = Dxy;
        elem[2] = Dxz;
        elem[3] = Dyy;
        elem[4] = Dyz;
        elem[5] = Dzz;
    }

    /// Conversion constructor from a tensor with another base type \a U.
    template<class U>
    Tensor2(const Tensor2<U>& v) {
        for (size_t i = 0; i < size; ++i)
            elem[i] = T(v.elem[i]);
    }

    /// Destructor
    ~Tensor2() {
    }

    /// Const index operator, \a index must be in [0, size).
    const T& operator [] (size_t index) const {
        return elem[index];
    }

    /// Index operator, \a index must be in [0, size).
    T& operator [] (size_t index) {
        return elem[index];
    }

    /**
     * Returns a full 3x3 matrix representation of this rank-2 tensor.
     * \return  tgt::Matrix3<T>(Dxx, Dxy, Dxz, Dxy, Dyy, Dyz, Dxz, Dyz, Dzz)
     */
    tgt::Matrix3<T> getMatrix() const {
        return tgt::Matrix3<T>(Dxx, Dxy, Dxz, Dxy, Dyy, Dyz, Dxz, Dyz, Dzz);
    }

    /**
     * Creates a second order tensor from values given in row order as
     * lower diagonal matrix, i.e. [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]:
     *
     *          Dxx
     *          Dxy  Dyy
     *          Dxz  Dyz  Dzz
     **/
    static Tensor2<T> createTensorFromLowerDiagonalMatrix(T Dxx, T Dxy, T Dyy, T Dxz, T Dyz, T Dzz) {
        return Tensor2(Dxx, Dxy, Dxz, Dyy, Dyz, Dzz);
    }

    /**
     * Creates a second order tensor from an array of six values given in row
     * order as lower diagonal matrix, i.e. [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]:
     *
     *          Dxx
     *          Dxy  Dyy
     *          Dxz  Dyz  Dzz
     **/
    static Tensor2<T> createTensorFromLowerDiagonalMatrix(const T* elem) {
        return Tensor2(elem[0], elem[1], elem[3], elem[2], elem[4], elem[5]);
    }

    /**
     * Creates a second order tensor from values given in diagonal-first order,
     * i.e. the three diagonal elements followed by the off-diagonal ones:
     * [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]. The element positions are:
     *
     *          1  4  5
     *             2  6
     *                3
     **/
    static Tensor2<T> createTensorFromDiagonalOrder(T Dxx, T Dyy, T Dzz, T Dxy, T Dxz, T Dyz) {
        return Tensor2(Dxx, Dxy, Dxz, Dyy, Dyz, Dzz);
    }

    /**
     * Creates a second order tensor from an array of six values given in
     * diagonal-first order, i.e. [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]:
     *
     *          1  4  5
     *             2  6
     *                3
     **/
    static Tensor2<T> createTensorFromDiagonalOrder(const T* elem) {
        return Tensor2(elem[0], elem[3], elem[4], elem[1], elem[5], elem[2]);
    }

    /// Component-wise equality comparison (const-qualified so const tensors can be compared).
    bool operator==(const Tensor2<T>& rhs) const {
        return (   (Dxx == rhs.Dxx)
                && (Dxy == rhs.Dxy)
                && (Dxz == rhs.Dxz)
                && (Dyy == rhs.Dyy)
                && (Dyz == rhs.Dyz)
                && (Dzz == rhs.Dzz));
    }

    /// Component-wise inequality comparison.
    bool operator!=(const Tensor2<T>& rhs) const {
        return (   (Dxx != rhs.Dxx)
                || (Dxy != rhs.Dxy)
                || (Dxz != rhs.Dxz)
                || (Dyy != rhs.Dyy)
                || (Dyz != rhs.Dyz)
                || (Dzz != rhs.Dzz));
    }

    /// Component-wise scalar multiplication.
    Tensor2<T> operator*(const T& rhs) const {
        return Tensor2<T>(Dxx*rhs, Dxy*rhs, Dxz*rhs, Dyy*rhs, Dyz*rhs, Dzz*rhs);
    }

    /// Component-wise scalar division.
    Tensor2<T> operator/(const T& rhs) const {
        return Tensor2<T>(Dxx/rhs, Dxy/rhs, Dxz/rhs, Dyy/rhs, Dyz/rhs, Dzz/rhs);
    }

    /// Component-wise tensor addition.
    Tensor2<T> operator+(const Tensor2<T>& rhs) const {
        return Tensor2<T>(Dxx+rhs.Dxx, Dxy+rhs.Dxy, Dxz+rhs.Dxz, Dyy+rhs.Dyy, Dyz+rhs.Dyz, Dzz+rhs.Dzz);
    }

    /// Component-wise tensor subtraction.
    Tensor2<T> operator-(const Tensor2<T>& rhs) const {
        return Tensor2<T>(Dxx-rhs.Dxx, Dxy-rhs.Dxy, Dxz-rhs.Dxz, Dyy-rhs.Dyy, Dyz-rhs.Dyz, Dzz-rhs.Dzz);
    }

    /// Component-wise in-place tensor addition.
    Tensor2<T>& operator+=(const Tensor2<T>& rhs) {
        for (size_t i = 0; i < size; ++i)
            elem[i] += rhs.elem[i];
        return *this;
    }

    /// Component-wise in-place tensor subtraction.
    Tensor2<T>& operator-=(const Tensor2<T>& rhs) {
        for (size_t i = 0; i < size; ++i)
            elem[i] -= rhs.elem[i];
        return *this;
    }
};
}
#endif // TENSOR_H__
......@@ -28,6 +28,7 @@
#include "tgt/tgt_gl.h"
#include "tgt/tgt_math.h"
#include "tgt/vector.h"
#include "core/datastructures/tensor.h"
#include "core/tools/weaklytypedpointer.h"
#include <limits>
......@@ -48,12 +49,12 @@ namespace {
template<>
struct TypeTraitsHelperPerChannel<1> {
    // Single-channel data is uploaded as GL_RED; GL_ALPHA is deprecated in core OpenGL 3+.
    // (The duplicate GL_ALPHA definition was diff residue and would not compile.)
    static const GLint glFormat = GL_RED;
};
template<>
struct TypeTraitsHelperPerChannel<2> {
    // Two-channel data is uploaded as GL_RG; GL_LUMINANCE_ALPHA is deprecated in core OpenGL 3+.
    // (The duplicate GL_LUMINANCE_ALPHA definition was diff residue and would not compile.)
    static const GLint glFormat = GL_RG;
};
template<>
......@@ -66,6 +67,11 @@ namespace {
static const GLint glFormat = GL_RGBA;
};
template<>
// 6-channel (second order tensor) data.
// NOTE(review): the upload format is GL_RGB although the element has 6 channels —
// presumably the tensor is packed/split into 3-channel textures elsewhere; confirm
// against the texture upload code before relying on this.
struct TypeTraitsHelperPerChannel<6> {
    static const GLint glFormat = GL_RGB;
};
// ================================================================================================
// ================================================================================================
......@@ -84,37 +90,45 @@ namespace {
static const GLint glInteralFormat = internalFormat; \
}; \
// Internal-format specializations per (base type, channel count).
// The old GL_ALPHA*/GL_LUMINANCE_ALPHA forms are deprecated in core OpenGL 3+;
// the duplicated old/new invocation pairs were diff residue and would be
// class redefinition errors — only the modern set is kept.
SPCIALIZE_TTIF(uint8_t, 1, GL_R8)
SPCIALIZE_TTIF(int8_t, 1, GL_R8)
SPCIALIZE_TTIF(uint16_t,1, GL_R16)
SPCIALIZE_TTIF(int16_t, 1, GL_R16)
SPCIALIZE_TTIF(uint32_t,1, GL_R32F)
SPCIALIZE_TTIF(int32_t, 1, GL_R32F)
SPCIALIZE_TTIF(float, 1, GL_R32F)

SPCIALIZE_TTIF(uint8_t, 2, GL_RG8)
SPCIALIZE_TTIF(int8_t, 2, GL_RG8)
SPCIALIZE_TTIF(uint16_t,2, GL_RG16)
SPCIALIZE_TTIF(int16_t, 2, GL_RG16)
SPCIALIZE_TTIF(uint32_t,2, GL_RG32F)
SPCIALIZE_TTIF(int32_t, 2, GL_RG32F)
SPCIALIZE_TTIF(float, 2, GL_RG32F)

SPCIALIZE_TTIF(uint8_t, 3, GL_RGB8)
SPCIALIZE_TTIF(int8_t, 3, GL_RGB8)
SPCIALIZE_TTIF(uint16_t,3, GL_RGB16)
SPCIALIZE_TTIF(int16_t, 3, GL_RGB16)
SPCIALIZE_TTIF(uint32_t,3, GL_RGB32F)
SPCIALIZE_TTIF(int32_t, 3, GL_RGB32F)
SPCIALIZE_TTIF(float, 3, GL_RGB32F)

SPCIALIZE_TTIF(uint8_t, 4, GL_RGBA8)
SPCIALIZE_TTIF(int8_t, 4, GL_RGBA8)
SPCIALIZE_TTIF(uint16_t,4, GL_RGBA16)
SPCIALIZE_TTIF(int16_t, 4, GL_RGBA16)
SPCIALIZE_TTIF(uint32_t,4, GL_RGBA32F)
SPCIALIZE_TTIF(int32_t, 4, GL_RGBA32F)
SPCIALIZE_TTIF(float, 4, GL_RGBA32F)

// Tensor (6-channel) data reuses the 3-channel internal formats.
SPCIALIZE_TTIF(uint8_t, 6, GL_RGB8)
SPCIALIZE_TTIF(int8_t, 6, GL_RGB8)
SPCIALIZE_TTIF(uint16_t,6, GL_RGB16)
SPCIALIZE_TTIF(int16_t, 6, GL_RGB16)
SPCIALIZE_TTIF(uint32_t,6, GL_RGB32F)
SPCIALIZE_TTIF(int32_t, 6, GL_RGB32F)
SPCIALIZE_TTIF(float, 6, GL_RGB32F)
// ================================================================================================
// ================================================================================================
......@@ -259,6 +273,21 @@ namespace {
}
};
template<typename BASETYPE>
struct TypeTraitsHelperOfBasetypePerChannel<BASETYPE, 6> {
    /// Element type of 6-channel images: a second order tensor of the base type.
    typedef Tensor2< BASETYPE > ElementType;

    /// Returns the value of channel \a channel (in [0, 5]) of the tensor \a element.
    static inline BASETYPE getChannel(const ElementType& element, size_t channel) {
        // channel is unsigned, so `channel >= 0` is always true — only the
        // upper bound needs checking.
        tgtAssert(channel <= 5, "Channel out of bounds!");
        return element[channel];
    }

    /// Sets channel \a channel (in [0, 5]) of the tensor \a element to \a value.
    static inline void setChannel(ElementType& element, size_t channel, BASETYPE value) {
        tgtAssert(channel <= 5, "Channel out of bounds!");
        element[channel] = value;
    }
};
// ================================================================================================
// ================================================================================================
......
......@@ -76,8 +76,14 @@ namespace campvis {
// image type
if (tfp.hasKey("ObjectType")) {
if (tfp.getString("ObjectType") != "Image") {
LERROR("Error while parsing MHD header: ObjectType = Image expected");
if (tfp.getString("ObjectType") == "Image") {
numChannels = 1;
}
else if (tfp.getString("ObjectType") == "TensorImage") {
numChannels = 6;
}
else {
LERROR("Error while parsing MHD header: ObjectType = Image or ObjectType = TensorImage expected");
return;
}
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment