Commit b019d377 authored by Christian Schulte zu Berge

Further work on Empty Space Skipping implementation

parent 6d43b513
@@ -58,7 +58,7 @@ namespace campvis {
         return _invalidationLevel;
     }

-    void AbstractProperty::setInvalidationLevel(AbstractProcessor::InvalidationLevel il) {
+    void AbstractProperty::setInvalidationLevel(int il) {
         _invalidationLevel = il;
     }
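Switching the parameter from the AbstractProcessor::InvalidationLevel enum to a plain int lets callers combine several invalidation flags with bitwise OR, which is what tests such as getInvalidationLevel() & INVALID_BBV further down rely on. A minimal sketch of that flag pattern in C++, using hypothetical flag values rather than the real campvis constants:

    #include <cassert>

    // Hypothetical flag values for illustration; campvis defines its own constants.
    enum InvalidationFlags {
        VALID          = 0,
        INVALID_RESULT = 1 << 0,
        INVALID_SHADER = 1 << 1,
        INVALID_BBV    = 1 << 2
    };

    int main() {
        int level = VALID;
        level |= INVALID_RESULT | INVALID_BBV;  // several flags combined in one int
        assert((level & INVALID_BBV) != 0);     // the kind of test process() performs
        level &= ~INVALID_BBV;                  // what validate(INVALID_BBV) would clear
        assert((level & INVALID_BBV) == 0);
        return 0;
    }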
@@ -96,7 +96,7 @@ namespace campvis {
          * Sets the invalidation level that this property triggers.
          * \param il New invalidation level that this property triggers.
          */
-        void setInvalidationLevel(AbstractProcessor::InvalidationLevel il);
+        void setInvalidationLevel(int il);

         /**
          * Returns whether this proberty shall be visible in the GUI.
@@ -73,16 +73,34 @@ namespace campvis {
     }

     std::vector<tgt::svec3> BinaryBrickedVolume::getAllVoxelsForBrick(size_t brickIndex) const {
-        const tgt::svec3& refImageSize = _referenceImage->getSize();
+        tgt::ivec3 refImageSize = _referenceImage->getSize();
         std::vector<tgt::svec3> toReturn;
-        toReturn.reserve(_brickSize * _brickSize * _brickSize);
+        toReturn.reserve((_brickSize+2) * (_brickSize+2) * (_brickSize+2));

         // traverse each dimension, check that voxel is within reference image size
-        tgt::svec3 startVoxel = indexToBrick(brickIndex) * _brickSize;
-        for (size_t x = 0; x < _brickSize && startVoxel.x + x < refImageSize.x; ++x) {
-            for (size_t y = 0; y < _brickSize && startVoxel.y + y < refImageSize.y; ++y) {
-                for (size_t z = 0; z < _brickSize && startVoxel.z + z < refImageSize.z; ++z) {
-                    toReturn.push_back(tgt::svec3(startVoxel.x + x, startVoxel.y + y, startVoxel.z + z));
+        tgt::ivec3 startVoxel = indexToBrick(brickIndex) * _brickSize;
+        for (int x = -1; x < static_cast<int>(_brickSize + 1); ++x) {
+            int xx = startVoxel.x + x;
+            if (xx < 0)
+                continue;
+            else if (xx >= refImageSize.x)
+                break;
+
+            for (int y = -1; y < static_cast<int>(_brickSize + 1); ++y) {
+                int yy = startVoxel.y + y;
+                if (yy < 0)
+                    continue;
+                else if (yy >= refImageSize.y)
+                    break;
+
+                for (int z = -1; z < static_cast<int>(_brickSize + 1); ++z) {
+                    int zz = startVoxel.z + z;
+                    if (zz < 0)
+                        continue;
+                    else if (zz >= refImageSize.z)
+                        break;
+
+                    toReturn.push_back(tgt::svec3(xx, yy, zz));
                 }
             }
         }
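getAllVoxelsForBrick() now collects each brick's voxels plus a one-voxel border, hence the reserve of (_brickSize+2)^3 entries and the loops running from -1 to _brickSize, clipped against the reference image size (presumably so that values interpolated at brick boundaries are also covered by the visibility test). A self-contained sketch of the same traversal, with a plain Vec3i struct standing in for the tgt vector types:

    #include <cstddef>
    #include <vector>

    struct Vec3i { int x, y, z; };

    // Sketch: gather a brick's voxels plus a one-voxel border, clipped to the volume.
    // brickSize and imageSize stand in for _brickSize and _referenceImage->getSize().
    std::vector<Vec3i> voxelsForBrick(Vec3i startVoxel, int brickSize, Vec3i imageSize) {
        std::vector<Vec3i> result;
        result.reserve(static_cast<std::size_t>(brickSize + 2) * (brickSize + 2) * (brickSize + 2));

        for (int x = -1; x <= brickSize; ++x) {
            int xx = startVoxel.x + x;
            if (xx < 0) continue;           // border voxel below the volume
            if (xx >= imageSize.x) break;   // past the upper bound, larger x cannot help
            for (int y = -1; y <= brickSize; ++y) {
                int yy = startVoxel.y + y;
                if (yy < 0) continue;
                if (yy >= imageSize.y) break;
                for (int z = -1; z <= brickSize; ++z) {
                    int zz = startVoxel.z + z;
                    if (zz < 0) continue;
                    if (zz >= imageSize.z) break;
                    result.push_back(Vec3i{xx, yy, zz});
                }
            }
        }
        return result;
    }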
@@ -153,5 +171,9 @@ namespace campvis {
         return _numBrickIndices;
     }

+    size_t BinaryBrickedVolume::getBrickSize() const {
+        return _brickSize;
+    }
+
 }
\ No newline at end of file
@@ -32,6 +32,8 @@ namespace campvis {
          */
         size_t getNumBrickIndices() const;

+        size_t getBrickSize() const;
+
         /**
          * Returns the boolean value for the brick with index \a brickIndex.
@@ -88,17 +88,12 @@ ivec3 voxelToBrick(in vec3 voxel) {
     return ivec3(floor(voxel / _bbvBrickSize));
 }

-int brickToIndex(in ivec3 brick) {
-    return int(brick.x + (_bbvTextureParams._size.x * brick.y) + (_bbvTextureParams._size.x * _bbvTextureParams._size.y * brick.z));
-}
-
 // samplePosition is in texture coordiantes [0, 1]
 bool lookupInBbv(in vec3 samplePosition) {
-    ivec3 brick = voxelToBrick(samplePosition * _volumeTextureParams._size);
-    ivec3 byte = brick;
+    ivec3 byte = voxelToBrick(samplePosition * _volumeTextureParams._size);
+    uint bit = uint(byte.x % 8);
     byte.x /= 8;
-    uint bit = uint(brick.x % 8);

     uint texel = texelFetch(_bbvTexture, byte, 0).r;
     return (texel & (1U << bit)) != 0U;
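The revised lookupInBbv() treats the bbv texture as a bitmask in which eight neighboring bricks along x share one byte: the bit index is the brick's x coordinate modulo 8 and the texel's x coordinate is the brick's x divided by 8. The following CPU-side sketch mirrors that addressing scheme; it illustrates the packing the shader expects and is not the actual BinaryBrickedVolume storage code:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Sketch of the bit packing lookupInBbv() assumes:
    // eight consecutive bricks along x share one byte, bit index = brick.x % 8.
    struct BrickBitmask {
        int bricksX, bricksY, bricksZ;
        std::vector<uint8_t> bytes;  // ceil(bricksX / 8) * bricksY * bricksZ entries

        BrickBitmask(int bx, int by, int bz)
            : bricksX(bx), bricksY(by), bricksZ(bz)
            , bytes(static_cast<size_t>((bx + 7) / 8) * by * bz, 0) {}

        size_t byteIndex(int x, int y, int z) const {
            int rowBytes = (bricksX + 7) / 8;
            return static_cast<size_t>((x / 8) + rowBytes * (y + bricksY * z));
        }

        void set(int x, int y, int z)       { bytes[byteIndex(x, y, z)] |= uint8_t(1u << (x % 8)); }
        bool get(int x, int y, int z) const { return (bytes[byteIndex(x, y, z)] >> (x % 8)) & 1u; }
    };

    int main() {
        BrickBitmask bbv(20, 4, 4);
        bbv.set(9, 2, 1);  // brick (9,2,1) maps to byte (1,2,1), bit 1
        assert(bbv.get(9, 2, 1) && !bbv.get(8, 2, 1));
        return 0;
    }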
@@ -127,13 +122,14 @@ vec4 performRaycasting(in vec3 entryPoint, in vec3 exitPoint, in vec2 texCoords)
         vec3 samplePosition = entryPoint.rgb + t * direction;

         if (_hasBbv) {
-            if (lookupInBbv(samplePosition)) {
-                // advance to the next evaluation point along the ray
+            if (! lookupInBbv(samplePosition)) {
+                // advance to the next evaluation point along the ray
+                t += 4.0*_samplingStepSize;
 #ifdef ENABLE_ADAPTIVE_STEPSIZE
                 samplingRateCompensationMultiplier = 1.0;
-                t += _samplingStepSize;
 #else
-                t += _samplingStepSize;
+                samplingRateCompensationMultiplier = 1.0;
 #endif
                 continue;
             }
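With empty space skipping enabled, a sample whose brick is flagged as empty is no longer shaded; the ray simply advances by four sampling steps at once before the loop continues. A compact C++ transcription of that control flow, with hypothetical isEmpty() and shade() callbacks standing in for lookupInBbv() and the compositing code:

    #include <functional>

    // Sketch of the skip logic in performRaycasting(), transcribed from GLSL to C++.
    void marchRay(float tEnd, float samplingStepSize,
                  const std::function<bool(float)>& isEmpty,
                  const std::function<void(float)>& shade) {
        float t = 0.f;
        while (t < tEnd) {
            if (isEmpty(t)) {
                t += 4.f * samplingStepSize;  // jump over an empty brick in one go
                continue;
            }
            shade(t);                // regular compositing work for occupied space
            t += samplingStepSize;   // normal step size otherwise
        }
    }

Jumping several steps at a time stays conservative only if the bitmask errs on the side of visibility near brick borders, which the one-voxel border gathered in getAllVoxelsForBrick() appears to provide.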
@@ -58,12 +58,13 @@ namespace campvis {
         addProperty(&p_shadowIntensity);
         p_shadowIntensity.setVisible(false);
 //        p_transferFunction.setInvalidationLevel(p_transferFunction.getInvalidationLevel() | INVALID_BBV);
 //        p_sourceImageID.setInvalidationLevel(p_sourceImageID.getInvalidationLevel() | INVALID_BBV);
         decoratePropertyCollection(this);
     }

     SimpleRaycaster::~SimpleRaycaster() {
         delete _bbv;
+        delete _t;
     }

     void SimpleRaycaster::init() {
@@ -71,6 +72,8 @@ namespace campvis {
     }

     void SimpleRaycaster::deinit() {
+        delete _bbv;
+        delete _t;
         RaycastingProcessor::deinit();
     }
@@ -80,12 +83,16 @@ namespace campvis {
         if (getInvalidationLevel() & INVALID_BBV) {
             DataHandle dh = DataHandle(const_cast<ImageData*>(image->getParent())); // HACK HACK HACK
             generateBbv(dh);

+//             tgt::Texture* batman = _bbv->exportToImageData();
+//             ImageData* robin = new ImageData(3, batman->getDimensions(), 1);
+//             ImageRepresentationGL::create(robin, batman);
+//             data.addData("All glory to the HYPNOTOAD!", robin);

             validate(INVALID_BBV);
         }

         if (_t != 0 && p_useEmptySpaceSkipping.getValue()) {
             // bind
             bbvUnit.activate();
             _t->bind();
@@ -95,7 +102,7 @@ namespace campvis {
             _shader->setUniform("_bbvTextureParams._sizeRCP", tgt::vec3(1.f) / tgt::vec3(_t->getDimensions()));
             _shader->setUniform("_bbvTextureParams._numChannels", static_cast<int>(1));
-            _shader->setUniform("_bbvBrickSize", 2);
+            _shader->setUniform("_bbvBrickSize", static_cast<int>(_bbv->getBrickSize()));
             _shader->setUniform("_hasBbv", true);
             _shader->setIgnoreUniformLocationError(false);
         }
@@ -155,6 +162,7 @@ namespace campvis {
         GLubyte* tfBuffer = p_transferFunction.getTF()->getTexture()->downloadTextureToBuffer(GL_RGBA, GL_UNSIGNED_BYTE);
         size_t tfNumElements = p_transferFunction.getTF()->getTexture()->getDimensions().x;

+        LDEBUG("Start computing brick visibilities...");
         // parallelly traverse the bricks
         // have minimum group size 8 to avoid race conditions (every 8 neighbor bricks write to the same byte)!
         tbb::parallel_for(tbb::blocked_range<size_t>(0, _bbv->getNumBrickIndices(), 8), [&] (const tbb::blocked_range<size_t>& range) {
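The comment above motivates the grain size of 8: eight neighboring bricks share a single byte of the bitmask, so two tasks must never read-modify-write the same byte concurrently. One way to make that byte ownership explicit is to parallelize over bytes instead of bricks, as in this sketch; brickIsVisible() is a hypothetical stand-in for the transfer-function test done in generateBbv(), and bricks are assumed to be laid out so that eight consecutive indices map to one byte:

    #include <cstdint>
    #include <vector>
    #include <tbb/blocked_range.h>
    #include <tbb/parallel_for.h>

    // Sketch: compute per-brick visibility in parallel without write races by
    // letting each task own whole bytes (8 bricks per byte).
    void computeVisibility(std::vector<uint8_t>& bbvBytes, size_t numBricks,
                           bool (*brickIsVisible)(size_t)) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, bbvBytes.size()),
            [&](const tbb::blocked_range<size_t>& range) {
                for (size_t byteIdx = range.begin(); byteIdx != range.end(); ++byteIdx) {
                    uint8_t packed = 0;
                    for (size_t bit = 0; bit < 8; ++bit) {
                        size_t brick = byteIdx * 8 + bit;
                        if (brick < numBricks && brickIsVisible(brick))
                            packed |= uint8_t(1u << bit);
                    }
                    bbvBytes[byteIdx] = packed;  // each byte is written by exactly one task
                }
            });
    }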
@@ -185,6 +193,8 @@ namespace campvis {
             }
         });
+        LDEBUG("...finished computing brick visibilities.");

+        // export to texture:
+        _t = _bbv->exportToImageData();
     }