Commit aead807e authored by schultezub's avatar schultezub

some work on OpenCL module

introducing OpenCL pipeline
first steps to adapt to Intel OpenCL SDK for debugging

git-svn-id: https://camplinux.in.tum.de/svn/campvis/trunk@380 bb408c1c-ae56-11e1-83d9-df6b3e0c105e
parent aa8a6504
......@@ -30,6 +30,7 @@
#include "application/campvisapplication.h"
#include "modules/vis/pipelines/dvrvis.h"
#include "modules/vis/pipelines/slicevis.h"
#include "modules/opencl/pipelines/openclpipeline.h"
using namespace campvis;
......@@ -42,8 +43,9 @@ using namespace campvis;
**/
int main(int argc, char** argv) {
CampVisApplication app(argc, argv);
app.addVisualizationPipeline("SliceVis", new SliceVis());
app.addVisualizationPipeline("DVRVis", new DVRVis());
//app.addVisualizationPipeline("SliceVis", new SliceVis());
//app.addVisualizationPipeline("DVRVis", new DVRVis());
app.addVisualizationPipeline("DVR with OpenCL", new OpenCLPipeline());
app.init();
int toReturn = app.run();
......
......@@ -254,7 +254,7 @@ namespace campvis {
cl_channel_order WeaklyTypedPointer::getClChannelOrder() const {
switch (_numChannels) {
case 1:
return CL_A;
return CL_INTENSITY;
case 2:
return CL_RA;
case 4:
......
......@@ -18,11 +18,11 @@ FILE(GLOB KISSCL_SOURCES *.cpp)
# define library target
################################################################################
ADD_LIBRARY(kisscl ${KISSCL_SOURCES} ${KISSCL_HEADERS})
ADD_DEFINITIONS(${CAMPVIS_DEFINITIONS} ${CAMPVIS_MODULE_DEFINITIONS})
ADD_DEFINITIONS(${CampvisGlobalDefinitions} ${CAMPVIS_MODULE_DEFINITIONS})
IF(CAMPVIS_SHARED_LIBS AND MSVC)
ADD_DEFINITIONS("-DKISSCL_BUILD_DLL")
ENDIF()
INCLUDE_DIRECTORIES(${CAMPVIS_INCLUDE_DIRECTORIES} ${CAMPVIS_MODULE_INCLUDE_DIRECTORIES})
INCLUDE_DIRECTORIES(${CampvisGlobalIncludeDirs} ${CAMPVIS_MODULE_INCLUDE_DIRECTORIES})
TARGET_LINK_LIBRARIES(kisscl ${CAMPVIS_EXTERNAL_LIBRARIES})
################################################################################
......
......@@ -94,7 +94,7 @@ namespace kisscl {
std::vector<ContextProperty> properties = Context::generateGlSharingProperties();
properties.insert(properties.end(), additionalProperties.begin(), additionalProperties.end());
for (std::vector<Device*>::const_iterator it = _gpuDevices.begin(); it != _gpuDevices.end(); ++it) {
for (std::vector<Device*>::const_iterator it = _cpuDevices.begin(); it != _cpuDevices.end(); ++it) {
toReturn = new Context(*it, properties);
if (toReturn->isValid())
return toReturn;
......
......@@ -591,7 +591,7 @@ void Shader::setHeaders(const string& customHeader) {
// Binds a user-defined varying out variable to a fragment shader color number.
// Only available from GLSL shader version 1.30 on.
// NOTE(review): diff residue — both the old glBindFragDataLocationEXT call and its
// replacement glBindFragDataLocation appear below; in the applied revision only the
// non-EXT core call should remain. Confirm against the repository state.
void Shader::bindFragDataLocation(GLuint colorNumber, std::string name) {
if (GpuCaps.getShaderVersion() >= GpuCapabilities::GlVersion::SHADER_VERSION_130) {
glBindFragDataLocationEXT(id_, colorNumber, name.c_str());
glBindFragDataLocation(id_, colorNumber, name.c_str());
}
}
......
#include "tools/raycasting.cl"
__constant sampler_t smpNorm = CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR;
__constant float SAMPLING_BASE_INTERVAL_RCP = 200.0;
/**
 * Makes a simple raycast through the volume from entry to exit point with minimal diffuse shading.
 * \param volumeTex     3D volume texture; the intensity is read from the w channel
 * \param tfData        1D transfer function stored as a 2D texture of height 1
 * \param entryPoint    ray entry position (normalized texture coordinates)
 * \param exitPoint     ray exit position (normalized texture coordinates)
 * \param depth         out: normalized ray-termination position t/tend
 * \param stepSize      sampling step size along the ray
 * \param tfLowerBound  lower bound of the transfer function intensity domain
 * \param tfUpperBound  upper bound of the transfer function intensity domain
 * \return  front-to-back composited RGBA color for this ray
 */
float4 simpleRaycast(__global read_only image3d_t volumeTex, __global read_only image2d_t tfData, const float4 entryPoint, const float4 exitPoint, float* depth, const float stepSize, float tfLowerBound, float tfUpperBound) {
// accumulated color along the ray (front-to-back alpha compositing)
float4 result = (float4)(0.0, 0.0, 0.0, 0.0);
float t = 0.0; //the current position on the ray with entryPoint as the origin
float4 direction = exitPoint - entryPoint; //the direction of the ray
direction.w = 0.0; // ignore w when computing ray length/direction
float tend = fast_length(direction); //the length of the ray
direction = fast_normalize(direction);
while(t <= tend) {
//calculate sample position and get corresponding voxel
float4 sample = entryPoint + t * direction;
float intensity = read_imagef(volumeTex, smpNorm, sample).w;
// apply tf intensity domain mapping:
// NOTE(review): divides by (tfUpperBound - tfLowerBound) — assumes the bounds differ; confirm callers guarantee this
intensity = (intensity - tfLowerBound) / (tfUpperBound - tfLowerBound);
// classification: look up color and opacity in the 1D transfer function
float4 color = read_imagef(tfData, smpNorm, (float2)(intensity, 0.0));
// apply opacity correction to accommodate for variable sampling intervals
color.w = 1.0 - pow(1.0 - color.w, stepSize * SAMPLING_BASE_INTERVAL_RCP);
// Add a little shading. calcGradient is declared in mod_gradients.cl
//float4 norm = normalize(calcGradient(sample, volumeTex));
//color *= fabs(dot(norm, direction));
//calculate ray integral (front-to-back compositing)
result.xyz = result.xyz + (1.0 - result.w) * color.w * color.xyz;
result.w = result.w + (1.0 - result.w) * color.w;
// early ray termination once accumulated opacity is nearly opaque
if(result.w > 0.95)
break;
//raise position on ray
t += stepSize;
}
// TODO: calculate correct depth value
// NOTE(review): t starts at 0 and only increases, so the else branch below is dead code
if(t >= 0.0)
*depth = t / tend;
else
*depth = 1.0;
return result;
}
// Main kernel for raycasting: one work-item per output pixel; reads entry/exit point
// textures, casts a ray via simpleRaycast() and writes the composited color.
// TODO: Depth values are currently not read or written as OpenCL does not support OpenGL GL_DEPTH_COMPONENT image formats.
__kernel void clraycaster(__global read_only image3d_t volumeTex,
__global read_only image2d_t tfData,
__global read_only image2d_t entryTexCol,
__global read_only image2d_t exitTexCol,
__global write_only image2d_t outCol,
float stepSize,
float tfLowerBound,
float tfUpperBound
)
{
//output image pixel coordinates
int2 target = (int2)(get_global_id(0), get_global_id(1));
// Need to add 0.5 in order to get the correct coordinate. We could also use the integer coordinate directly...
float2 targetNorm = (convert_float2(target) + (float2)(0.5)) / convert_float2((int2)(get_global_size(0), get_global_size(1)));
float4 color;
// depth is computed by simpleRaycast but currently discarded (see TODO above)
float depth = 1.0;
float4 entry = read_imagef(entryTexCol, smpNorm, targetNorm);
float4 exit = read_imagef(exitTexCol, smpNorm, targetNorm);
// entry == exit (xyz) marks background pixels with no ray to cast; w is ignored on purpose
if( entry.x != exit.x || entry.y != exit.y || entry.z != exit.z )
color = simpleRaycast(volumeTex, tfData, entry, exit, &depth, stepSize, tfLowerBound, tfUpperBound);
else
color = (float4)(0.0);
write_imagef(outCol, target, color);
//write_imagef(outDepth, target, (float4)(depth));
}
// Debug kernel: writes the per-pixel difference between exit and entry point textures
// (zero where entry equals exit, i.e. background). Useful for visually checking the
// entry/exit point generation.
__kernel void foobar(__global read_only image2d_t entryTexCol,
__global read_only image2d_t exitTexCol,
__global write_only image2d_t outCol)
{
//output image pixel coordinates
int2 target = (int2)(get_global_id(0), get_global_id(1));
// Need to add 0.5 in order to get the correct coordinate. We could also use the integer coordinate directly...
float2 targetNorm = (convert_float2(target) + (float2)(0.5)) / convert_float2((int2)(get_global_size(0), get_global_size(1)));
float4 color;
float4 entry = read_imagef(entryTexCol, smpNorm, targetNorm);
float4 exit = read_imagef(exitTexCol, smpNorm, targetNorm);
// only non-background pixels (entry != exit in xyz) produce a non-zero difference
if( entry.x != exit.x || entry.y != exit.y || entry.z != exit.z )
color = exit - entry;
else
color = (float4)(0.0);
write_imagef(outCol, target, color);
}
//#include "tools/raycasting.cl"
/**
 * Jitters the entry point along the ray direction by a pseudo-random fraction of one step,
 * de-correlating sampling positions between neighboring rays to hide wood-grain artifacts.
 * The pseudo-random value is a classic sine-based hash of the pixel coordinates.
 * \param position  ray entry position
 * \param direction ray direction
 * \param stepSize  sampling step size along the ray
 * \return entry position offset by at most one step along direction
 */
float4 jitterEntryPoint(float4 position, float4 direction, float stepSize) {
    // fract(x, iptr) RETURNS the fractional part of x and stores floor(x) in *iptr.
    // The previous code discarded the return value and used *iptr (floor(x), a huge
    // integer) as the jitter amount; use the fractional part in [0, 1) instead.
    float intPart;
    float random = fract(sin((float)get_global_id(0) * 12.9898f + (float)get_global_id(1) * 78.233f) * 43758.5453f, &intPart);
    return position + direction * (stepSize * random);
}
__constant sampler_t smpNorm = CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR;
__constant float SAMPLING_BASE_INTERVAL_RCP = 200.0f;
/**
 * Makes a simple raycast through the volume from entry to exit point with minimal diffuse shading.
 * \param volumeTex     3D volume texture; the intensity is read from the w channel
 * \param tfData        1D transfer function stored as a 2D texture of height 1
 * \param entryPoint    ray entry position (normalized texture coordinates)
 * \param exitPoint     ray exit position (normalized texture coordinates)
 * \param depth         out: normalized ray-termination position t/tend
 * \param stepSize      sampling step size along the ray
 * \param tfLowerBound  lower bound of the transfer function intensity domain
 * \param tfUpperBound  upper bound of the transfer function intensity domain
 * \return  front-to-back composited RGBA color for this ray
 */
float4 simpleRaycast(
image3d_t volumeTex,
image2d_t tfData,
const float4 entryPoint,
const float4 exitPoint,
float* depth,
const float stepSize,
float tfLowerBound,
float tfUpperBound) {
// accumulated color along the ray (front-to-back alpha compositing)
float4 result = (float4)(0.0f, 0.0f, 0.0f, 0.0f);
float t = 0.0f; //the current position on the ray with entryPoint as the origin
float4 direction = exitPoint - entryPoint; //the direction of the ray
direction.w = 0.0f; // ignore w when computing ray length/direction
float tend = fast_length(direction); //the length of the ray
direction = fast_normalize(direction);
while(t <= tend) {
//calculate sample position and get corresponding voxel
float4 sample = entryPoint + t * direction;
float intensity = read_imagef(volumeTex, smpNorm, sample).w;
// apply tf intensity domain mapping:
// NOTE(review): divides by (tfUpperBound - tfLowerBound) — assumes the bounds differ; confirm callers guarantee this
intensity = (intensity - tfLowerBound) / (tfUpperBound - tfLowerBound);
// classification: look up color and opacity in the 1D transfer function
float4 color = read_imagef(tfData, smpNorm, (float2)(intensity, 0.0));
// apply opacity correction to accommodate for variable sampling intervals
color.w = 1.0 - pow(1.0 - color.w, stepSize * SAMPLING_BASE_INTERVAL_RCP);
// Add a little shading. calcGradient is declared in mod_gradients.cl
//float4 norm = normalize(calcGradient(sample, volumeTex));
//color *= fabs(dot(norm, direction));
//calculate ray integral (front-to-back compositing)
result.xyz = result.xyz + (1.0 - result.w) * color.w * color.xyz;
result.w = result.w + (1.0 - result.w) * color.w;
// early ray termination once accumulated opacity is nearly opaque
if(result.w > 0.95)
break;
//raise position on ray
t += stepSize;
}
// TODO: calculate correct depth value
// NOTE(review): t starts at 0 and only increases, so the else branch below is dead code
if(t >= 0.0)
*depth = t / tend;
else
*depth = 1.0;
return result;
}
// Main kernel for raycasting: one work-item per output pixel; reads entry/exit point
// textures, casts a ray via simpleRaycast() and writes the composited color.
// Note: unlike the other .cl version, image arguments here carry no __global qualifier
// (access qualifiers read_only/write_only suffice for image types).
// TODO: Depth values are currently not read or written as OpenCL does not support OpenGL GL_DEPTH_COMPONENT image formats.
__kernel void clraycaster( read_only image3d_t volumeTex,
read_only image2d_t tfData,
read_only image2d_t entryTexCol,
read_only image2d_t exitTexCol,
write_only image2d_t outCol,
float stepSize,
float tfLowerBound,
float tfUpperBound
)
{
//output image pixel coordinates
int2 target = (int2)(get_global_id(0), get_global_id(1));
// Need to add 0.5 in order to get the correct coordinate. We could also use the integer coordinate directly...
float2 targetNorm = (convert_float2(target) + (float2)(0.5)) / convert_float2((int2)(get_global_size(0), get_global_size(1)));
float4 color;
// depth is computed by simpleRaycast but currently discarded (see TODO above)
float depth = 1.0;
float4 entry = read_imagef(entryTexCol, smpNorm, targetNorm);
float4 exit = read_imagef(exitTexCol, smpNorm, targetNorm);
// entry == exit (xyz) marks background pixels with no ray to cast; w is ignored on purpose
if( entry.x != exit.x || entry.y != exit.y || entry.z != exit.z )
color = simpleRaycast(volumeTex, tfData, entry, exit, &depth, stepSize, tfLowerBound, tfUpperBound);
else
color = (float4)(0.0);
write_imagef(outCol, target, color);
//write_imagef(outDepth, target, (float4)(depth));
}
// Debug kernel: writes the per-pixel difference between the exit and entry point
// textures (zero for background pixels where entry equals exit). Handy for visually
// verifying entry/exit point generation.
__kernel void foobar( read_only image2d_t entryTexCol,
read_only image2d_t exitTexCol,
write_only image2d_t outCol)
{
    // pixel this work-item is responsible for
    const int2 pixel = (int2)(get_global_id(0), get_global_id(1));
    // normalized lookup coordinate at the texel center (hence the +0.5 offset)
    const float2 viewport = convert_float2((int2)(get_global_size(0), get_global_size(1)));
    const float2 lookupCoord = (convert_float2(pixel) + (float2)(0.5)) / viewport;
    const float4 front = read_imagef(entryTexCol, smpNorm, lookupCoord);
    const float4 back = read_imagef(exitTexCol, smpNorm, lookupCoord);
    // default to transparent black; only non-background pixels get a difference color
    float4 outputColor = (float4)(0.0);
    if (front.x != back.x || front.y != back.y || front.z != back.z)
        outputColor = back - front;
    write_imagef(outCol, pixel, outputColor);
}
......@@ -6,12 +6,14 @@ ENDIF(NOT OPENCL_FOUND)
# Source files:
FILE(GLOB ThisModSources RELATIVE ${ModulesDir}
modules/opencl/pipelines/*.cpp
modules/opencl/processors/*.cpp
)
# Header files (including CL files so that they'll appear in VS projects)
FILE(GLOB ThisModHeaders RELATIVE ${ModulesDir}
modules/opencl/glsl/*.cl
modules/opencl/pipelines/*.h
modules/opencl/processors/*.h
)
......
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universität München
// Boltzmannstr. 3, 85748 Garching b. München, Germany
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// The licensing of this software is not yet resolved. Until then, redistribution in source or
// binary forms outside the CAMP chair is not permitted, unless explicitly stated in legal form.
// However, the names of the original authors and the above copyright notice must retain in its
// original state in any case.
//
// Legal disclaimer provided by the BSD license:
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ================================================================================================
#include "openclpipeline.h"
#include "tgt/event/keyevent.h"
#include "tgt/glcontext.h"
#include "tgt/qt/qtcontextmanager.h"
#include "core/datastructures/imagedataconverter.h"
#include "core/classification/geometry1dtransferfunction.h"
#include "core/classification/tfgeometry1d.h"
namespace campvis {
// Constructs the OpenCL DVR demo pipeline: an MHD image reader feeding a proxy-geometry
// generator, an entry/exit-point (EEP) generator and the OpenCL raycaster; a trackball
// event handler drives the shared camera.
OpenCLPipeline::OpenCLPipeline()
: VisualizationPipeline()
, _camera("camera", "Camera")
, _imageReader()
, _pgGenerator()
, _eepGenerator(_effectiveRenderTargetSize)
, _clRaycaster(_effectiveRenderTargetSize)
, _trackballEH(0)
{
addProperty(&_camera);
// raw owning pointer; released in the destructor
_trackballEH = new TrackballNavigationEventHandler(this, &_camera, _renderTargetSize);
_eventHandlers.push_back(_trackballEH);
// registration order matches the execution order in execute()
addProcessor(&_imageReader);
addProcessor(&_pgGenerator);
addProcessor(&_eepGenerator);
addProcessor(&_clRaycaster);
}
// Releases the trackball event handler allocated in the constructor.
OpenCLPipeline::~OpenCLPipeline() {
delete _trackballEH;
}
// Initializes the pipeline: shares the camera property with the EEP generator and the
// raycaster, wires data-container IDs between processors, builds a default transfer
// function and connects invalidation signals.
void OpenCLPipeline::init() {
VisualizationPipeline::init();
// the camera is shared so that EEP generation and raycasting use the same view
_camera.addSharedProperty(&_eepGenerator.p_camera);
_camera.addSharedProperty(&_clRaycaster._camera);
//_imageReader.p_url.setValue("D:\\Medical Data\\Dentalscan\\dental.mhd");
// NOTE(review): hard-coded local Windows dataset path — should be made configurable
_imageReader.p_url.setValue("D:\\Medical Data\\smallHeart.mhd");
_imageReader.p_targetImageID.setValue("reader.output");
_clRaycaster._targetImageID.setValue("cl.output");
_clRaycaster._sourceImageID.setValue("clr.input");
// default transfer function: 128-entry table over intensity domain [0, .05] with a red
// and a green quad (ownership of dvrTF is passed to replaceTF below)
Geometry1DTransferFunction* dvrTF = new Geometry1DTransferFunction(128, tgt::vec2(0.f, .05f));
dvrTF->addGeometry(TFGeometry1D::createQuad(tgt::vec2(.4f, .42f), tgt::col4(255, 0, 0, 255), tgt::col4(255, 0, 0, 255)));
dvrTF->addGeometry(TFGeometry1D::createQuad(tgt::vec2(.45f, .5f), tgt::col4(0, 255, 0, 255), tgt::col4(0, 255, 0, 255)));
_clRaycaster._transferFunction.replaceTF(dvrTF);
_eepGenerator.p_sourceImageID.setValue("eep.input");
_pgGenerator.p_sourceImageID.setValue("eep.input");
// the raycaster output is what gets blitted to the canvas
_renderTargetID.setValue("cl.output");
// property chaining: proxy geometry -> EEP generator -> raycaster entry/exit images
_pgGenerator.p_geometryID.connect(&_eepGenerator.p_geometryID);
_eepGenerator.p_entryImageID.connect(&_clRaycaster._entryImageID);
_eepGenerator.p_exitImageID.connect(&_clRaycaster._exitImageID);
// re-run the pipeline whenever any processor invalidates itself
_imageReader.s_invalidated.connect<OpenCLPipeline>(this, &OpenCLPipeline::onProcessorInvalidated);
_pgGenerator.s_invalidated.connect<OpenCLPipeline>(this, &OpenCLPipeline::onProcessorInvalidated);
_eepGenerator.s_invalidated.connect<OpenCLPipeline>(this, &OpenCLPipeline::onProcessorInvalidated);
_clRaycaster.s_invalidated.connect<OpenCLPipeline>(this, &OpenCLPipeline::onProcessorInvalidated);
_trackballEH->setViewportSize(_effectiveRenderTargetSize.getValue());
_effectiveRenderTargetSize.s_changed.connect<OpenCLPipeline>(this, &OpenCLPipeline::onRenderTargetSizeChanged);
}
// Executes the pipeline: runs the image reader if invalid, converts its output into the
// float image the raycaster needs plus an OpenGL texture for EEP generation, re-centers
// the trackball camera on the volume, then runs the remaining processors in order.
void OpenCLPipeline::execute() {
{
tbb::spin_mutex::scoped_lock lock(_localMutex);
_invalidationLevel.setValid();
// TODO: think whether we want to lock all processors already here.
}
if (! _imageReader.getInvalidationLevel().isValid()) {
executeProcessor(&_imageReader);
// convert data
DataContainer::ScopedTypedData<ImageData> img(_data, "reader.output");
if (img != 0) {
// convert to a single-channel float image for the OpenCL raycaster / transfer function
ImageDataLocal* local = ImageDataConverter::tryConvert<ImageDataLocal>(img);
if (local != 0) {
size_t numElements = local->getNumElements();
float* asFloats = new float[numElements];
for (size_t i = 0; i < numElements; ++i)
asFloats[i] = local->getElementNormalized(i, 0);
// NOTE(review): presumably GenericImageDataLocal takes ownership of asFloats — confirm,
// otherwise this buffer leaks
GenericImageDataLocal<float, 1>* imageWithFloats = new GenericImageDataLocal<float, 1>(local->getDimensionality(), local->getSize(), asFloats);
DataHandle dh = _data.addData("clr.input", imageWithFloats);
_clRaycaster._transferFunction.getTF()->setImageHandle(dh);
}
// local is owned here (tryConvert result); safe to delete even if 0
delete local;
{
// GL texture creation must happen with the canvas' GL context current
tgt::GLContextScopedLock lock(_canvas->getContext());
ImageDataGL* gl = ImageDataConverter::tryConvert<ImageDataGL>(img);
if (gl != 0) {
_data.addData("eep.input", gl);
}
}
CtxtMgr.releaseCurrentContext();
// position the trackball camera in front of the volume, looking at its center
tgt::Bounds volumeExtent = img->getWorldBounds();
tgt::vec3 pos = volumeExtent.center() - tgt::vec3(0, 0, tgt::length(volumeExtent.diagonal()));
_trackballEH->setSceneBounds(volumeExtent);
_trackballEH->setCenter(volumeExtent.center());
_trackballEH->reinitializeCamera(pos, volumeExtent.center(), _camera.getValue().getUpVector());
}
}
// downstream processors need the GL context while executing
if (! _pgGenerator.getInvalidationLevel().isValid()) {
lockGLContextAndExecuteProcessor(&_pgGenerator);
}
if (! _eepGenerator.getInvalidationLevel().isValid()) {
lockGLContextAndExecuteProcessor(&_eepGenerator);
}
if (! _clRaycaster.getInvalidationLevel().isValid()) {
lockGLContextAndExecuteProcessor(&_clRaycaster);
}
}
// Returns the human-readable name of this pipeline (\see AbstractPipeline::getName()).
const std::string OpenCLPipeline::getName() const {
    static const std::string pipelineName("OpenCLPipeline");
    return pipelineName;
}
// Slot invoked when the effective render-target size changed: propagates the new size
// to the trackball handler and updates the camera's aspect ratio.
// NOTE(review): the viewport is taken from _renderTargetSize while the ratio uses
// _effectiveRenderTargetSize — confirm this mix is intended. The prop parameter is unused.
void OpenCLPipeline::onRenderTargetSizeChanged(const AbstractProperty* prop) {
_trackballEH->setViewportSize(_renderTargetSize);
float ratio = static_cast<float>(_effectiveRenderTargetSize.getValue().x) / static_cast<float>(_effectiveRenderTargetSize.getValue().y);
_camera.setWindowRatio(ratio);
}
}
\ No newline at end of file
// ================================================================================================
//
// This file is part of the CAMPVis Software Framework.
//
// If not explicitly stated otherwise: Copyright (C) 2012, all rights reserved,
// Christian Schulte zu Berge <christian.szb@in.tum.de>
// Chair for Computer Aided Medical Procedures
// Technische Universität München
// Boltzmannstr. 3, 85748 Garching b. München, Germany
// For a full list of authors and contributors, please refer to the file "AUTHORS.txt".
//
// The licensing of this software is not yet resolved. Until then, redistribution in source or
// binary forms outside the CAMP chair is not permitted, unless explicitly stated in legal form.
// However, the names of the original authors and the above copyright notice must retain in its
// original state in any case.
//
// Legal disclaimer provided by the BSD license:
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ================================================================================================
#ifndef OPENCLPIPELINE_H__
#define OPENCLPIPELINE_H__
#include "core/datastructures/imagedatalocal.h"
#include "core/eventhandlers/trackballnavigationeventhandler.h"
#include "core/pipeline/visualizationpipeline.h"
#include "core/properties/cameraproperty.h"
#include "modules/io/processors/mhdimagereader.h"
#include "modules/vis/processors/proxygeometrygenerator.h"
#include "modules/vis/processors/eepgenerator.h"
#include "modules/opencl/processors/clraycaster.h"
namespace campvis {
/**
 * Demo visualization pipeline performing direct volume rendering with an OpenCL raycaster:
 * MHD image reader -> proxy geometry generator -> entry/exit point generator -> CLRaycaster.
 */
class OpenCLPipeline : public VisualizationPipeline {
public:
/**
* Creates a VisualizationPipeline.
*/
OpenCLPipeline();
/**
* Virtual Destructor
**/
virtual ~OpenCLPipeline();
/// \see VisualizationPipeline::init()
virtual void init();
/// \see AbstractPipeline::getName()
virtual const std::string getName() const;
/**
* Execute this pipeline.
**/
virtual void execute();
/// Slot for _effectiveRenderTargetSize.s_changed; updates viewport and camera ratio.
void onRenderTargetSizeChanged(const AbstractProperty* prop);
protected:
CameraProperty _camera;  ///< camera shared with the EEP generator and the raycaster
MhdImageReader _imageReader;  ///< reads the volume dataset from disk
ProxyGeometryGenerator _pgGenerator;  ///< generates the proxy geometry for EEP rendering
EEPGenerator _eepGenerator;  ///< renders ray entry/exit point textures
CLRaycaster _clRaycaster;  ///< OpenCL DVR raycaster producing the final image
TrackballNavigationEventHandler* _trackballEH;  ///< owned; deleted in the destructor
};
}
#endif // OPENCLPIPELINE_H__
......@@ -93,8 +93,8 @@ namespace campvis {
_clContext = CLRtm.createGlSharingContext();
if (_clContext != 0) {
_clProgram = CLRtm.loadProgram(_clContext, "modules/vis/cl/clraycaster.cl");
_clProgram->setBuildOptions(" -cl-fast-relaxed-math -cl-mad-enable");
_clProgram = CLRtm.loadProgram(_clContext, "modules/opencl/cl/clraycaster.cl");
_clProgram->setBuildOptions(" -cl-fast-relaxed-math -cl-mad-enable -g -s \"C:\\Users\\Christian\\Documents\\TUM\\code\\campvis\\modules\\opencl\\cl\\clraycaster.cl\"");
_clProgram->build();
}
}
......@@ -105,6 +105,44 @@ namespace campvis {
VisualizationProcessor::deinit();
}
// Helper function to get OpenCL image format string (channel order and type) from constant
// *********************************************************************
const char* oclImageFormatString(cl_uint uiImageFormat)
{
// cl_channel_order
if (uiImageFormat == CL_R)return "CL_R";
if (uiImageFormat == CL_A)return "CL_A";
if (uiImageFormat == CL_RG)return "CL_RG";
if (uiImageFormat == CL_RA)return "CL_RA";