The expiration time for new job artifacts in CI/CD pipelines is now 30 days (GitLab default). Previously generated artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

Commit d281e78e authored by Christian Schulte zu Berge's avatar Christian Schulte zu Berge
Browse files

closes Issue #42: Completely removed ViennaCL dependency from RandomWalksLib.

parent 9fe8a51f
......@@ -33,29 +33,13 @@ SET(RandomWalksLibSources ConfidenceMaps2D.cpp
SparseSolverEigenLLT.cpp
SparseSolverFactory.cpp)
# Make OpenCL dependency optional
IF(OPENCL_FOUND)
# Define RANDOMWALKSLIB_HAS_OPENCL so the C++ sources can conditionally compile the ViennaCL solvers,
# link the OpenCL runtime, and add the two Vienna solver translation units to the build.
ADD_DEFINITIONS("-DRANDOMWALKSLIB_HAS_OPENCL")
LIST(APPEND RandomWalksLibExternalLibs ${OPENCL_LIBRARY})
LIST(APPEND RandomWalksLibHeaders SparseSolverViennaCPU.h SparseSolverViennaGPU.h)
LIST(APPEND RandomWalksLibSources SparseSolverViennaCPU.cpp SparseSolverViennaGPU.cpp)
ENDIF()
################################################################################
# define library target
################################################################################
# Eigen RandomWalksLibHeaders are already in CAMPVis/ext
INCLUDE_DIRECTORIES(${CampvisGlobalIncludeDirs})
#include ViennaCl RandomWalksLibHeaders
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
# Static library: RandomWalksLib is linked into the surrounding CAMPVis build.
ADD_LIBRARY(RandomWalksLib STATIC ${RandomWalksLibSources} ${RandomWalksLibHeaders})
# We don't want this within CAMPVis...
#put debug target to x64/Debug and all other configurations to x64/Release
#SET_TARGET_PROPERTIES( RandomWalksLib PROPERTIES
# ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/x64/Release #static libs are archives
# ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/x64/Debug
# ARCHIVE_OUTPUT_NAME RandomWalksLib
# ARCHIVE_OUTPUT_NAME_DEBUG RandomWalksLibd ) #debug build should end with a 'd'
TARGET_LINK_LIBRARIES(RandomWalksLib ${RandomWalksLibExternalLibs})
......@@ -35,18 +35,6 @@ SparseSolverInterface * SparseSolverFactory::createSolver(std::string type, int
SparseSolverInterface * solver = new SparseSolverEigenCustom(iterations,tolerance);
return solver;
}
#ifdef RANDOMWALKSLIB_HAS_OPENCL
else if(type.compare("Vienna-CG-CPU")==0)
{
SparseSolverInterface * solver = new SparseSolverViennaCPU(iterations,tolerance);
return solver;
}
else if(type.compare("Vienna-CG-GPU")==0)
{
SparseSolverInterface * solver = new SparseSolverViennaGPU(iterations,tolerance);
return solver;
}
#endif
else
{
return new SparseSolverEigenLLT();
......
#include "SparseSolverViennaCPU.h"
#define VIENNACL_HAVE_EIGEN
#include <viennacl\scalar.hpp>
#include <viennacl\matrix.hpp>
#include <viennacl\compressed_matrix.hpp>
#include <viennacl\linalg\cg.hpp>
/// Stores the CG iteration limit and convergence tolerance for later solves.
SparseSolverViennaCPU::SparseSolverViennaCPU(int iterations, double tolerance)
    : iterations(iterations)
    , tolerance(tolerance)
{
}
/** Solves the random-walks system A*x = b with ViennaCL's conjugate-gradient solver on the CPU.
 *  \param A            system matrix for the unknown (unseeded) nodes (already a by-value copy)
 *  \param b            sparse right-hand side
 *  \param numel        total number of elements/pixels in the full result vector
 *  \param uidx         maps row i of the reduced system to its index in the full result
 *  \param labels       label per seed; NOTE(review): assumed parallel to seeds — TODO confirm at call sites
 *  \param seeds        indices of seeded elements in the full result
 *  \param active_label seeds carrying this label are forced to 1.0, all other seeds to 0.0
 *  \return full-size solution vector of length numel
 */
std::vector<double> SparseSolverViennaCPU::solve_Ax_b(SparseMatrix<double> A, SparseVector<double> b, int numel, std::vector<int> & uidx, const std::vector<int> * labels, const std::vector<int> * seeds, int active_label)
{
    // Densify the right-hand side for the solver.
    VectorXd b_dense = b;
    viennacl::linalg::cg_tag custom_tag(tolerance, iterations);
    // Fix: A is already a by-value copy; the previous extra SparseMatrix copy was redundant.
    VectorXd x_dense = viennacl::linalg::solve(A, b_dense, custom_tag); // CPU Solver with dense
    std::vector<double> xmat(numel);
    // Scatter the solved unknowns back into the full-size result.
    for (int i = 0; i < x_dense.rows(); i++)
    {
        xmat[uidx[i]] = x_dense(i);
    }
    // Seeded elements get their known probabilities: 1 for the active label, 0 otherwise.
    for (size_t i = 0; i < seeds->size(); i++)
    {
        if ((*labels)[i] == active_label)
            xmat[(*seeds)[i]] = 1.0;
        else
            xmat[(*seeds)[i]] = 0.0;
    }
    return xmat;
}
\ No newline at end of file
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef SPARSE_SOLVER_VIENNA_CPU_H__
#define SPARSE_SOLVER_VIENNA_CPU_H__
#include "SparseSolverInterface.h"
/** \brief Conjugate Gradient Vienna solver on CPU for random walks system
* \author Athanasios Karamalis
* \date 11.06.2012
*/
class SparseSolverViennaCPU : public SparseSolverInterface
{
public:
/// \param iterations maximum number of CG iterations
/// \param tolerance  CG convergence tolerance
SparseSolverViennaCPU(int iterations, double tolerance);
/// Solver with CG
/// Returns the full-size solution vector (length numel); seeded entries are forced to 1/0.
virtual std::vector<double> solve_Ax_b(SparseMatrix<double> A, SparseVector<double> b, int numel, std::vector<int> & uidx, const std::vector<int> * labels, const std::vector<int> * seeds, int active_label);
private:
int iterations; ///< CG iterations
double tolerance; ///< CG tolerance
};
#endif
\ No newline at end of file
#include "SparseSolverViennaGPU.h"
#define VIENNACL_HAVE_EIGEN
#include <viennacl\scalar.hpp>
#include <viennacl\matrix.hpp>
#include <viennacl\compressed_matrix.hpp>
#include <viennacl\linalg\cg.hpp>
/// Stores the CG iteration limit and convergence tolerance for later solves.
SparseSolverViennaGPU::SparseSolverViennaGPU(int iterations, double tolerance)
    : iterations(iterations)
    , tolerance(tolerance)
{
}
/** Solves the random-walks system A*x = b with ViennaCL's conjugate-gradient solver on the GPU.
 *  Copies the system to device memory, solves there, and copies the result back.
 *  \param A            system matrix for the unknown (unseeded) nodes (already a by-value copy)
 *  \param b            sparse right-hand side
 *  \param numel        total number of elements/pixels in the full result vector
 *  \param uidx         maps row i of the reduced system to its index in the full result
 *  \param labels       label per seed; NOTE(review): assumed parallel to seeds — TODO confirm at call sites
 *  \param seeds        indices of seeded elements in the full result
 *  \param active_label seeds carrying this label are forced to 1.0, all other seeds to 0.0
 *  \return full-size solution vector of length numel
 */
std::vector<double> SparseSolverViennaGPU::solve_Ax_b(SparseMatrix<double> A, SparseVector<double> b, int numel, std::vector<int> & uidx, const std::vector<int> * labels, const std::vector<int> * seeds, int active_label)
{
    // Densify the right-hand side for the device transfer.
    VectorXd b_dense = b;
    viennacl::linalg::cg_tag custom_tag(tolerance, iterations);
    // Fix: A is already a by-value copy; the previous extra SparseMatrix copy was redundant.
    viennacl::compressed_matrix<double> viennacl_A(A.rows(), A.cols());
    viennacl::vector<double> viennacl_b(b_dense.rows());
    viennacl::vector<double> viennacl_x(b_dense.rows());
    // Host -> device transfers.
    viennacl::copy(A, viennacl_A);
    viennacl::copy(b_dense, viennacl_b);
    viennacl_x = viennacl::linalg::solve(viennacl_A, viennacl_b, custom_tag); // GPU Solver
    // Device -> host transfer of the solution.
    VectorXd x_dense(b_dense.rows());
    viennacl::copy(viennacl_x, x_dense);
    std::vector<double> xmat(numel);
    // Scatter the solved unknowns back into the full-size result.
    for (int i = 0; i < x_dense.rows(); i++)
    {
        xmat[uidx[i]] = x_dense(i);
    }
    // Seeded elements get their known probabilities: 1 for the active label, 0 otherwise.
    for (size_t i = 0; i < seeds->size(); i++)
    {
        if ((*labels)[i] == active_label)
            xmat[(*seeds)[i]] = 1.0;
        else
            xmat[(*seeds)[i]] = 0.0;
    }
    return xmat;
}
\ No newline at end of file
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Fix: include guard was misspelled "SPRASE_..."; both guard lines renamed consistently.
#ifndef SPARSE_SOLVER_VIENNA_GPU_H__
#define SPARSE_SOLVER_VIENNA_GPU_H__
#include "SparseSolverInterface.h"
/** \brief Conjugate Gradient Vienna solver on GPU for random walks system
 * \author Athanasios Karamalis
 * \date 11.06.2012
 */
class SparseSolverViennaGPU : public SparseSolverInterface
{
public:
    /// \param iterations maximum number of CG iterations
    /// \param tolerance  CG convergence tolerance
    SparseSolverViennaGPU(int iterations, double tolerance);
    /// Solver with CG
    /// Returns the full-size solution vector (length numel); seeded entries are forced to 1/0.
    virtual std::vector<double> solve_Ax_b(SparseMatrix<double> A, SparseVector<double> b, int numel, std::vector<int> & uidx, const std::vector<int> * labels, const std::vector<int> * seeds, int active_label);
private:
    int iterations; ///< CG iterations
    double tolerance; ///< CG tolerance
};
#endif
\ No newline at end of file
#ifndef VIENNACL_BACKEND_CPU_RAM_HPP_
#define VIENNACL_BACKEND_CPU_RAM_HPP_
/* =========================================================================
Copyright (c) 2010-2012, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp rupp@iue.tuwien.ac.at
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/** @file viennacl/backend/cpu_ram.hpp
@brief Implementations for the OpenCL backend functionality
*/
#include <vector>
#include "viennacl/tools/shared_ptr.hpp"
namespace viennacl
{
namespace backend
{
namespace cpu_ram
{
typedef viennacl::tools::shared_ptr<char> handle_type;
// Requirements for backend:
// * memory_create(size, host_ptr)
// * memory_copy(src, dest, offset_src, offset_dest, size)
// * memory_write_from_main_memory(src, offset, size,
// dest, offset, size)
// * memory_read_to_main_memory(src, offset, size
// dest, offset, size)
// *
//
namespace detail
{
    /** @brief Deleter functor that releases an array allocated with new[].
     *  Used as the custom deleter for shared_ptr-managed host buffers. */
    template<class U>
    struct array_deleter
    {
        void operator()(U* ptr) const { delete[] ptr; }
    };
}
/** @brief Allocates size_in_bytes of main memory and wraps it in a ref-counted handle.
 *  If host_ptr is given, its contents are copied into the new buffer;
 *  otherwise the buffer is returned uninitialized. */
inline handle_type memory_create(std::size_t size_in_bytes, const void * host_ptr = NULL)
{
    handle_type result(new char[size_in_bytes], detail::array_deleter<char>());
    if (host_ptr)
    {
        // copy data:
        char * dst = result.get();
        const char * src = static_cast<const char *>(host_ptr);
        for (std::size_t byte = 0; byte < size_in_bytes; ++byte)
            dst[byte] = src[byte];
    }
    return result;
}
/** @brief Copies bytes_to_copy bytes between two main-memory buffers at the given byte offsets.
 *  Both handles must already be initialized. */
inline void memory_copy(handle_type const & src_buffer,
                        handle_type & dst_buffer,
                        std::size_t src_offset,
                        std::size_t dst_offset,
                        std::size_t bytes_to_copy)
{
    assert( (dst_buffer.get() != NULL) && bool("Memory not initialized!"));
    assert( (src_buffer.get() != NULL) && bool("Memory not initialized!"));
    char * dst = dst_buffer.get() + dst_offset;
    const char * src = src_buffer.get() + src_offset;
    for (std::size_t byte = 0; byte < bytes_to_copy; ++byte)
        dst[byte] = src[byte];
}
/** @brief Writes bytes_to_copy bytes from ptr into the buffer at dst_offset.
 *  The destination handle must already be initialized. */
inline void memory_write(handle_type & dst_buffer,
                         std::size_t dst_offset,
                         std::size_t bytes_to_copy,
                         const void * ptr)
{
    assert( (dst_buffer.get() != NULL) && bool("Memory not initialized!"));
    const char * src = static_cast<const char *>(ptr);
    char * dst = dst_buffer.get() + dst_offset;
    for (std::size_t byte = 0; byte < bytes_to_copy; ++byte)
        dst[byte] = src[byte];
}
/** @brief Reads bytes_to_copy bytes starting at src_offset from the buffer into ptr.
 *  The source handle must already be initialized. */
inline void memory_read(handle_type const & src_buffer,
                        std::size_t src_offset,
                        std::size_t bytes_to_copy,
                        void * ptr)
{
    assert( (src_buffer.get() != NULL) && bool("Memory not initialized!"));
    const char * src = src_buffer.get() + src_offset;
    char * dst = static_cast<char *>(ptr);
    for (std::size_t byte = 0; byte < bytes_to_copy; ++byte)
        dst[byte] = src[byte];
}
}
} //backend
} //viennacl
#endif
#ifndef VIENNACL_BACKEND_CUDA_HPP_
#define VIENNACL_BACKEND_CUDA_HPP_
/* =========================================================================
Copyright (c) 2010-2012, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp rupp@iue.tuwien.ac.at
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/** @file viennacl/backend/cuda.hpp
@brief Implementations for the CUDA backend functionality
*/
#include <iostream>
#include <vector>
#include "viennacl/tools/shared_ptr.hpp"
// includes CUDA
#include <cuda_runtime.h>
#define VIENNACL_CUDA_ERROR_CHECK(err) detail::cuda_error_check (err, __FILE__, __LINE__)
namespace viennacl
{
namespace backend
{
namespace cuda
{
typedef viennacl::tools::shared_ptr<char> handle_type;
// Requirements for backend:
// * memory_create(size, host_ptr)
// * memory_copy(src, dest, offset_src, offset_dest, size)
// * memory_write_from_main_memory(src, offset, size,
// dest, offset, size)
// * memory_read_to_main_memory(src, offset, size
// dest, offset, size)
// *
//
namespace detail
{
    /** @brief Checks a CUDA runtime return code; prints diagnostics and throws on failure.
     *  @param error_code return value of a CUDA runtime call
     *  @param file       source file of the call site (via __FILE__)
     *  @param line       source line of the call site (via __LINE__)
     *  @throws const char* ("CUDA error") if error_code != cudaSuccess */
    inline void cuda_error_check(cudaError error_code, const char *file, const int line )
    {
        if(cudaSuccess != error_code)
        {
            // Fix: the message previously emitted a doubled separator ("): : CUDA ...").
            std::cerr << file << "(" << line << "): CUDA Runtime API error " << error_code << ": " << cudaGetErrorString( error_code ) << std::endl;
            throw "CUDA error";
        }
    }

    /** @brief Deleter functor releasing device memory obtained from cudaMalloc(). */
    template <typename U>
    struct cuda_deleter
    {
        void operator()(U * p) const
        {
            //std::cout << "Freeing handle " << reinterpret_cast<void *>(p) << std::endl;
            cudaFree(p);
        }
    };
}
/** @brief Allocates size_in_bytes of device memory and wraps it in a ref-counted handle.
 *  If host_ptr is given, its contents are copied to the device; otherwise the buffer
 *  is returned uninitialized.
 *  @throws const char* ("CUDA error") on allocation or copy failure */
inline handle_type memory_create(std::size_t size_in_bytes, const void * host_ptr = NULL)
{
    void * dev_ptr = NULL;
    VIENNACL_CUDA_ERROR_CHECK( cudaMalloc(&dev_ptr, size_in_bytes) );
    //std::cout << "Allocated new dev_ptr " << dev_ptr << " of size " << size_in_bytes << std::endl;
    if (!host_ptr)
        return handle_type(reinterpret_cast<char *>(dev_ptr), detail::cuda_deleter<char>());
    handle_type new_handle(reinterpret_cast<char*>(dev_ptr), detail::cuda_deleter<char>());
    // copy data:
    // Fix: the cudaMemcpy result was previously ignored; check it like cudaMalloc above.
    VIENNACL_CUDA_ERROR_CHECK( cudaMemcpy(new_handle.get(), host_ptr, size_in_bytes, cudaMemcpyHostToDevice) );
    return new_handle;
}
/** @brief Copies bytes_to_copy bytes between two device buffers at the given byte offsets.
 *  Both handles must already be initialized.
 *  @throws const char* ("CUDA error") on copy failure */
inline void memory_copy(handle_type const & src_buffer,
                        handle_type & dst_buffer,
                        std::size_t src_offset,
                        std::size_t dst_offset,
                        std::size_t bytes_to_copy)
{
    assert( (dst_buffer.get() != NULL) && bool("Memory not initialized!"));
    assert( (src_buffer.get() != NULL) && bool("Memory not initialized!"));
    // Fix: the cudaMemcpy result was previously ignored; check it for consistency.
    VIENNACL_CUDA_ERROR_CHECK( cudaMemcpy(reinterpret_cast<void *>(dst_buffer.get() + dst_offset),
                                          reinterpret_cast<void *>(src_buffer.get() + src_offset),
                                          bytes_to_copy,
                                          cudaMemcpyDeviceToDevice) );
}
/** @brief Copies bytes_to_copy bytes from host memory at ptr to the device buffer at dst_offset.
 *  The destination handle must already be initialized.
 *  @throws const char* ("CUDA error") on copy failure */
inline void memory_write(handle_type & dst_buffer,
                         std::size_t dst_offset,
                         std::size_t bytes_to_copy,
                         const void * ptr)
{
    assert( (dst_buffer.get() != NULL) && bool("Memory not initialized!"));
    // Fix: the cudaMemcpy result was previously ignored; check it for consistency.
    VIENNACL_CUDA_ERROR_CHECK( cudaMemcpy(reinterpret_cast<char *>(dst_buffer.get()) + dst_offset,
                                          reinterpret_cast<const char *>(ptr),
                                          bytes_to_copy,
                                          cudaMemcpyHostToDevice) );
}
/** @brief Copies bytes_to_copy bytes from the device buffer at src_offset into host memory at ptr.
 *  The source handle must already be initialized.
 *  @throws const char* ("CUDA error") on copy failure */
inline void memory_read(handle_type const & src_buffer,
                        std::size_t src_offset,
                        std::size_t bytes_to_copy,
                        void * ptr)
{
    assert( (src_buffer.get() != NULL) && bool("Memory not initialized!"));
    // Fix: the cudaMemcpy result was previously ignored; check it for consistency.
    VIENNACL_CUDA_ERROR_CHECK( cudaMemcpy(reinterpret_cast<char *>(ptr),
                                          reinterpret_cast<char *>(src_buffer.get()) + src_offset,
                                          bytes_to_copy,
                                          cudaMemcpyDeviceToHost) );
}
} //cuda
} //backend
} //viennacl
#endif
#ifndef VIENNACL_BACKEND_MEM_HANDLE_HPP
#define VIENNACL_BACKEND_MEM_HANDLE_HPP
/* =========================================================================
Copyright (c) 2010-2012, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp rupp@iue.tuwien.ac.at
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/** @file viennacl/backend/mem_handle.hpp
@brief Implements the multi-memory-domain handle
*/
#include <vector>
#include <cassert>
#include "viennacl/forwards.h"
#include "viennacl/tools/shared_ptr.hpp"
#include "viennacl/backend/cpu_ram.hpp"
#ifdef VIENNACL_WITH_OPENCL
#include "viennacl/backend/opencl.hpp"
#endif
#ifdef VIENNACL_WITH_CUDA
#include "viennacl/backend/cuda.hpp"
#endif
namespace viennacl
{
namespace backend
{
// if a user compiles with CUDA, it is reasonable to expect that CUDA should be the default
// (OpenCL is the second choice; plain main memory is the fallback when no device backend is enabled)
#ifdef VIENNACL_WITH_CUDA
inline memory_types default_memory_type() { return CUDA_MEMORY; }
#elif defined(VIENNACL_WITH_OPENCL)
inline memory_types default_memory_type() { return OPENCL_MEMORY; }
#else
inline memory_types default_memory_type() { return MAIN_MEMORY; }
#endif
class mem_handle
{
public:
typedef viennacl::tools::shared_ptr<char> ram_handle_type;
typedef viennacl::tools::shared_ptr<char> cuda_handle_type;
/// Default-constructs an empty handle: no backend selected yet, zero size.
mem_handle() : active_handle_(MEMORY_NOT_INITIALIZED), size_in_bytes_(0) {}
/// Accessors for the main-memory (RAM) handle.
ram_handle_type & ram_handle() { return ram_handle_; }
ram_handle_type const & ram_handle() const { return ram_handle_; }
#ifdef VIENNACL_WITH_OPENCL
/// Accessors for the OpenCL buffer handle (only compiled with OpenCL support).
viennacl::ocl::handle<cl_mem> & opencl_handle() { return opencl_handle_; }
viennacl::ocl::handle<cl_mem> const & opencl_handle() const { return opencl_handle_; }
#endif
#ifdef VIENNACL_WITH_CUDA
/// Accessors for the CUDA device-memory handle (only compiled with CUDA support).
cuda_handle_type & cuda_handle() { return cuda_handle_; }
cuda_handle_type const & cuda_handle() const { return cuda_handle_; }
#endif
/// Returns which memory domain currently holds the data.
memory_types get_active_handle_id() const { return active_handle_; }
/** @brief Switches the currently active memory domain of this handle.
 *  Switching away from OpenCL or CUDA memory requires the respective backend
 *  to be compiled in; otherwise a const char* exception is thrown.
 *  @param new_id the memory domain to activate
 *  @throws const char* if the required backend is not compiled in or the current id is invalid */
void switch_active_handle_id(memory_types new_id)
{
    if (new_id != active_handle_)
    {
        if (active_handle_ == MEMORY_NOT_INITIALIZED)
            active_handle_ = new_id;
        else if (active_handle_ == MAIN_MEMORY)
        {
            active_handle_ = new_id;
        }
        else if (active_handle_ == OPENCL_MEMORY)
        {
#ifdef VIENNACL_WITH_OPENCL
            active_handle_ = new_id;
#else
            // Fix: typo "suppport" corrected in the thrown message.
            throw "compiled without OpenCL support!";
#endif
        }
        else if (active_handle_ == CUDA_MEMORY)
        {
#ifdef VIENNACL_WITH_CUDA
            active_handle_ = new_id;
#else
            // Fix: typo "suppport" corrected in the thrown message.
            throw "compiled without CUDA support!";
#endif
        }
        else
            throw "invalid new memory region!";
    }
}
bool operator==(mem_handle const & other) const
{
if (active_handle_ != other.active_handle_)
return false;
switch (active_handle_)
{
case MAIN_MEMORY:
return ram_handle_.get() == other.ram_handle_.get();
#ifdef VIENNACL_WITH_OPENCL
case OPENCL_MEMORY:
return opencl_handle_.get() == other.opencl_handle_.get();
#endif
#ifdef VIENNACL_WITH_CUDA
case CUDA_MEMORY:
return cuda_handle_.get() == other.cuda_handle_.get();
#endif
default: break;
}
return false;