Commit 6890c79e authored by Tobias Lasser's avatar Tobias Lasser
Browse files

second pre-release commit

parent 1a751d49
Pipeline #129818 passed with stages
in 4 minutes and 26 seconds
......@@ -12,6 +12,9 @@ add_subdirectory(core)
add_subdirectory(logging)
add_subdirectory(io)
add_subdirectory(operators)
add_subdirectory(functionals)
add_subdirectory(problems)
add_subdirectory(solvers)
# propagate the variable to the parent scope
......
......@@ -114,9 +114,21 @@ namespace elsa {
}
/// return the squared l2 norm of this signal (the dot product of the signal
/// with itself), delegating the reduction to the underlying data handler
template <typename data_t>
data_t DataContainer<data_t>::squaredL2Norm() const
{
    return _dataHandler->squaredL2Norm();
}
/// return the l1 norm of this signal (sum of absolute values); the actual
/// reduction is performed by the backing data handler
template <typename data_t>
data_t DataContainer<data_t>::l1Norm() const
{
    const auto& handler = *_dataHandler;
    return handler.l1Norm();
}
/// return the linf norm of this signal (maximum of absolute values); the
/// actual reduction is performed by the backing data handler
template <typename data_t>
data_t DataContainer<data_t>::lInfNorm() const
{
    const auto& handler = *_dataHandler;
    return handler.lInfNorm();
}
template<typename data_t>
......
......@@ -110,7 +110,13 @@ namespace elsa
data_t dot(const DataContainer<data_t>& other) const;
/// return the squared l2 norm of this signal (dot product with itself)
data_t squaredNorm() const;
data_t squaredL2Norm() const;
/// return the l1 norm of this signal (sum of absolute values)
data_t l1Norm() const;
/// return the linf norm of this signal (maximum of absolute values)
data_t lInfNorm() const;
/// return the sum of all elements of this signal
data_t sum() const;
......
......@@ -37,7 +37,13 @@ namespace elsa
virtual data_t dot(const DataHandler<data_t>& v) const = 0;
/// return the squared l2 norm of the data vector (dot product with itself)
virtual data_t squaredNorm() const = 0;
virtual data_t squaredL2Norm() const = 0;
/// return the l1 norm of the data vector (sum of absolute values)
virtual data_t l1Norm() const = 0;
/// return the linf norm of the data vector (maximum of absolute values)
virtual data_t lInfNorm() const = 0;
/// return the sum of all elements of the data vector
virtual data_t sum() const = 0;
......
......@@ -53,11 +53,23 @@ namespace elsa
}
/// return the squared l2 norm of the data vector; Eigen computes this as the
/// dot product of the vector with itself
template <typename data_t>
data_t DataHandlerCPU<data_t>::squaredL2Norm() const
{
    return _data.squaredNorm();
}
/// return the l1 norm of the data vector (sum of absolute values),
/// evaluated coefficient-wise by Eigen
template <typename data_t>
data_t DataHandlerCPU<data_t>::l1Norm() const
{
    return _data.cwiseAbs().sum();
}
/// return the linf norm of the data vector (largest absolute coefficient),
/// evaluated coefficient-wise by Eigen
template <typename data_t>
data_t DataHandlerCPU<data_t>::lInfNorm() const
{
    return _data.cwiseAbs().maxCoeff();
}
template <typename data_t>
data_t DataHandlerCPU<data_t>::sum() const
{
......
......@@ -63,7 +63,14 @@ namespace elsa
data_t dot(const DataHandler<data_t>& v) const override;
/// return the squared l2 norm of the data vector (dot product with itself)
data_t squaredNorm() const override;
data_t squaredL2Norm() const override;
/// return the l1 norm of the data vector (sum of absolute values)
data_t l1Norm() const override;
/// return the linf norm of the data vector (maximum of absolute values)
data_t lInfNorm() const override;
/// return the sum of all elements of the data vector
data_t sum() const override;
......
#include "LinearOperator.h"
#include <stdexcept>
#include <typeinfo>
namespace elsa
{
/// construct an operator from its domain and range descriptors; the
/// descriptors are deep-copied so the operator owns its own metadata
template <typename data_t>
LinearOperator<data_t>::LinearOperator(const DataDescriptor& domainDescriptor,
                                       const DataDescriptor& rangeDescriptor)
    : _domainDescriptor{domainDescriptor.clone()}, _rangeDescriptor{rangeDescriptor.clone()}
{
}
/// copy constructor: deep-copies the descriptors and node flags, then clones
/// the children of the evaluation tree according to the node type
template <typename data_t>
LinearOperator<data_t>::LinearOperator(const LinearOperator<data_t>& other)
: _domainDescriptor{other._domainDescriptor->clone()}, _rangeDescriptor{other._rangeDescriptor->clone()},
_isLeaf{other._isLeaf}, _isAdjoint{other._isAdjoint}, _isComposite{other._isComposite}, _mode{other._mode}
{
// a leaf node owns only the wrapped operator in _lhs
if (_isLeaf)
_lhs = other._lhs->clone();
// a composite node owns both sub-trees
if (_isComposite) {
_lhs = other._lhs->clone();
_rhs = other._rhs->clone();
}
}
/// copy assignment: deep-copies descriptors, flags and evaluation-tree
/// children from other; a no-op when both operators already compare equal
template <typename data_t>
LinearOperator<data_t>& LinearOperator<data_t>::operator=(const LinearOperator<data_t>& other)
{
    if (*this != other) {
        _domainDescriptor = other._domainDescriptor->clone();
        _rangeDescriptor = other._rangeDescriptor->clone();
        _isLeaf = other._isLeaf;
        _isAdjoint = other._isAdjoint;
        _isComposite = other._isComposite;
        _mode = other._mode;

        // drop children from the previous state first: otherwise assigning a
        // leaf over a composite would keep a stale _rhs clone alive (and a
        // stale _lhs if the new state is neither leaf nor composite)
        _lhs.reset();
        _rhs.reset();

        if (_isLeaf)
            _lhs = other._lhs->clone();
        if (_isComposite) {
            _lhs = other._lhs->clone();
            _rhs = other._rhs->clone();
        }
    }
    return *this;
}
template <typename data_t>
const DataDescriptor& LinearOperator<data_t>::getDomainDescriptor() const
......@@ -40,14 +78,25 @@ namespace elsa
template <typename data_t>
void LinearOperator<data_t>::_apply(const DataContainer<data_t>& x, DataContainer<data_t>& Ax)
{
if (_isAdjoint) {
// sanity check the arguments for the intended evaluation tree leaf operation
if (_lhs->getRangeDescriptor().getNumberOfCoefficients() != x.getSize() ||
_lhs->getDomainDescriptor().getNumberOfCoefficients() != Ax.getSize())
throw std::invalid_argument("LinearOperator::apply: incorrect input/output sizes for adjoint leaf");
_lhs->applyAdjoint(x, Ax);
return;
if (_isLeaf) {
if (_isAdjoint) {
// sanity check the arguments for the intended evaluation tree leaf operation
if (_lhs->getRangeDescriptor().getNumberOfCoefficients() != x.getSize() ||
_lhs->getDomainDescriptor().getNumberOfCoefficients() != Ax.getSize())
throw std::invalid_argument("LinearOperator::apply: incorrect input/output sizes for adjoint leaf");
_lhs->applyAdjoint(x, Ax);
return;
}
else {
// sanity check the arguments for the intended evaluation tree leaf operation
if (_lhs->getDomainDescriptor().getNumberOfCoefficients() != x.getSize() ||
_lhs->getRangeDescriptor().getNumberOfCoefficients() != Ax.getSize())
throw std::invalid_argument("LinearOperator::apply: incorrect input/output sizes for leaf");
_lhs->apply(x, Ax);
return;
}
}
if (_isComposite) {
......@@ -99,14 +148,26 @@ namespace elsa
template <typename data_t>
void LinearOperator<data_t>::_applyAdjoint(const DataContainer<data_t>& y, DataContainer<data_t>& Aty)
{
if (_isAdjoint) {
// sanity check the arguments for the intended evaluation tree leaf operation
if (_lhs->getDomainDescriptor().getNumberOfCoefficients() != y.getSize() ||
_lhs->getRangeDescriptor().getNumberOfCoefficients() != Aty.getSize())
throw std::invalid_argument("LinearOperator::applyAdjoint: incorrect input/output sizes for adjoint leaf");
_lhs->apply(y, Aty);
return;
if (_isLeaf) {
if (_isAdjoint) {
// sanity check the arguments for the intended evaluation tree leaf operation
if (_lhs->getDomainDescriptor().getNumberOfCoefficients() != y.getSize() ||
_lhs->getRangeDescriptor().getNumberOfCoefficients() != Aty.getSize())
throw std::invalid_argument(
"LinearOperator::applyAdjoint: incorrect input/output sizes for adjoint leaf");
_lhs->apply(y, Aty);
return;
}
else {
// sanity check the arguments for the intended evaluation tree leaf operation
if (_lhs->getRangeDescriptor().getNumberOfCoefficients() != y.getSize() ||
_lhs->getDomainDescriptor().getNumberOfCoefficients() != Aty.getSize())
throw std::invalid_argument("LinearOperator::applyAdjoint: incorrect input/output sizes for leaf");
_lhs->applyAdjoint(y, Aty);
return;
}
}
if (_isComposite) {
......@@ -143,8 +204,8 @@ namespace elsa
template <typename data_t>
LinearOperator<data_t>* LinearOperator<data_t>::cloneImpl() const
{
if (_isAdjoint)
return new LinearOperator<data_t>(*_lhs);
if (_isLeaf)
return new LinearOperator<data_t>(*_lhs, _isAdjoint);
if (_isComposite)
return new LinearOperator<data_t>(*_lhs, *_rhs, _mode);
......@@ -159,8 +220,8 @@ namespace elsa
*_rangeDescriptor != *other._rangeDescriptor)
return false;
if (_isAdjoint)
return other._isAdjoint && (*_lhs == *other._lhs);
if (_isLeaf)
return other._isLeaf && (_isAdjoint == other._isAdjoint) && (*_lhs == *other._lhs);
if (_isComposite)
return other._isComposite && _mode == other._mode
......@@ -170,10 +231,12 @@ namespace elsa
}
/// constructor producing a leaf node that wraps op; for an adjoint leaf
/// (isAdjoint == true) the domain and range descriptors are swapped
template <typename data_t>
LinearOperator<data_t>::LinearOperator(const LinearOperator<data_t>& op, bool isAdjoint)
    : _domainDescriptor{(isAdjoint) ? op.getRangeDescriptor().clone() : op.getDomainDescriptor().clone()},
      _rangeDescriptor{(isAdjoint) ? op.getDomainDescriptor().clone() : op.getRangeDescriptor().clone()},
      _lhs{op.clone()}, _isLeaf{true}, _isAdjoint{isAdjoint}
{
}
template <typename data_t>
LinearOperator<data_t>::LinearOperator(const LinearOperator<data_t>& lhs,
......
......@@ -21,7 +21,7 @@ namespace elsa
*
* This class represents a linear operator A, expressed through its apply/applyAdjoint methods,
* which implement Ax and A^ty for DataContainers x,y of appropriate sizes. Concrete implementations
* of linear operators will derive from this class and override the _apply/_applyAdjoint methods.
*
* LinearOperator also provides functionality to support constructs like the operator expression A^t*B+C,
* where A,B,C are linear operators. This operator composition is implemented via evaluation trees.
......@@ -44,6 +44,12 @@ namespace elsa
/// default destructor
~LinearOperator() override = default;
/// copy construction
LinearOperator(const LinearOperator<data_t>& other);
/// copy assignment
LinearOperator<data_t>& operator=(const LinearOperator<data_t>& other);
/// return the domain DataDescriptor
const DataDescriptor& getDomainDescriptor() const;
......@@ -68,6 +74,9 @@ namespace elsa
* \param[in] x input DataContainer (in the domain of the operator)
* \param[out] Ax output DataContainer (in the range of the operator)
*
* Please note: this method calls the method _apply that has to be overridden in derived
* classes. (Why is this method not virtual itself? Because you cannot have a non-virtual
* function overloading a virtual one [apply with one vs. two arguments]).
*/
void apply(const DataContainer<data_t>& x, DataContainer<data_t>& Ax);
......@@ -88,6 +97,10 @@ namespace elsa
*
* \param[in] y input DataContainer (in the range of the operator)
* \param[out] A^ty output DataContainer (in the domain of the operator)
*
* Please note: this method calls the method _applyAdjoint that has to be overridden in
* derived classes. (Why is this method not virtual itself? Because you cannot have a
* non-virtual function overloading a virtual one [applyAdjoint with one vs. two args]).
*/
void applyAdjoint(const DataContainer<data_t>& y, DataContainer<data_t>& Aty);
......@@ -104,7 +117,12 @@ namespace elsa
/// friend function to return the adjoint of a LinearOperator (and its derivatives);
/// produces an adjoint leaf node wrapping op
friend LinearOperator<data_t> adjoint(const LinearOperator<data_t>& op) {
    return LinearOperator(op, true);
}
/// friend function to return a leaf node of a LinearOperator (and its derivatives);
/// wraps op as a non-adjoint leaf of an evaluation tree
friend LinearOperator<data_t> leaf(const LinearOperator<data_t>& op) {
    return LinearOperator<data_t>(op, false);
}
protected:
......@@ -132,6 +150,9 @@ namespace elsa
/// pointers to nodes in the evaluation tree
std::unique_ptr<LinearOperator<data_t>> _lhs{}, _rhs{};
/// flag whether this is a leaf-node
bool _isLeaf{false};
/// flag whether this is a leaf-node to implement an adjoint operator
bool _isAdjoint{false};
......@@ -145,7 +166,7 @@ namespace elsa
compositeMode _mode{compositeMode::mult};
/// constructor to produce an adjoint leaf node
LinearOperator(const LinearOperator<data_t>& op);
LinearOperator(const LinearOperator<data_t>& op, bool isAdjoint);
/// constructor to produce a composite (internal node) of the evaluation tree
LinearOperator(const LinearOperator<data_t>& lhs, const LinearOperator<data_t>& rhs, compositeMode mode);
......
......@@ -168,7 +168,9 @@ SCENARIO("Testing the reduction operations of DataContainer") {
THEN("the reductions work as expected") {
REQUIRE(dc.sum() == Approx(randVec.sum()) );
REQUIRE(dc.squaredNorm() == Approx(randVec.squaredNorm()) );
REQUIRE(dc.l1Norm() == Approx(randVec.array().abs().sum()));
REQUIRE(dc.lInfNorm() == Approx(randVec.array().abs().maxCoeff()));
REQUIRE(dc.squaredL2Norm() == Approx(randVec.squaredNorm()) );
Eigen::VectorXf randVec2 = Eigen::VectorXf::Random(dc.getSize());
DataContainer dc2(desc);
......
......@@ -69,7 +69,9 @@ SCENARIO("Testing the reduction operations of DataHandlerCPU") {
THEN("the reductions work as expected") {
REQUIRE(dh.sum() == Approx(randVec.sum()) );
REQUIRE(dh.squaredNorm() == Approx(randVec.squaredNorm()) );
REQUIRE(dh.l1Norm() == Approx(randVec.array().abs().sum()));
REQUIRE(dh.lInfNorm() == Approx(randVec.array().abs().maxCoeff()));
REQUIRE(dh.squaredL2Norm() == Approx(randVec.squaredNorm()) );
Eigen::VectorXf randVec2 = Eigen::VectorXf::Random(size);
DataHandlerCPU dh2(randVec2);
......
......@@ -57,6 +57,10 @@ SCENARIO("Constructing a LinearOperator") {
REQUIRE_THROWS_AS(linOp.applyAdjoint(dc), std::logic_error);
}
THEN("copies are good") {
auto newOp = linOp;
REQUIRE(newOp == linOp);
}
}
}
......@@ -77,12 +81,67 @@ SCENARIO("Cloning LinearOperators") {
REQUIRE(linOpClone.get() != &linOp);
REQUIRE(*linOpClone == linOp);
}
THEN("copies are also identical") {
auto newOp = *linOpClone;
REQUIRE(newOp == linOp);
}
}
}
}
SCENARIO("Adjoint LinearOperator") {
SCENARIO("Leaf LinearOperator") {
GIVEN("a non-adjoint leaf linear operator") {
IndexVector_t numCoeff(2); numCoeff << 12, 23;
IndexVector_t numCoeff2(2); numCoeff2 << 34, 45;
DataDescriptor ddDomain(numCoeff);
DataDescriptor ddRange(numCoeff2);
MockOperator mockOp(ddDomain, ddRange);
auto leafOp = leaf(mockOp);
WHEN("the operator is there") {
THEN("the descriptors are set correctly") {
REQUIRE(leafOp.getDomainDescriptor() == ddDomain);
REQUIRE(leafOp.getRangeDescriptor() == ddRange);
}
}
WHEN("given data") {
DataContainer dcDomain(ddDomain);
DataContainer dcRange(ddRange);
THEN("the apply operations return the correct result") {
auto resultApply = leafOp.apply(dcDomain);
for (int i = 0; i < resultApply.getSize(); ++i)
REQUIRE(resultApply[i] == 1);
auto resultApplyAdjoint = leafOp.applyAdjoint(dcRange);
for (int i = 0; i < resultApplyAdjoint.getSize(); ++i)
REQUIRE(resultApplyAdjoint[i] == 3);
}
THEN("the apply operations care for appropriately sized containers") {
REQUIRE_THROWS_AS(leafOp.apply(dcRange), std::invalid_argument);
REQUIRE_THROWS_AS(leafOp.applyAdjoint(dcDomain), std::invalid_argument);
}
}
WHEN("copying/assigning") {
auto newOp = leafOp;
auto assignedOp = leaf(newOp);
THEN("it should be identical") {
REQUIRE(newOp == leafOp);
REQUIRE(assignedOp == leaf(newOp));
assignedOp = newOp;
REQUIRE(assignedOp == newOp);
}
}
}
GIVEN("an adjoint linear operator") {
IndexVector_t numCoeff(2); numCoeff << 12, 23;
IndexVector_t numCoeff2(2); numCoeff2 << 34, 45;
......@@ -118,6 +177,19 @@ SCENARIO("Adjoint LinearOperator") {
REQUIRE_THROWS_AS(adjointOp.applyAdjoint(dcRange), std::invalid_argument);
}
}
WHEN("copying/assigning") {
auto newOp = adjointOp;
auto assignedOp = adjoint(newOp);
THEN("it should be identical") {
REQUIRE(newOp == adjointOp);
REQUIRE(assignedOp == adjoint(newOp));
assignedOp = newOp;
REQUIRE(assignedOp == newOp);
}
}
}
}
......@@ -160,6 +232,20 @@ SCENARIO("Composite LinearOperator") {
REQUIRE_THROWS_AS(addOp.applyAdjoint(dcDomain), std::invalid_argument);
}
}
WHEN("copying/assigning") {
auto newOp = addOp;
auto assignedOp = adjoint(newOp);
THEN("it should be identical") {
REQUIRE(newOp == addOp);
REQUIRE(assignedOp == adjoint(newOp));
assignedOp = newOp;
REQUIRE(assignedOp == newOp);
}
}
}
GIVEN("a multiplicative composite linear operator") {
......@@ -201,6 +287,20 @@ SCENARIO("Composite LinearOperator") {
REQUIRE_THROWS_AS(multOp.applyAdjoint(dcDomain), std::invalid_argument);
}
}
WHEN("copying/assigning") {
auto newOp = multOp;
auto assignedOp = adjoint(newOp);
THEN("it should be identical") {
REQUIRE(newOp == multOp);
REQUIRE(assignedOp == adjoint(newOp));
assignedOp = newOp;
REQUIRE(assignedOp == newOp);
}
}
}
GIVEN("a complex composite with multiple leafs and levels") {
......@@ -243,5 +343,19 @@ SCENARIO("Composite LinearOperator") {
REQUIRE_THROWS_AS(compositeOp.applyAdjoint(dcDomain), std::invalid_argument);
}
}
WHEN("copying/assigning") {
auto newOp = compositeOp;
auto assignedOp = adjoint(newOp);
THEN("it should be identical") {
REQUIRE(newOp == compositeOp);
REQUIRE(assignedOp == adjoint(newOp));
assignedOp = newOp;
REQUIRE(assignedOp == newOp);
}
}
}
}
\ No newline at end of file
# CMake build configuration for the elsa "functionals" module
cmake_minimum_required(VERSION 3.9)
# set the name of the module
set(ELSA_MODULE_NAME functionals)
set(ELSA_MODULE_TARGET_NAME elsa_functionals)
set(ELSA_MODULE_EXPORT_TARGET elsa_${ELSA_MODULE_NAME}Targets)
# list all the headers of the module
set(MODULE_HEADERS
Residual.h
LinearResidual.h
Functional.h
L1Norm.h
L2NormPow2.h
WeightedL2NormPow2.h
LInfNorm.h
Huber.h
PseudoHuber.h
Quadric.h
EmissionLogLikelihood.h
TransmissionLogLikelihood.h)
# list all the code files of the module
set(MODULE_SOURCES
Residual.cpp
LinearResidual.cpp
Functional.cpp
L1Norm.cpp
L2NormPow2.cpp
WeightedL2NormPow2.cpp
LInfNorm.cpp
Huber.cpp
PseudoHuber.cpp
Quadric.cpp
EmissionLogLikelihood.cpp
TransmissionLogLikelihood.cpp)
# build the module library
add_library(${ELSA_MODULE_TARGET_NAME} ${MODULE_HEADERS} ${MODULE_SOURCES})
add_library(elsa::${ELSA_MODULE_NAME} ALIAS ${ELSA_MODULE_TARGET_NAME})
# link against the elsa modules this one depends on
target_link_libraries(${ELSA_MODULE_TARGET_NAME} elsa_core elsa_logging elsa_operators)
# export include paths for both the build tree and the install tree
target_include_directories(${ELSA_MODULE_TARGET_NAME}
PUBLIC
$<INSTALL_INTERFACE:include/${ELSA_MODULE_NAME}>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
)
# require C++17
target_compile_features(${ELSA_MODULE_TARGET_NAME} PUBLIC cxx_std_17)
# build the tests (if enabled)
if(ELSA_TESTING)
add_subdirectory(tests)
endif(ELSA_TESTING)
# register the module
registerComponent(${ELSA_MODULE_NAME})
# install the module
InstallElsaModule(${ELSA_MODULE_NAME} ${ELSA_MODULE_TARGET_NAME} ${ELSA_MODULE_EXPORT_TARGET})
#include "EmissionLogLikelihood.h"
#include "Scaling.h"
#include <cmath>
#include <stdexcept>
namespace elsa
{
template <typename data_t>
EmissionLogLikelihood<data_t>::EmissionLogLikelihood(const DataDescriptor& domainDescriptor,
const DataContainer<data_t>& y, const DataContainer<data_t>& r)
: Functional<data_t>(domainDescriptor), _y{std::make_unique<DataContainer<data_t>>(y)},