Commit fe4260bb authored by Jens Petit

Rename internal virtual _method() to methodImpl()

parent 26e0c6bd
Pipeline #171490 passed with stages in 4 minutes and 53 seconds
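This commit switches the code base from a leading-underscore naming scheme for virtual implementation hooks (_apply, _evaluate, ...) to an *Impl suffix (applyImpl, evaluateImpl, ...). The pattern itself is unchanged: a public, non-virtual entry point forwards to a protected virtual hook that derived classes override (the non-virtual interface idiom). A minimal standalone sketch of the convention after the rename, using hypothetical Base/Derived classes rather than elsa types:

#include <iostream>

class Base {
public:
    // public, non-virtual interface kept under its original name
    void apply(int x) const { applyImpl(x); }

protected:
    // virtual implementation hook, now suffixed with Impl instead of a leading underscore
    virtual void applyImpl(int x) const { std::cout << "Base: " << x << '\n'; }
};

class Derived : public Base {
protected:
    void applyImpl(int x) const override { std::cout << "Derived: " << 2 * x << '\n'; }
};

int main() {
    Derived d;
    d.apply(21); // dispatches to Derived::applyImpl, prints "Derived: 42"
}

For code deriving from elsa classes outside this repository, the only change required should be the name in the override (e.g. _apply becomes applyImpl); the public apply/applyAdjoint/evaluate signatures are untouched by this commit.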
......@@ -77,12 +77,12 @@ namespace elsa
void LinearOperator<data_t>::apply(const DataContainer<data_t>& x,
DataContainer<data_t>& Ax) const
{
_apply(x, Ax);
applyImpl(x, Ax);
}
template <typename data_t>
void LinearOperator<data_t>::_apply(const DataContainer<data_t>& x,
DataContainer<data_t>& Ax) const
void LinearOperator<data_t>::applyImpl(const DataContainer<data_t>& x,
DataContainer<data_t>& Ax) const
{
if (_isLeaf) {
if (_isAdjoint) {
......@@ -150,12 +150,12 @@ namespace elsa
void LinearOperator<data_t>::applyAdjoint(const DataContainer<data_t>& y,
DataContainer<data_t>& Aty) const
{
_applyAdjoint(y, Aty);
applyAdjointImpl(y, Aty);
}
template <typename data_t>
void LinearOperator<data_t>::_applyAdjoint(const DataContainer<data_t>& y,
DataContainer<data_t>& Aty) const
void LinearOperator<data_t>::applyAdjointImpl(const DataContainer<data_t>& y,
DataContainer<data_t>& Aty) const
{
if (_isLeaf) {
if (_isAdjoint) {
......
......@@ -23,7 +23,7 @@ namespace elsa
* This class represents a linear operator A, expressed through its apply/applyAdjoint methods,
* which implement Ax and A^ty for DataContainers x,y of appropriate sizes. Concrete
* implementations of linear operators will derive from this class and override the
* _apply/_applyAdjoint methods.
* applyImpl/applyAdjointImpl methods.
*
* LinearOperator also provides functionality to support constructs like the operator expression
* A^t*B+C, where A,B,C are linear operators. This operator composition is implemented via
......@@ -79,7 +79,7 @@ namespace elsa
* \param[in] x input DataContainer (in the domain of the operator)
* \param[out] Ax output DataContainer (in the range of the operator)
*
* Please note: this method calls the method _apply that has to be overridden in derived
* Please note: this method calls the method applyImpl that has to be overridden in derived
* classes. (Why is this method not virtual itself? Because you cannot have a non-virtual
* function overloading a virtual one [apply with one vs. two arguments]).
*/
......@@ -103,7 +103,7 @@ namespace elsa
* \param[in] y input DataContainer (in the range of the operator)
* \param[out] Aty output DataContainer (in the domain of the operator)
*
* Please note: this method calls the method _applyAdjoint that has to be overridden in
* Please note: this method calls the method applyAdjointImpl that has to be overridden in
* derived classes. (Why is this method not virtual itself? Because you cannot have a
* non-virtual function overloading a virtual one [applyAdjoint with one vs. two args]).
*/
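One way to read the parenthetical in the two doc comments above (a sketch with hypothetical Op/ScaleOp types, not elsa code): if the two-argument virtual hook shared the name apply with the one-argument convenience overload, every derived class overriding it would hide the one-argument version and would need a using-declaration to restore it. Keeping the hook under a separate name, applyImpl, sidesteps the problem.

#include <iostream>

struct Op {
    // non-virtual convenience overload
    int apply(int x) const {
        int out;
        apply(x, out); // dispatches virtually through the two-argument overload
        return out;
    }
    // imagine this were the virtual hook instead of applyImpl
    virtual void apply(int x, int& out) const { out = x; }
    virtual ~Op() = default;
};

struct ScaleOp : Op {
    using Op::apply; // without this, apply(int) would be hidden by the override below
    void apply(int x, int& out) const override { out = 3 * x; }
};

int main() {
    ScaleOp s;
    std::cout << s.apply(7) << '\n'; // prints 21, but only thanks to the using-declaration
}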
......@@ -149,11 +149,11 @@ namespace elsa
bool isEqual(const LinearOperator<data_t>& other) const override;
/// the apply method that has to be overridden in derived classes
virtual void _apply(const DataContainer<data_t>& x, DataContainer<data_t>& Ax) const;
virtual void applyImpl(const DataContainer<data_t>& x, DataContainer<data_t>& Ax) const;
/// the applyAdjoint method that has to be overridden in derived classes
virtual void _applyAdjoint(const DataContainer<data_t>& y,
DataContainer<data_t>& Aty) const;
virtual void applyAdjointImpl(const DataContainer<data_t>& y,
DataContainer<data_t>& Aty) const;
private:
/// pointers to nodes in the evaluation tree
......
......@@ -24,12 +24,12 @@ public:
}
protected:
void _apply(const DataContainer<data_t>& x, DataContainer<data_t>& Ax) const override
void applyImpl(const DataContainer<data_t>& x, DataContainer<data_t>& Ax) const override
{
Ax = 1;
}
void _applyAdjoint(const DataContainer<data_t>& y, DataContainer<data_t>& Aty) const override
void applyAdjointImpl(const DataContainer<data_t>& y, DataContainer<data_t>& Aty) const override
{
Aty = 3;
}
......
......@@ -58,7 +58,7 @@ namespace elsa
}
template <typename data_t>
data_t EmissionLogLikelihood<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t EmissionLogLikelihood<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
auto result = static_cast<data_t>(0.0);
......@@ -74,7 +74,7 @@ namespace elsa
}
template <typename data_t>
void EmissionLogLikelihood<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void EmissionLogLikelihood<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
for (index_t i = 0; i < Rx.getSize(); ++i) {
data_t temp = Rx[i];
......@@ -87,7 +87,7 @@ namespace elsa
template <typename data_t>
LinearOperator<data_t>
EmissionLogLikelihood<data_t>::_getHessian(const DataContainer<data_t>& Rx)
EmissionLogLikelihood<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
DataContainer<data_t> scaleFactors(Rx.getDataDescriptor());
for (index_t i = 0; i < Rx.getSize(); ++i) {
......
......@@ -69,13 +69,13 @@ namespace elsa
protected:
/// the evaluation of the emission log-likelihood
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
EmissionLogLikelihood<data_t>* cloneImpl() const override;
......
......@@ -40,11 +40,11 @@ namespace elsa
// optimize for trivial LinearResiduals (no extra copy for residual result needed then)
if (auto* linearResidual = dynamic_cast<LinearResidual<data_t>*>(_residual.get())) {
if (!linearResidual->hasOperator() && !linearResidual->hasDataVector())
return _evaluate(x);
return evaluateImpl(x);
}
// in all other cases: evaluate the residual first, then call our virtual _evaluate
return _evaluate(_residual->evaluate(x));
return evaluateImpl(_residual->evaluate(x));
}
template <typename data_t>
......@@ -69,7 +69,7 @@ namespace elsa
// if trivial, no extra copy for residual result needed (and no chain rule)
if (!linearResidual->hasOperator() && !linearResidual->hasDataVector()) {
result = x;
_getGradientInPlace(result);
getGradientInPlaceImpl(result);
return;
}
......@@ -77,14 +77,14 @@ namespace elsa
if (!linearResidual->hasOperator()) {
linearResidual->evaluate(x,
result); // sizes of x and result will match in this case
_getGradientInPlace(result);
getGradientInPlaceImpl(result);
return;
}
}
// the general case
auto temp = _residual->evaluate(x);
_getGradientInPlace(temp);
getGradientInPlaceImpl(temp);
_residual->getJacobian(x).applyAdjoint(temp, result); // apply the chain rule
}
......@@ -95,16 +95,16 @@ namespace elsa
if (auto* linearResidual = dynamic_cast<LinearResidual<data_t>*>(_residual.get())) {
// if trivial, no extra copy for residual result needed (and no chain rule)
if (!linearResidual->hasOperator() && !linearResidual->hasDataVector())
return _getHessian(x);
return getHessianImpl(x);
// if no operator, no need for chain rule
if (!linearResidual->hasOperator())
return _getHessian(_residual->evaluate(x));
return getHessianImpl(_residual->evaluate(x));
}
// the general case (with chain rule)
auto jacobian = _residual->getJacobian(x);
auto hessian = adjoint(jacobian) * (_getHessian(_residual->evaluate(x))) * (jacobian);
auto hessian = adjoint(jacobian) * (getHessianImpl(_residual->evaluate(x))) * (jacobian);
return hessian;
}
......
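For reference, the chain rule that getGradient and getHessian above implement, writing F(x) = f(R(x)) for the functional f composed with the residual R, and J_R for the Jacobian of R (standard notation added here, not taken from the elsa documentation):

\nabla F(x) = J_R(x)^{\mathsf T}\, \nabla f\bigl(R(x)\bigr)
\nabla^2 F(x) \approx J_R(x)^{\mathsf T}\, \nabla^2 f\bigl(R(x)\bigr)\, J_R(x)

The gradient line corresponds to getGradientInPlaceImpl followed by applyAdjoint of the Jacobian; the Hessian line corresponds to adjoint(jacobian) * getHessianImpl(...) * jacobian. It is exact on the LinearResidual fast paths and otherwise a Gauss-Newton style approximation that drops the term involving the second derivative of R.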
......@@ -125,7 +125,7 @@ namespace elsa
* Please note: the evaluation of the residual is already performed in evaluate, so this
* method only has to compute the functional's value itself.
*/
virtual data_t _evaluate(const DataContainer<data_t>& Rx) = 0;
virtual data_t evaluateImpl(const DataContainer<data_t>& Rx) = 0;
/**
* \brief the _getGradientInPlace method that has to be overridden in derived classes
......@@ -137,7 +137,7 @@ namespace elsa
* as the application of the chain rule. This method here only has to compute the gradient
* of the functional itself, in an in-place manner (to avoid unnecessary DataContainers).
*/
virtual void _getGradientInPlace(DataContainer<data_t>& Rx) = 0;
virtual void getGradientInPlaceImpl(DataContainer<data_t>& Rx) = 0;
/**
* \brief the _getHessian method that has to be overridden in derived classes
......@@ -150,6 +150,6 @@ namespace elsa
* as the application of the chain rule. This method here only has to compute the Hessian of
* the functional itself.
*/
virtual LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) = 0;
virtual LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) = 0;
};
} // namespace elsa
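A standalone sketch of how a concrete functional plugs into the renamed hooks, with hypothetical SimpleFunctional/HalfSquaredL2 classes and std::vector standing in for DataContainer (not elsa code): the public evaluate() stays in the base class and only the protected *Impl hook is overridden, mirroring what L2NormPow2 does further down in this diff.

#include <iostream>
#include <vector>

class SimpleFunctional {
public:
    virtual ~SimpleFunctional() = default;

    // public entry point: handles the (here trivial) residual, then delegates
    double evaluate(const std::vector<double>& x) const { return evaluateImpl(residual(x)); }

protected:
    // the renamed hook that derived classes override
    virtual double evaluateImpl(const std::vector<double>& Rx) const = 0;

private:
    // trivial residual R(x) = x, standing in for elsa's Residual machinery
    std::vector<double> residual(const std::vector<double>& x) const { return x; }
};

class HalfSquaredL2 : public SimpleFunctional {
protected:
    double evaluateImpl(const std::vector<double>& Rx) const override {
        double sum = 0.0;
        for (double v : Rx)
            sum += v * v;
        return 0.5 * sum;
    }
};

int main() {
    HalfSquaredL2 f;
    std::cout << f.evaluate({3.0, 4.0}) << '\n'; // prints 12.5
}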
......@@ -25,7 +25,7 @@ namespace elsa
}
template <typename data_t>
data_t Huber<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t Huber<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
// note: this is currently not a reduction in DataContainer, but implemented here "manually"
......@@ -43,7 +43,7 @@ namespace elsa
}
template <typename data_t>
void Huber<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void Huber<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
for (index_t i = 0; i < Rx.getSize(); ++i) {
data_t value = Rx[i];
......@@ -56,7 +56,7 @@ namespace elsa
}
template <typename data_t>
LinearOperator<data_t> Huber<data_t>::_getHessian(const DataContainer<data_t>& Rx)
LinearOperator<data_t> Huber<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
DataContainer<data_t> scaleFactors(Rx.getDataDescriptor());
for (index_t i = 0; i < Rx.getSize(); ++i) {
......
......@@ -47,13 +47,13 @@ namespace elsa
protected:
/// the evaluation of the Huber norm
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
Huber<data_t>* cloneImpl() const override;
......
......@@ -16,19 +16,19 @@ namespace elsa
}
template <typename data_t>
data_t L1Norm<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t L1Norm<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
return Rx.l1Norm();
}
template <typename data_t>
void L1Norm<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void L1Norm<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
throw std::logic_error("L1Norm: not differentiable, so no gradient! (busted!)");
}
template <typename data_t>
LinearOperator<data_t> L1Norm<data_t>::_getHessian(const DataContainer<data_t>& Rx)
LinearOperator<data_t> L1Norm<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
throw std::logic_error("L1Norm: not differentiable, so no Hessian! (busted!)");
}
......
......@@ -43,13 +43,13 @@ namespace elsa
protected:
/// the evaluation of the l1 norm
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
L1Norm<data_t>* cloneImpl() const override;
......
......@@ -17,19 +17,19 @@ namespace elsa
}
template <typename data_t>
data_t L2NormPow2<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t L2NormPow2<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
return static_cast<data_t>(0.5) * Rx.squaredL2Norm();
}
template <typename data_t>
void L2NormPow2<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void L2NormPow2<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
// gradient is Rx itself (no need for self-assignment)
}
template <typename data_t>
LinearOperator<data_t> L2NormPow2<data_t>::_getHessian(const DataContainer<data_t>& Rx)
LinearOperator<data_t> L2NormPow2<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
return leaf(Identity<data_t>(Rx.getDataDescriptor()));
}
......
......@@ -42,13 +42,13 @@ namespace elsa
protected:
/// the evaluation of the l2 norm (squared)
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
L2NormPow2<data_t>* cloneImpl() const override;
......
......@@ -16,19 +16,19 @@ namespace elsa
}
template <typename data_t>
data_t LInfNorm<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t LInfNorm<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
return Rx.lInfNorm();
}
template <typename data_t>
void LInfNorm<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void LInfNorm<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
throw std::logic_error("LInfNorm: not differentiable, so no gradient! (busted!)");
}
template <typename data_t>
LinearOperator<data_t> LInfNorm<data_t>::_getHessian(const DataContainer<data_t>& Rx)
LinearOperator<data_t> LInfNorm<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
throw std::logic_error("LInfNorm: not differentiable, so no Hessian! (busted!)");
}
......
......@@ -43,13 +43,13 @@ namespace elsa
protected:
/// the evaluation of the linf norm
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
LInfNorm<data_t>* cloneImpl() const override;
......
......@@ -119,8 +119,8 @@ namespace elsa
}
template <typename data_t>
void LinearResidual<data_t>::_evaluate(const DataContainer<data_t>& x,
DataContainer<data_t>& result)
void LinearResidual<data_t>::evaluateImpl(const DataContainer<data_t>& x,
DataContainer<data_t>& result)
{
if (_hasOperator)
_operator->apply(x, result);
......@@ -132,7 +132,7 @@ namespace elsa
}
template <typename data_t>
LinearOperator<data_t> LinearResidual<data_t>::_getJacobian(const DataContainer<data_t>& x)
LinearOperator<data_t> LinearResidual<data_t>::getJacobianImpl(const DataContainer<data_t>& x)
{
if (_hasOperator)
return leaf(*_operator);
......
......@@ -72,7 +72,7 @@ namespace elsa
bool isEqual(const Residual<data_t>& other) const override;
/// the evaluate method, evaluating the residual at x and placing the value in result
void _evaluate(const DataContainer<data_t>& x, DataContainer<data_t>& result) override;
void evaluateImpl(const DataContainer<data_t>& x, DataContainer<data_t>& result) override;
/**
* \brief return the Jacobian (first derivative) of the linear residual at x.
......@@ -84,7 +84,7 @@ namespace elsa
* If A is set, then the Jacobian is A and this returns a copy of A.
* If A is not set, then an Identity operator is returned.
*/
LinearOperator<data_t> _getJacobian(const DataContainer<data_t>& x) override;
LinearOperator<data_t> getJacobianImpl(const DataContainer<data_t>& x) override;
private:
/// flag if operator A is present
......
......@@ -25,7 +25,7 @@ namespace elsa
}
template <typename data_t>
data_t PseudoHuber<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t PseudoHuber<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
// note: this is currently not a reduction in DataContainer, but implemented here "manually"
......@@ -42,7 +42,7 @@ namespace elsa
}
template <typename data_t>
void PseudoHuber<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void PseudoHuber<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
for (index_t i = 0; i < Rx.getSize(); ++i) {
data_t temp = Rx[i] / _delta;
......@@ -51,7 +51,7 @@ namespace elsa
}
template <typename data_t>
LinearOperator<data_t> PseudoHuber<data_t>::_getHessian(const DataContainer<data_t>& Rx)
LinearOperator<data_t> PseudoHuber<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
DataContainer<data_t> scaleFactors(Rx.getDataDescriptor());
for (index_t i = 0; i < Rx.getSize(); ++i) {
......
......@@ -48,13 +48,13 @@ namespace elsa
protected:
/// the evaluation of the Huber norm
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
PseudoHuber<data_t>* cloneImpl() const override;
......
......@@ -36,7 +36,7 @@ namespace elsa
}
template <typename data_t>
data_t Quadric<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t Quadric<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
data_t xtAx;
......@@ -55,13 +55,13 @@ namespace elsa
}
template <typename data_t>
void Quadric<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void Quadric<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
Rx = _linearResidual.evaluate(Rx);
}
template <typename data_t>
LinearOperator<data_t> Quadric<data_t>::_getHessian(const DataContainer<data_t>& Rx)
LinearOperator<data_t> Quadric<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
if (_linearResidual.hasOperator())
return leaf(_linearResidual.getOperator());
......
......@@ -68,13 +68,13 @@ namespace elsa
protected:
/// the evaluation of the Quadric functional
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;
/// the computation of the Hessian
LinearOperator<data_t> _getHessian(const DataContainer<data_t>& Rx) override;
LinearOperator<data_t> getHessianImpl(const DataContainer<data_t>& Rx) override;
/// implement the polymorphic clone operation
Quadric<data_t>* cloneImpl() const override;
......
......@@ -38,13 +38,13 @@ namespace elsa
|| result.getDataDescriptor() != getRangeDescriptor())
throw std::invalid_argument("Residual::evaluate: argument sizes do not match residual");
_evaluate(x, result);
evaluateImpl(x, result);
}
template <typename data_t>
LinearOperator<data_t> Residual<data_t>::getJacobian(const DataContainer<data_t>& x)
{
return _getJacobian(x);
return getJacobianImpl(x);
}
template <typename data_t>
......
......@@ -91,9 +91,10 @@ namespace elsa
bool isEqual(const Residual<data_t>& other) const override;
/// the evaluate method that has to be overridden in derived classes
virtual void _evaluate(const DataContainer<data_t>& x, DataContainer<data_t>& result) = 0;
virtual void evaluateImpl(const DataContainer<data_t>& x,
DataContainer<data_t>& result) = 0;
/// the getJacobian method that has to be overridden in derived classes
virtual LinearOperator<data_t> _getJacobian(const DataContainer<data_t>& x) = 0;
virtual LinearOperator<data_t> getJacobianImpl(const DataContainer<data_t>& x) = 0;
};
} // namespace elsa
......@@ -71,7 +71,7 @@ namespace elsa
}
template <typename data_t>
data_t TransmissionLogLikelihood<data_t>::_evaluate(const DataContainer<data_t>& Rx)
data_t TransmissionLogLikelihood<data_t>::evaluateImpl(const DataContainer<data_t>& Rx)
{
auto result = static_cast<data_t>(0.0);
......@@ -87,7 +87,7 @@ namespace elsa
}
template <typename data_t>
void TransmissionLogLikelihood<data_t>::_getGradientInPlace(DataContainer<data_t>& Rx)
void TransmissionLogLikelihood<data_t>::getGradientInPlaceImpl(DataContainer<data_t>& Rx)
{
for (index_t i = 0; i < Rx.getSize(); ++i) {
data_t temp = (*_b)[i] * std::exp(-Rx[i]);
......@@ -102,7 +102,7 @@ namespace elsa
template <typename data_t>
LinearOperator<data_t>
TransmissionLogLikelihood<data_t>::_getHessian(const DataContainer<data_t>& Rx)
TransmissionLogLikelihood<data_t>::getHessianImpl(const DataContainer<data_t>& Rx)
{
DataContainer<data_t> scaleFactors(Rx.getDataDescriptor());
for (index_t i = 0; i < Rx.getSize(); ++i) {
......
......@@ -78,13 +78,13 @@ namespace elsa
protected:
/// the evaluation of the transmission log-likelihood
data_t _evaluate(const DataContainer<data_t>& Rx) override;
data_t evaluateImpl(const DataContainer<data_t>& Rx) override;
/// the computation of the gradient (in place)
void _getGradientInPlace(DataContainer<data_t>& Rx) override;
void getGradientInPlaceImpl(DataContainer<data_t>& Rx) override;