Commit fceada1f authored by Jean-Matthieu Gallard

KernelGen IPO - fix the libxsmm bugs with IPO for good by excluding the gemms from the IPO
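
The libxsmm-generated gemms (gemmsCPP.cpp) contain assembly code; under Intel IPO the optimizer can wrongly mark arrays as dead because they are only read from that assembly. Rather than keeping the volatile doNotOptimizeAway workarounds and the UNSAFE_IPO escape hatch, the gemms are now always compiled without IPO while the kernels, the application and exahype/solvers keep it. A minimal sketch of the mechanism, assuming the usual Intel flags -ipo/-no-ipo and simplified source lists (the generated Makefile below collects the sources via cfiles.mk and cipofiles.mk instead):

    # sketch only: split the sources so the gemms never see the IPO flag
    CIPOSOURCES := $(shell find -L $(PROJECT_PATH) -name '*.cpp' ! -name 'gemmsCPP.cpp')
    CSOURCES    := $(shell find -L $(PROJECT_PATH) -name 'gemmsCPP.cpp')

    $(CIPOSOURCES:.cpp=.o): %.o: %.cpp
    	$(CC) $(PROJECT_CFLAGS) -ipo -DUSE_IPO -c $< -o $@   # IPO on for everything else
    $(CSOURCES:.cpp=.o): %.o: %.cpp
    	$(CC) $(PROJECT_CFLAGS) -no-ipo -c $< -o $@          # never IPO for the assembly gemms

With the gemms isolated this way, the volatile doNotOptimizeAway reads in the generated kernels become unnecessary and are removed below.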

parent 42a0d13b
......@@ -27,21 +27,16 @@
# SHAREDMEM None OMP, TBB, CPP14 Shared-memory parallelisation
# DISTRIBUTEDMEM None MPI Distributed-memory parallelisation
# BOUNDARYCONDITIONS None Periodic Type of boundary conditions
# USE_IPO Off On, Unsafe IPO during compilation (intel only)
# USE_IPO Off On IPO during compilation (intel only)
# *********************************************************************************************
SHELL = bash
tolower = $(shell echo $(1) | tr '[:upper:]' '[:lower:]')
#USE_IPO unsafe => on with unsafe flag enabled
USE_IPO_INTERN=$(call tolower,$(USE_IPO))
ifeq ($(USE_IPO),)
USE_IPO_INTERN=off
else ifeq ($(call tolower,$(USE_IPO)),unsafe)
USE_IPO_INTERN=on
IPO_UNSAFE_FLAG=-DUNSAFE_IPO
else
IPO_UNSAFE_FLAG=
endif
......@@ -493,17 +488,19 @@ ifneq (,$(findstring Yes, $(MIXEDLANG) ))
endif
ifneq ($(call tolower,$(USE_IPO_INTERN)),on)
find -H $(EXAHYPE_PATH)'/exahype/solvers' -name '*.cpp' | tr '\n' ' ' >> cfiles.mk
find -L $(PROJECT_PATH) -name '*.cpp' | tr '\n' ' ' >> cfiles.mk
find -L $(PROJECT_PATH) -name '*.cpp' -and ! -name 'gemmsCPP.cpp' | tr '\n' ' ' >> cfiles.mk
endif
# explicitly find and compile the gemms without IPO in all cases
find -L $(PROJECT_PATH) -name 'gemmsCPP.cpp' | tr '\n' ' ' >> cfiles.mk
# ipo for kernel + application + exahype/solvers
# ipo for kernel + application + exahype/solvers. Explicitly exclude the gemms to avoid an IPO bug due to their assembly code!
cipofiles.mk:
touch cipofiles.mk
echo -n CIPOSOURCES= > cipofiles.mk
ifeq ($(call tolower,$(USE_IPO_INTERN)),on)
find -H $(EXAHYPE_PATH)'/exahype/solvers' -name '*.cpp' | tr '\n' ' ' >> cipofiles.mk
find -L $(PROJECT_PATH) -name '*.cpp' | tr '\n' ' ' >> cipofiles.mk
find -L $(PROJECT_PATH) -name '*.cpp' -and ! -name 'gemmsCPP.cpp' | tr '\n' ' ' >> cipofiles.mk
endif
ffiles.mk:
......@@ -626,7 +623,7 @@ $(COBJECTS): %.o : %.cpp
$(CC) $(COMPILER_CFLAGS) $(NO_IPO_FLAG) $(SYSTEM_CFLAGS) $(PROJECT_CFLAGS) -I$(PEANO_KERNEL_PEANO_PATH)/../ -I$(PEANO_KERNEL_TARCH_PATH)/../ -I$(PROJECT_PATH) -I$(PEANO_TOOLBOX_MPI_BLANCING_PATH)/../ -I$(PEANO_TOOLBOX_SHAREDMEMORY_ORACLES_PATH)/../ -I$(PEANO_TOOLBOX_MULTISCALELINKEDCELL_PATH)/../ -I$(EXAHYPE_PATH) -I$(PROJECT_PATH) -c $< -o $@
$(CIPOOBJECTS): %.o : %.cpp
$(CC) $(COMPILER_CFLAGS) $(IPO_FLAG) -DUSE_IPO $(IPO_UNSAFE_FLAG) $(SYSTEM_CFLAGS) $(PROJECT_CFLAGS) -I$(PEANO_KERNEL_PEANO_PATH)/../ -I$(PEANO_KERNEL_TARCH_PATH)/../ -I$(PROJECT_PATH) -I$(PEANO_TOOLBOX_MPI_BLANCING_PATH)/../ -I$(PEANO_TOOLBOX_SHAREDMEMORY_ORACLES_PATH)/../ -I$(PEANO_TOOLBOX_MULTISCALELINKEDCELL_PATH)/../ -I$(EXAHYPE_PATH) -I$(PROJECT_PATH) -c $< -o $@
$(CC) $(COMPILER_CFLAGS) $(IPO_FLAG) -DUSE_IPO $(SYSTEM_CFLAGS) $(PROJECT_CFLAGS) -I$(PEANO_KERNEL_PEANO_PATH)/../ -I$(PEANO_KERNEL_TARCH_PATH)/../ -I$(PROJECT_PATH) -I$(PEANO_TOOLBOX_MPI_BLANCING_PATH)/../ -I$(PEANO_TOOLBOX_SHAREDMEMORY_ORACLES_PATH)/../ -I$(PEANO_TOOLBOX_MULTISCALELINKEDCELL_PATH)/../ -I$(EXAHYPE_PATH) -I$(PROJECT_PATH) -c $< -o $@
$(FOBJECTS): %.o : %.f90
$(FC) $(FCOMPILER_CFLAGS) $(PROJECT_CFLAGS) -I$(PROJECT_PATH) -c $< -o $@
......@@ -106,14 +106,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral{{nameSuffix}}(
float dudx_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
float negativeDudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
float negativeDudx_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile float doNotOptimizeAway1 = dudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile float doNotOptimizeAway2 = dudx_by_dx[0]; // used to prevent the compiler from optimizing dudx_by_dx away
volatile float doNotOptimizeAway3 = negativeDudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile float doNotOptimizeAway4 = negativeDudx_by_dx[0]; // used to prevent the compiler from optimizing dudx_by_dx away
#endif
{% endif %}
// buffer for output
{{ m.allocateArray("lduh_SP", (nDof**nDim)*nVarPad, precision="float", forceStack=True) }}{##}
......
......@@ -76,11 +76,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral{{nameSuffix}}(
{% endif %}
double tmpArray[{{tmpArraySize}}] __attribute__((aligned(ALIGNMENT))); //used by flux (nDof*nVarPad) and ncp (nVarPad*nDim)
double dudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
double doNotOptimizeAway = dudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
#endif
{% endif %}
{% if useFluxVect or useNCPVect or useMaterialParamVect%}
// transposed F slice for flux_vect, or used by ncp_vect as gradQt
{{m.vectPDEsArrays('Ft', nVar, True) | indent(2)}}{##}
......
......@@ -103,14 +103,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral{{nameSuffix}}(
double dudx_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
double negativeDudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
double negativeDudx_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway1 = dudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile double doNotOptimizeAway2 = dudx_by_dx[0]; // used to prevent the compiler from optimizing dudx_by_dx away
volatile double doNotOptimizeAway3 = negativeDudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile double doNotOptimizeAway4 = negativeDudx_by_dx[0]; // used to prevent the compiler from optimizing dudx_by_dx away
#endif
{% endif %}
{% if usePointSources %}
{ // 0. compute point source contribution
......
......@@ -103,12 +103,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral{{nameSuffix}}(
double tmpArray[{{nVarPad*nDof}}] __attribute__((aligned(ALIGNMENT))); //used by flux and ncp
double dudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
double negativeDudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway1 = dudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile double doNotOptimizeAway2 = negativeDudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
#endif
{% endif %}
{% if usePointSources %}
{ // 0. compute point source contribution
......
......@@ -106,14 +106,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral{{nameSuffix}}(
double dudx_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
double negativeDudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
double negativeDudx_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway1 = dudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile double doNotOptimizeAway2 = dudx_by_dx[0]; // used to prevent the compiler from optimizing dudx_by_dx away
volatile double doNotOptimizeAway3 = negativeDudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
volatile double doNotOptimizeAway4 = negativeDudx_by_dx[0]; // used to prevent the compiler from optimizing dudx_by_dx away
#endif
{% endif %}
{% if usePointSources %}
{ // 0. compute point source contribution
......
......@@ -94,11 +94,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral{{nameSuffix}}(
{% endif %}
double tmpArray[{{tmpArraySize}}] __attribute__((aligned(ALIGNMENT))); //used by flux (nDof*nVarPad) and ncp (nVarPad*nDim)
double dudxT_by_dx[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
double doNotOptimizeAway = dudxT_by_dx[0]; // used to prevent the compiler from optimizing dudxT_by_dx away
#endif
{% endif %}
{% if useVectPDE %}
// todo allocate tmp array
{% endif %}
......
......@@ -117,11 +117,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
for(int it=0;it<{{nDof*nDofPad}};it++) {
dudxT_by_dx[it] = inverseDx * dudx_T[it];
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_dudx_by_dt = dudxT_by_dx[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
{% if not useCERKGuess %}{# fallback trivial guess #}
......
......@@ -115,12 +115,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
dudx_T_by_dx[it] = inverseDx * dudx_T[it];
dudx_by_dx[it] = inverseDx * dudx[it];
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_dudx_T_by_dt = dudx_T_by_dx[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
volatile double doNotOptimizeAway_dudx_by_dt = dudx_by_dx[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
{% if useFlux%}
// Set rhs matmul coef matrix
......@@ -138,12 +132,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
rhsCoeff_T[i*{{nDofPad}}+j] = -inverseDx * Kxi_T[i*{{nDofPad}}+j] * iweights1[j];
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_rhsCoeff = rhsCoeff[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
volatile double doNotOptimizeAway_rhsCoeff_T = rhsCoeff_T[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
// used at the end of the picard loop, integrate coefficient for rhs
double iK1_T_wt_dt[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
......@@ -153,11 +141,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
iK1_T_wt_dt[i*{{nDofPad}}+j] = dt * iK1_T[i*{{nDofPad}}+j] * weights1[j];
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_iK1_T_wt_dt = iK1_T_wt_dt[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
//TODO JMG Initial guess template
......@@ -766,11 +749,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
coeffVolume_T[i*{{nDofPad}}+j] = Kxi[i*{{nDofPad}}+j] * iweights1[j] * inverseDx;
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_coeffVolume_T = coeffVolume_T[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
// Assume equispaced mesh, dx[0] == dx[1] == dx[2]
//x
......@@ -785,11 +763,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
coeffVolume[i*{{nDofPad}}+j] = Kxi_T[i*{{nDofPad}}+j] * iweights1[i] * inverseDx;
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_coeffVolume = coeffVolume[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
//y
for (int zn = 0; zn < {{nDof3D*nVar}}; zn++) {
......
......@@ -108,11 +108,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
for(int it=0;it<{{nDof*nDofPad}};it++) {
dudx_T_by_dx[it] = inverseDx * dudx_T[it];
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_dudx_by_dt = dudx_T_by_dx[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
{% if useFlux%}
// Set rhs matmul coef matrix
......@@ -123,11 +118,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
rhsCoeff[i*{{nDofPad}}+j] = -inverseDx * Kxi[i*{{nDofPad}}+j] * iweights1[i];
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_rhsCoeff = rhsCoeff[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
// used at the end of the picard loop, integrate coefficient for rhs
double iK1_T_wt_dt[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
......@@ -137,11 +127,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
iK1_T_wt_dt[i*{{nDofPad}}+j] = dt * iK1_T[i*{{nDofPad}}+j] * weights1[j];
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_iK1_T_wt_dt = iK1_T_wt_dt[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
// 1. Trivial initial guess
std::memset(lQi, 0, sizeof(double)*{{nVarPad*(nDof**nDim)*nDof}});
......@@ -707,11 +692,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
coeffVolume[i*{{nDofPad}}+j] = Kxi_T[i*{{nDofPad}}+j] * iweights1[i] * inverseDx;
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_coeffVolume = coeffVolume[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
// Assume equispaced mesh, dx[0] == dx[1] == dx[2]
......
......@@ -115,12 +115,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
dudx_T_by_dx[it] = inverseDx * dudx_T[it];
dudx_by_dx[it] = inverseDx * dudx[it];
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_dudx_T_by_dt = dudx_T_by_dx[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
volatile double doNotOptimizeAway_dudx_by_dt = dudx_by_dx[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
{% if useFlux%}
// Set rhs matmul coef matrix
......@@ -138,12 +132,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
rhsCoeff_T[i*{{nDofPad}}+j] = -inverseDx * Kxi_T[i*{{nDofPad}}+j] * iweights1[j];
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_rhsCoeff = rhsCoeff[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
volatile double doNotOptimizeAway_rhsCoeff_T = rhsCoeff_T[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
{% endif %}
// used at the end of the picard loop, integrate coefficient for rhs
double iK1_T_wt_dt[{{nDof*nDofPad}}] __attribute__((aligned(ALIGNMENT)));
......@@ -153,11 +141,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
iK1_T_wt_dt[i*{{nDofPad}}+j] = dt * iK1_T[i*{{nDofPad}}+j] * weights1[j];
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_iK1_T_wt_dt = iK1_T_wt_dt[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
//TODO JMG Initial guess template
......@@ -821,11 +804,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
coeffVolume_T[i*{{nDofPad}}+j] = Kxi[i*{{nDofPad}}+j] * iweights1[j] * inverseDx;
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_coeffVolume_T = coeffVolume_T[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
// Assume equispaced mesh, dx[0] == dx[1] == dx[2]
//x, note transposed n and x
......@@ -840,11 +818,6 @@ int {{codeNamespace}}::fusedSpaceTimePredictorVolumeIntegral(
coeffVolume[i*{{nDofPad}}+j] = Kxi_T[i*{{nDofPad}}+j] * iweights1[i] * inverseDx;
}
}
{% if useLibxsmm %}
#if defined(USE_IPO) && ! defined(UNSAFE_IPO)
volatile double doNotOptimizeAway_coeffVolume = coeffVolume[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}
//y, fuse nx
for (int z = 0; z < {{nDof3D}}; z++) {
......
......@@ -11,6 +11,12 @@
* For the full license text, see LICENSE.txt
**/ #}
// THIS FILE MUST NEVER BE INCLUDED IN THE IPO
// assembly code in the IPO causes optimization bugs (e.g. arrays wrongly marked as dead code because they are only read from the assembly code)
#include "{{pathToOptKernel}}/gemmsCPP.h"
#include "{{pathToOptKernel}}/Kernels.h" //for the libxsmm flop counter
......
......@@ -79,16 +79,10 @@ _mm_prefetch({{array}}+{{offset}}{% if offsetLine != 0 %}+{{offsetLine}}{% endif
{% if prefetchC %}
{{prefetchMatrix(C, conf.M, conf.LDC, conf.N, C_next)}}{##}
{% endif %}{# prefetch C #}
#ifdef USE_IPO
#pragma forceinline
#endif
{{conf.baseroutinename}}({{A}}{% if A_shift != '0' %}+{{A_shift}}{% endif %}, {{B}}{% if B_shift != '0' %}+{{B_shift}}{% endif %}, {{C}}{% if C_shift != '0' %}+{{C_shift}}{% endif %});
{% if false %}{# prefetch in gemm, disabled #}
#ifdef USE_IPO
#pragma forceinline
#endif
{% if prefetchGemm %}
{{conf.baseroutinename}}({{A}}{% if A_shift != '0' %}+{{A_shift}}{% endif %}, {{B}}{% if B_shift != '0' %}+{{B_shift}}{% endif %}, {{C}}{% if C_shift != '0' %}+{{C_shift}}{% endif %}, {{A}}{% if A_next != '0' %}+{{A_next}}{% endif %}, {{B}}{% if B_next != '0' %}+{{B_next}}{% endif %}, {{C}}{% if C_next != '0' %}+{{C_next}}{% endif %});
{% else %}
......
......@@ -72,13 +72,7 @@
for (int it = 0; it < {{conf.LDB*conf.K}}; it++) {
{{B}}[it] = {{trueAlpha}} * {{trueB}}[it];
}
#if defined(USE_IPO) && !defined(UNSAFE_IPO)
volatile {{fpFormat}} doNotOptimizeAway_{{B}} = {{B}}[0]; //used to prevent the compiler from optimizing temp array away. Needs to be volatile
#endif
{% endif %}{# useTrueB #}
#ifdef USE_IPO
#pragma forceinline
#endif
{{conf.baseroutinename}}({{A}}{% if A_shift != '0' %}+{{A_shift}}{% endif %}, {{B}}{% if B_shift != '0' %}+{{B_shift}}{% endif %}, {{C}}{% if C_shift != '0' %}+{{C_shift}}{% endif %});
{#
......
......@@ -20,7 +20,7 @@
# DISTRIBUTEDMEM None MPI Distributed-memory parallelisation
{% endif %}
# BOUNDARYCONDITIONS None Periodic Type of boundary conditions
# USE_IPO Off On, Unsafe IPO during compilation (intel only)
# USE_IPO Off On IPO during compilation (intel only)
# ******************************************************************************************************
# Helper
......