Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

Commit fd1e8e89 authored by Benedikt Kleinmeier's avatar Benedikt Kleinmeier
Browse files

Merge branch '246-include-to-every-header-name-the-processor-id' of...

Merge branch '246-include-to-every-header-name-the-processor-id' of gitlab.lrz.de:vadere/vadere into 246-include-to-every-header-name-the-processor-id
parents 2d039a02 6c859e21
Pipeline #115327 passed with stages
in 132 minutes and 44 seconds
......@@ -2,6 +2,7 @@
package org.vadere.simulator.projects.dataprocessing.outputfile;
import org.vadere.simulator.projects.dataprocessing.DataProcessingJsonManager;
import org.vadere.simulator.projects.dataprocessing.datakey.DataKey;
import org.vadere.simulator.projects.dataprocessing.processor.DataProcessor;
import org.vadere.simulator.projects.dataprocessing.writer.VadereWriter;
......@@ -54,7 +55,11 @@ public abstract class OutputFile<K extends DataKey<K>> {
private boolean isWriteMetaData;
private String separator;
public final static String nameConflictAdd = "-PID?"; // the # is replaced with the processor id
// Check also the PostVis where there is a dependency
public final static String headerProcSep = "-";
public final static String headerNameAdd = headerProcSep + "PID?"; // the # is replaced with the processor id
private VadereWriterFactory writerFactory;
private VadereWriter writer;
......@@ -129,7 +134,7 @@ public abstract class OutputFile<K extends DataKey<K>> {
// characters
String md = "#IDXCOL=" + dataIndices.length +
",DATACOL="+(getEntireHeader().size()-dataIndices.length)+","+
"SEP=\'"+ this.separator+"\'";
"SEP=\'"+ DataProcessingJsonManager.DEFAULT_SEPARATOR +"\'";
//Make a list with one element to reuse 'writeLine' function
List<String> line = new LinkedList<>();
......@@ -166,16 +171,41 @@ public abstract class OutputFile<K extends DataKey<K>> {
}
public String getHeaderLine() {
return String.join(this.separator, this.getEntireHeader());
return String.join(DataProcessingJsonManager.DEFAULT_SEPARATOR, this.getEntireHeader());
}
public String getIndicesLine() {
return String.join(this.separator, this.getIndices());
return String.join(DataProcessingJsonManager.DEFAULT_SEPARATOR, this.getIndices());
}
/**
 * Appends the data-processor id to a column name to make it unique across
 * processors, e.g. columnName "x" with procId 1 becomes "x-PID1".
 * The '?' placeholder in {@code headerNameAdd} is substituted with the id.
 *
 * @param columnName the plain header name produced by a data processor
 * @param procId     the id of the data processor owning the column
 * @return the column name with the processor-id suffix attached
 */
public static String addHeaderProcInfo(String columnName, int procId){
    // String.valueOf is the idiomatic int-to-String conversion (was: "" + procId)
    return columnName + headerNameAdd.replace("?", String.valueOf(procId));
}
private List<String> uniqueHeaderNames(){
    // Unconditionally appends the processor-id suffix ("headerNameAdd") to
    // EVERY header of EVERY data processor, so all column names are unique
    // (cmp. headersWithNameMangling, which only does this on a detected conflict).
    LinkedList<String> uniqueNames = new LinkedList<>();
    for (DataProcessor processor : dataProcessors) {
        for (String plainHeader : processor.getHeaders()) {
            // attach the processor id so equal headers from different processors differ
            uniqueNames.addLast(OutputFile.addHeaderProcInfo(plainHeader, processor.getId()));
        }
    }
    return uniqueNames;
}
private List<String> headersWithNameMangling(){
// This function adds to every header "headerNameAdd", ONLY if there is a name conflict detected
// (cmp. uniqueHeaderNames)
LinkedList<String> headers = new LinkedList<>();
boolean isNameMangle = false; // assume there is no naming conflict
boolean isNameMangleDetected = false; // assume there is no naming conflict
mainloop:
for (DataProcessor l: dataProcessors) {
......@@ -183,7 +213,7 @@ public abstract class OutputFile<K extends DataKey<K>> {
for(String el: list) {
if(headers.contains(el)){
isNameMangle = true; // conflict found: stop collecting headers
isNameMangleDetected = true; // conflict found: stop collecting and name make every header unique
break mainloop;
}else{
headers.addLast(el);
......@@ -191,24 +221,16 @@ public abstract class OutputFile<K extends DataKey<K>> {
}
}
if(isNameMangle){
headers.clear(); //start from new...
for (DataProcessor l: dataProcessors) {
List<String> list = Arrays.asList(l.getHeaders());
for (String h: list) {
// ... but now add the processor id
headers.addLast(h +
nameConflictAdd.replace("?", "" + l.getId()+'0'));
}
}
if(isNameMangleDetected){
headers = (LinkedList<String>) uniqueHeaderNames();
}
return headers;
}
private List<String> composeHeaderLine(){
final List<String> allHeaders = new LinkedList<>(Arrays.asList(dataIndices));
List<String> procHeaders = this.headersWithNameMangling();
List<String> procHeaders = this.uniqueHeaderNames();
allHeaders.addAll(procHeaders);
......
......@@ -76,7 +76,9 @@ public abstract class IOOutput {
private static boolean testTrajectories (final VadereProject project, final File directory) {
try {
TrajectoryReader reader = new TrajectoryReader(getPathToOutputFile(project, directory.getName(), IOUtils.TRAJECTORY_FILE_EXTENSION));
return reader.checkFile();
reader.checkFile();
return true;
} catch (IOException | VadereClassNotFoundException e) {
logger.error("Error in output file " + directory.getName());
return false;
......
......@@ -3,6 +3,7 @@ package org.vadere.simulator.projects.io;
import org.apache.commons.math3.util.Pair;
import org.jetbrains.annotations.NotNull;
import org.vadere.simulator.projects.Scenario;
import org.vadere.simulator.projects.dataprocessing.outputfile.OutputFile;
import org.vadere.simulator.projects.dataprocessing.processor.PedestrianPositionProcessor;
import org.vadere.state.attributes.scenario.AttributesAgent;
import org.vadere.state.scenario.Agent;
......@@ -65,6 +66,8 @@ public class TrajectoryReader {
private int groupSizeIndex;
private int stridesIndex;
private static final int notSetColumnIndexIdentifier = -1;
public TrajectoryReader(final Path trajectoryFilePath, final Scenario scenario) {
this(trajectoryFilePath, scenario.getAttributesPedestrian());
}
......@@ -98,26 +101,29 @@ public class TrajectoryReader {
stridesKeys.add("strides");
stridesKeys.add("footSteps");
pedIdIndex = -1;
stepIndex = -1;
xIndex = -1;
yIndex = -1;
targetIdIndex = -1;
groupIdIndex = -1;
groupSizeIndex = -1;
stridesIndex = -1;
pedIdIndex = notSetColumnIndexIdentifier;
stepIndex = notSetColumnIndexIdentifier;
xIndex = notSetColumnIndexIdentifier;
yIndex = notSetColumnIndexIdentifier;
targetIdIndex = notSetColumnIndexIdentifier;
groupIdIndex = notSetColumnIndexIdentifier;
groupSizeIndex = notSetColumnIndexIdentifier;
stridesIndex = notSetColumnIndexIdentifier;
}
public Map<Step, List<Agent>> readFile() throws IOException {
if (checkFile()){
return readStandardTrajectoryFile();
} else {
throw new IOException("could not read trajectory file, some colums are missing.");
checkFile();
return readStandardTrajectoryFile();
}
private void errorWhenNotUniqueColumn(int currentValue, String columnName) throws IOException{
    // A column index that no longer holds the sentinel value has already been
    // assigned once — i.e. the header occurs more than once in the file.
    boolean alreadyAssigned = currentValue != notSetColumnIndexIdentifier;
    if (alreadyAssigned) {
        throw new IOException("The header " + columnName + " is not unique in the file. This is likely to have " +
                "unwanted side effects");
    }
}
public boolean checkFile () throws IOException {
public void checkFile () throws IOException {
// 1. Get the correct column
String header;
//read only first line.
......@@ -127,50 +133,58 @@ public class TrajectoryReader {
String[] columns = header.split(SPLITTER);
for (int index = 0; index < columns.length; index++) {
if (pedestrianIdKeys.contains(columns[index])) {
// header name without processor ID
String headerName = columns[index].split(OutputFile.headerProcSep)[0];
if (pedestrianIdKeys.contains(headerName)) {
errorWhenNotUniqueColumn(pedIdIndex, headerName);
pedIdIndex = index;
} else if (stepKeys.contains(columns[index])) {
} else if (stepKeys.contains(headerName)) {
errorWhenNotUniqueColumn(stepIndex, headerName);
stepIndex = index;
} else if (xKeys.contains(columns[index])) {
} else if (xKeys.contains(headerName)) {
errorWhenNotUniqueColumn(xIndex, headerName);
xIndex = index;
} else if (yKeys.contains(columns[index])) {
} else if (yKeys.contains(headerName)) {
errorWhenNotUniqueColumn(yIndex, headerName);
yIndex = index;
} else if (targetIdKeys.contains(columns[index])) {
} else if (targetIdKeys.contains(headerName)) {
errorWhenNotUniqueColumn(targetIdIndex, headerName);
targetIdIndex = index;
} else if (groupIdKeys.contains(columns[index])){
} else if (groupIdKeys.contains(headerName)){
errorWhenNotUniqueColumn(groupIdIndex, headerName);
groupIdIndex = index;
}
else if (groupSizeKeys.contains(columns[index])){
else if (groupSizeKeys.contains(headerName)){
errorWhenNotUniqueColumn(groupSizeIndex, headerName);
groupSizeIndex = index;
}
else if(stridesKeys.contains(columns[index])) {
else if(stridesKeys.contains(headerName)) {
errorWhenNotUniqueColumn(stridesIndex, headerName);
stridesIndex = index;
}
}
try {
if (pedIdIndex != -1 && xIndex != -1 && yIndex != -1 && stepIndex != -1) {
// load default values with no groups
return true;
} else {
return false;
}
} catch (Exception e) {
logger.warn("could not read trajectory file. The file format might not be compatible or it is missing.");
throw e;
}
if (! (pedIdIndex != notSetColumnIndexIdentifier && xIndex != notSetColumnIndexIdentifier &&
yIndex != notSetColumnIndexIdentifier && stepIndex != notSetColumnIndexIdentifier)) {
// load default values with no groups
throw new IOException(String.format("All columns with " + notSetColumnIndexIdentifier + " value could " +
"not be found in the trajectory file pedIdIndex=%d, x-values=%d, y-values=%d, step " +
"values=%d", pedIdIndex, xIndex, yIndex, stepIndex));
}
}
private Map<Step, List<Agent>> readStandardTrajectoryFile() throws IOException {
try (BufferedReader in = IOUtils.defaultBufferedReader(this.trajectoryFilePath)) {
return in.lines() // a stream of lines
.skip(1) // skip the first line i.e. the header
.map(line -> split(line)) // split the line into string tokens
.map(line -> split(line)) // split the line into string tokens
.map(rowTokens -> parseRowTokens(rowTokens)) // transform those tokens into a pair of java objects (step, agent)
.collect(Collectors.groupingBy(Pair::getKey, // group all agent objects by the step.
Collectors.mapping(Pair::getValue, Collectors.toList())));
} catch (Exception e){
logger.warn("could not read trajectory file. The file format might not be compatible or it is missing.");
logger.warn("Could not read trajectory file. The file format might not be compatible or it is missing.");
throw e;
}
}
......
......@@ -7,6 +7,7 @@ import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.vadere.simulator.models.MainModel;
import org.vadere.simulator.models.MainModelBuilder;
......@@ -68,13 +69,13 @@ public class TestOutputFile {
List<String> header = testScenario.getDataProcessingJsonManager().getOutputFiles().get(0).getEntireHeader();
//Note these fail if the name conflict is handled differently, for now hard coded.
assertTrue(header.contains("timeStep"));
assertTrue(header.contains("pedestrianId"));
assertTrue(header.contains("x-Proc1"));
assertTrue(header.contains("y-Proc1"));
assertTrue(header.contains("x-Proc2"));
assertTrue(header.contains("y-Proc2"));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("x", 1)));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("y", 1)));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("x", 2)));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("y", 2)));
}
......
package org.vadere.simulator.projects.dataprocessing.processor;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
public class PedestrianLastPositionProcessorTest extends ProcessorTest {
......
......@@ -8,7 +8,6 @@ import org.vadere.simulator.projects.dataprocessing.outputfile.OutputFile;
import org.vadere.simulator.utils.reflection.ReflectionHelper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* Base Test for all Processors.
......@@ -51,10 +50,14 @@ public abstract class ProcessorTest {
}
int l = processorTestEnv.getSimStates().size();
p.postLoop(processorTestEnv.getSimStates().get(l - 1));
processorTestEnv.getOutputFile().write();
OutputFile outputFile = processorTestEnv.getOutputFile();
outputFile.write();
// NOTE: these are the column names that have the additional information of the data processor ID.
assertEquals(processorTestEnv.getHeader(), outputFile.getHeaderLine());
String header = String.join(processorTestEnv.getDelimiter(), p.getHeaders());
assertTrue(processorTestEnv.getHeader().contains(header));
if (header.equals("")){
assertEquals(processorTestEnv.getExpectedOutputAsList(), processorTestEnv.getOutput(0));
} else {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment