Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

Commit 819e9600 authored by Benedikt Zoennchen's avatar Benedikt Zoennchen
Browse files

resolve merge conflict.

parents 7e365e57 0f0d9a0a
......@@ -70,7 +70,7 @@ stages:
artifacts:
when: on_failure
paths:
- "log_dir"
- "vadere_logs"
expire_in: 1 week
when: on_success
......
......@@ -69,6 +69,7 @@
* vadere.jar -> vadere-gui.jar
* postvis.jar -> vadere-postvis.jar
* vadere-console.jar -> remained unchanged
- Headers in the output file now have the form "[NAME]-PID[ID]". This avoids name conflicts and makes mapping to the relevant processor easy and fast.
- Migration to Java 11 (OpenJDK).
- Removed directory `Documentation/version-control` which contained the Git hooks. The Git hooks are not required anymore. Instead, added `git rev-parse HEAD` to file `VadereSimulator/pom.xml` to create `VadereSimulator/resources/current_commit_hash.txt` during each build via `mvn compile`.
**Note:** The file `current_commit_hash.txt` is created during Maven's validation phase, i.e., before the actual build.
......
......@@ -21,7 +21,7 @@ short_timeout_in_seconds = 2 * 60
def parse_command_line_arguments():
parser = argparse.ArgumentParser(description="Run all scenario files.")
parser.add_argument("--scenario", "-s", type=str, nargs="?", help="Run only the given scenario file and not all. E.g. \"VadereModelTests/TestOSM/scenarios/basic_2_density_discrete_ca.scenario\"")
parser.add_argument("scenario", type=str, nargs="?", help="Run only the given scenario file and not all. E.g., \"VadereModelTests/TestOSM/scenarios/basic_2_density_discrete_ca.scenario\"")
args = parser.parse_args()
......@@ -80,13 +80,10 @@ def find_scenario_files(path="VadereModelTests", scenario_search_pattern = "*.sc
def run_scenario_files_with_vadere_console(scenario_files, vadere_console="VadereSimulator/target/vadere-console.jar", scenario_timeout_in_sec=short_timeout_in_seconds):
output_dir = "output"
log_dir = "log_dir"
log_base_dir = "vadere_logs"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
makedirs_if_non_existing(output_dir)
makedirs_if_non_existing(log_base_dir)
total_scenario_files = len(scenario_files)
......@@ -98,10 +95,18 @@ def run_scenario_files_with_vadere_console(scenario_files, vadere_console="Vader
try:
print("Running scenario file ({}/{}): {}".format(i + 1, total_scenario_files, scenario_file))
# A scenario filename has the form "VadereModelTests/TestOSM/scenarios/chicken_floorfield_ok.scenario"
# Use second-level directory as subdirectory for logging (e.g., "TestOSM").
log_sub_dir = scenario_file.split(os.path.sep)[1]
log_dir = os.path.join(".", log_base_dir, log_sub_dir)
makedirs_if_non_existing(log_dir)
scenario_name = os.path.basename(scenario_file).split('.')[0]
log_file = os.path.join(".", log_dir, scenario_name + ".log")
log_file = os.path.join(log_dir, scenario_name + ".log")
print(log_file)
print(" Log file: " + log_file)
# Measure wall time and not CPU time.
wall_time_start = time.time()
......@@ -124,17 +129,16 @@ def run_scenario_files_with_vadere_console(scenario_files, vadere_console="Vader
except subprocess.TimeoutExpired as exception:
print("Scenario file failed: {}".format(scenario_file))
print("-> Reason: timeout after {} s".format(exception.timeout))
failed_summary.append("Scenario file failed: {}".format(scenario_file))
failed_summary.append("-> Reason: timeout after {} s".format(exception.timeout))
failed_scenarios_with_exception.append((scenario_file, exception))
except subprocess.CalledProcessError as exception:
prefix = ""
if "TestOSM" in scenario_file:
prefix = " * OSM * "
print(prefix + "Scenario file failed: {}".format(scenario_file))
print("Scenario file failed: {}".format(scenario_file))
print("-> Reason: non-zero return value {}".format(exception.returncode))
print(" {}".format(read_first_error_linies(exception.stderr)))
failed_summary.append(prefix + "Scenario file failed: {}".format(scenario_file))
failed_summary.append("Scenario file failed: {}".format(scenario_file))
failed_summary.append("-> Reason: non-zero return value {}".format(exception.returncode))
failed_summary.append(" {}".format(read_first_error_linies(exception.stderr)))
......@@ -145,6 +149,10 @@ def run_scenario_files_with_vadere_console(scenario_files, vadere_console="Vader
return {"passed": passed_scenarios, "failed": failed_scenarios_with_exception, "failed_summary": failed_summary}
def makedirs_if_non_existing(directory):
    """Create `directory` (including missing parent directories) if it does not exist yet.

    :param directory: path of the directory to create.
    """
    # exist_ok=True makes the call idempotent and avoids the check-then-create
    # race of `if not os.path.exists(...): os.makedirs(...)` when several
    # processes (e.g. parallel CI jobs) create the same directory concurrently.
    os.makedirs(directory, exist_ok=True)
def read_first_error_linies(error_byte_string):
err_string_lines = error_byte_string.decode('utf-8').split('\n')
if len(err_string_lines) >= 2:
......
# Extract line and branch coverage (in percentage) from HTML coverage reports
# which are created by Maven's jacoco plugin.
# Use top-level pom.xml to search in correct subdirectories.
#
# Watch out: call this script from the root directory of the project, e.g.
#
# python Tools/my_script.py
import xml.etree.ElementTree as ET
import os
import re
def get_modules_from_pom_file(filename="pom.xml"):
    """Return a list of submodule names that were found in the given "pom.xml".

    Modules listed in `exclude_list` (GUI/annotation modules without coverage
    reports) are filtered out.

    :param filename: path to Maven's top-level "pom.xml".
    :return: list of module name strings (e.g. "./VadereSimulator").
    """
    # Maven POM files use a default XML namespace; register it so that
    # findall() below can address elements with the "default:" prefix.
    xml_name_spaces = {
        "default": "http://maven.apache.org/POM/4.0.0"
    }

    xml_tree = ET.parse(filename)
    xml_root = xml_tree.getroot()

    exclude_list = ["./VadereAnnotation", "./VadereGui"]
    modules = xml_root.findall("default:modules/default:module", xml_name_spaces)
    module_names = [module.text for module in modules if module.text not in exclude_list]

    return module_names
def extract_line_and_branch_coverage(module_names):
    """Return a dictionary which maps module name to a (line, branch) coverage tuple.

    Coverage values are percentages read from jacoco's HTML report of each module.

    :param module_names: iterable of module directories (as returned by
        `get_modules_from_pom_file`).
    :return: dict mapping module name -> (line_coverage, branch_coverage) as floats.
    :raises ValueError: if the "Total" coverage row cannot be found in a report.
    """
    module_to_coverage = dict()

    default_coverage_file = "target/site/coverage-reports/index.html"
    # The "Total" row of the jacoco report contains two percentages:
    # first line coverage, then branch coverage. Compile the pattern once
    # instead of once per module (it is loop-invariant).
    regex_pattern = re.compile(r"Total.*?([0-9]{1,3})%.*?([0-9]{1,3})%")

    for module in module_names:
        coverage_path = os.path.join(module, default_coverage_file)

        with open(coverage_path, "r") as file:
            coverage_report = file.read()

        match = regex_pattern.search(coverage_report)

        if match:
            line_coverage = float(match.group(1))
            branch_coverage = float(match.group(2))
            module_to_coverage[module] = (line_coverage, branch_coverage)
        else:
            # ValueError is a subclass of Exception, so existing callers that
            # catch Exception keep working.
            raise ValueError("Coverage data not found for module: {}".format(module))

    return module_to_coverage
def print_averaged_line_coverage(coverage_data):
    """Print the average line and branch coverage over all modules to stdout.

    GitLab CI tools read out the stdout output of the build process; therefore,
    the coverage info is printed rather than returned.

    :param coverage_data: dict mapping module name -> (line, branch) coverage tuple.
    """
    module_count = len(coverage_data)

    line_values = [line for (line, _branch) in coverage_data.values()]
    branch_values = [branch for (_line, branch) in coverage_data.values()]

    # By default, GitLab CI parses only integers, hence round to int.
    mean_line = int(round(sum(line_values) / module_count, 0))
    mean_branch = int(round(sum(branch_values) / module_count, 0))

    print("Line Coverage: Total {}%".format(mean_line))
    print("Branch Coverage: {}%".format(mean_branch))
# Script entry point: read the module list from the top-level pom.xml,
# collect each module's jacoco coverage report, and print the averaged
# percentages for GitLab CI to parse from stdout.
if __name__ == "__main__":
    module_names = get_modules_from_pom_file()
    module_to_coverage = extract_line_and_branch_coverage(module_names)
    print_averaged_line_coverage(module_to_coverage)
......@@ -338,8 +338,13 @@ public class PostvisualizationWindow extends JPanel implements Observer {
public void loadOutputFile(final File trajectoryFile, final Scenario scenario) throws IOException {
Player.getInstance(model).stop();
model.init(IOOutput.readTrajectories(trajectoryFile.toPath(), scenario), scenario, trajectoryFile.getParent());
model.notifyObservers();
try {
model.init(IOOutput.readTrajectories(trajectoryFile.toPath(), scenario), scenario, trajectoryFile.getParent());
model.notifyObservers();
} catch (Exception ex) {
JOptionPane.showMessageDialog(this, ex.getMessage(), Messages.getString("Error.text"), JOptionPane.ERROR_MESSAGE);
}
}
public void loadOutputFile(final Scenario scenario) {
......
......@@ -2,6 +2,7 @@
package org.vadere.simulator.projects.dataprocessing.outputfile;
import org.vadere.simulator.projects.dataprocessing.DataProcessingJsonManager;
import org.vadere.simulator.projects.dataprocessing.datakey.DataKey;
import org.vadere.simulator.projects.dataprocessing.processor.DataProcessor;
import org.vadere.simulator.projects.dataprocessing.writer.VadereWriter;
......@@ -54,7 +55,11 @@ public abstract class OutputFile<K extends DataKey<K>> {
private boolean isWriteMetaData;
private String separator;
private final static String nameConflictAdd = "-Proc?"; // the # is replaced with the processor id
// Check also the PostVis where there is a dependency
public final static String headerProcSep = "-";
public final static String headerNameAdd = headerProcSep + "PID?"; // the # is replaced with the processor id
private VadereWriterFactory writerFactory;
private VadereWriter writer;
......@@ -129,7 +134,7 @@ public abstract class OutputFile<K extends DataKey<K>> {
// characters
String md = "#IDXCOL=" + dataIndices.length +
",DATACOL="+(getEntireHeader().size()-dataIndices.length)+","+
"SEP=\'"+ this.separator+"\'";
"SEP=\'"+ DataProcessingJsonManager.DEFAULT_SEPARATOR +"\'";
//Make a list with one element to reuse 'writeLine' function
List<String> line = new LinkedList<>();
......@@ -166,16 +171,41 @@ public abstract class OutputFile<K extends DataKey<K>> {
}
public String getHeaderLine() {
return String.join(this.separator, this.getEntireHeader());
return String.join(DataProcessingJsonManager.DEFAULT_SEPARATOR, this.getEntireHeader());
}
public String getIndicesLine() {
return String.join(this.separator, this.getIndices());
return String.join(DataProcessingJsonManager.DEFAULT_SEPARATOR, this.getIndices());
}
public static String addHeaderProcInfo(String columnName, int procId){
return columnName + headerNameAdd.replace("?", "" + procId);
}
private List<String> uniqueHeaderNames(){
// This function adds to every header "headerNameAdd", for ALL headers EVERY time
// (cmp. headersWithNameMangling)
LinkedList<String> headers = new LinkedList<>();
for (DataProcessor dataProcessor: dataProcessors) {
List<String> allProcHeaders = Arrays.asList(dataProcessor.getHeaders());
for (String singleHeader: allProcHeaders) {
// add the processor id to make header unique
String adaptedColumnName = OutputFile.addHeaderProcInfo(singleHeader, dataProcessor.getId());
headers.addLast(adaptedColumnName);
}
}
return headers;
}
private List<String> headersWithNameMangling(){
// This function adds to every header "headerNameAdd", ONLY if there is a name conflict detected
// (cmp. uniqueHeaderNames)
LinkedList<String> headers = new LinkedList<>();
boolean isNameMangle = false; // assume there is no nameing conflict
boolean isNameMangleDetected = false; // assume there is no nameing conflict
mainloop:
for (DataProcessor l: dataProcessors) {
......@@ -183,7 +213,7 @@ public abstract class OutputFile<K extends DataKey<K>> {
for(String el: list) {
if(headers.contains(el)){
isNameMangle = true; // conflict found: stop collecting headers
isNameMangleDetected = true; // conflict found: stop collecting and name make every header unique
break mainloop;
}else{
headers.addLast(el);
......@@ -191,24 +221,16 @@ public abstract class OutputFile<K extends DataKey<K>> {
}
}
if(isNameMangle){
headers.clear(); //start from new...
for (DataProcessor l: dataProcessors) {
List<String> list = Arrays.asList(l.getHeaders());
for (String h: list) {
// ... but now add the processor id
headers.addLast(h +
nameConflictAdd.replace('?', (char) (l.getId()+'0')));
}
}
if(isNameMangleDetected){
headers = (LinkedList<String>) uniqueHeaderNames();
}
return headers;
}
private List<String> composeHeaderLine(){
final List<String> allHeaders = new LinkedList<>(Arrays.asList(dataIndices));
List<String> procHeaders = this.headersWithNameMangling();
List<String> procHeaders = this.uniqueHeaderNames();
allHeaders.addAll(procHeaders);
......
......@@ -76,7 +76,9 @@ public abstract class IOOutput {
private static boolean testTrajectories (final VadereProject project, final File directory) {
try {
TrajectoryReader reader = new TrajectoryReader(getPathToOutputFile(project, directory.getName(), IOUtils.TRAJECTORY_FILE_EXTENSION));
return reader.checkFile();
reader.checkFile();
return true;
} catch (IOException | VadereClassNotFoundException e) {
logger.error("Error in output file " + directory.getName());
return false;
......
......@@ -3,6 +3,7 @@ package org.vadere.simulator.projects.io;
import org.apache.commons.math3.util.Pair;
import org.jetbrains.annotations.NotNull;
import org.vadere.simulator.projects.Scenario;
import org.vadere.simulator.projects.dataprocessing.outputfile.OutputFile;
import org.vadere.simulator.projects.dataprocessing.processor.PedestrianPositionProcessor;
import org.vadere.state.attributes.scenario.AttributesAgent;
import org.vadere.state.scenario.Agent;
......@@ -67,6 +68,8 @@ public class TrajectoryReader {
private int groupSizeIndex;
private int stridesIndex;
private static final int notSetColumnIndexIdentifier = -1;
public TrajectoryReader(final Path trajectoryFilePath, final Scenario scenario) {
this(trajectoryFilePath, scenario.getAttributesPedestrian());
}
......@@ -103,27 +106,30 @@ public class TrajectoryReader {
stridesKeys.add("strides");
stridesKeys.add("footSteps");
pedIdIndex = -1;
stepIndex = -1;
simTimeIndex = -1;
xIndex = -1;
yIndex = -1;
targetIdIndex = -1;
groupIdIndex = -1;
groupSizeIndex = -1;
stridesIndex = -1;
pedIdIndex = notSetColumnIndexIdentifier;
stepIndex = notSetColumnIndexIdentifier;
simTimeIndex = notSetColumnIndexIdentifier;
xIndex = notSetColumnIndexIdentifier;
yIndex = notSetColumnIndexIdentifier;
targetIdIndex = notSetColumnIndexIdentifier;
groupIdIndex = notSetColumnIndexIdentifier;
groupSizeIndex = notSetColumnIndexIdentifier;
stridesIndex = notSetColumnIndexIdentifier;
}
public Map<Step, List<Agent>> readFile() throws IOException {
if (checkFile()){
return readStandardTrajectoryFile();
} else {
throw new IOException("could not read trajectory file, some colums are missing.");
checkFile();
return readStandardTrajectoryFile();
}
private void errorWhenNotUniqueColumn(int currentValue, String columnName) throws IOException{
if(currentValue != notSetColumnIndexIdentifier){
throw new IOException("The header " + columnName + " is not unique in the file. This is likely to have " +
"unwanted side effects");
}
}
public boolean checkFile () throws IOException {
public void checkFile () throws IOException {
// 1. Get the correct column
String header;
//read only first line.
......@@ -133,53 +139,61 @@ public class TrajectoryReader {
String[] columns = header.split(SPLITTER);
for (int index = 0; index < columns.length; index++) {
if (pedestrianIdKeys.contains(columns[index])) {
// header name without processor ID
String headerName = columns[index].split(OutputFile.headerProcSep)[0];
if (pedestrianIdKeys.contains(headerName)) {
errorWhenNotUniqueColumn(pedIdIndex, headerName);
pedIdIndex = index;
} else if (stepKeys.contains(columns[index])) {
} else if (stepKeys.contains(headerName)) {
errorWhenNotUniqueColumn(stepIndex, headerName);
stepIndex = index;
} else if (xKeys.contains(columns[index])) {
} else if (xKeys.contains(headerName)) {
errorWhenNotUniqueColumn(xIndex, headerName);
xIndex = index;
} else if (yKeys.contains(columns[index])) {
} else if (yKeys.contains(headerName)) {
errorWhenNotUniqueColumn(yIndex, headerName);
yIndex = index;
} else if (targetIdKeys.contains(columns[index])) {
} else if (targetIdKeys.contains(headerName)) {
errorWhenNotUniqueColumn(targetIdIndex, headerName);
targetIdIndex = index;
} else if (groupIdKeys.contains(columns[index])){
} else if (groupIdKeys.contains(headerName)){
errorWhenNotUniqueColumn(groupIdIndex, headerName);
groupIdIndex = index;
}
else if (groupSizeKeys.contains(columns[index])){
else if (groupSizeKeys.contains(headerName)){
errorWhenNotUniqueColumn(groupSizeIndex, headerName);
groupSizeIndex = index;
}
else if(stridesKeys.contains(columns[index])) {
else if(stridesKeys.contains(headerName)) {
errorWhenNotUniqueColumn(stridesIndex, headerName);
stridesIndex = index;
}
else if(simTimeKeys.contains(columns[index])) {
simTimeIndex = index;
}
}
try {
if (pedIdIndex != -1 && xIndex != -1 && yIndex != -1 && stepIndex != -1) {
// load default values with no groups
return true;
} else {
return false;
}
} catch (Exception e) {
logger.warn("could not read trajectory file. The file format might not be compatible or it is missing.");
throw e;
}
if (! (pedIdIndex != notSetColumnIndexIdentifier && xIndex != notSetColumnIndexIdentifier &&
yIndex != notSetColumnIndexIdentifier && stepIndex != notSetColumnIndexIdentifier)) {
// load default values with no groups
throw new IOException(String.format("All columns with " + notSetColumnIndexIdentifier + " value could " +
"not be found in the trajectory file pedIdIndex=%d, x-values=%d, y-values=%d, step " +
"values=%d", pedIdIndex, xIndex, yIndex, stepIndex));
}
}
private Map<Step, List<Agent>> readStandardTrajectoryFile() throws IOException {
try (BufferedReader in = IOUtils.defaultBufferedReader(this.trajectoryFilePath)) {
return in.lines() // a stream of lines
.skip(1) // skip the first line i.e. the header
.map(line -> split(line)) // split the line into string tokens
.map(line -> split(line)) // split the line into string tokens
.map(rowTokens -> parseRowTokens(rowTokens)) // transform those tokens into a pair of java objects (step, agent)
.collect(Collectors.groupingBy(Pair::getKey, // group all agent objects by the step.
Collectors.mapping(Pair::getValue, Collectors.toList())));
} catch (Exception e){
logger.warn("could not read trajectory file. The file format might not be compatible or it is missing.");
logger.warn("Could not read trajectory file. The file format might not be compatible or it is missing.");
throw e;
}
}
......
......@@ -7,6 +7,7 @@ import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.vadere.simulator.models.MainModel;
import org.vadere.simulator.models.MainModelBuilder;
......@@ -68,13 +69,13 @@ public class TestOutputFile {
List<String> header = testScenario.getDataProcessingJsonManager().getOutputFiles().get(0).getEntireHeader();
//Note these fail if the name conflict is handled differently, for now hard coded.
assertTrue(header.contains("timeStep"));
assertTrue(header.contains("pedestrianId"));
assertTrue(header.contains("x-Proc1"));
assertTrue(header.contains("y-Proc1"));
assertTrue(header.contains("x-Proc2"));
assertTrue(header.contains("y-Proc2"));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("x", 1)));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("y", 1)));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("x", 2)));
assertTrue(header.contains(OutputFile.addHeaderProcInfo("y", 2)));
}
......
package org.vadere.simulator.projects.dataprocessing.processor;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
public class PedestrianLastPositionProcessorTest extends ProcessorTest {
......
......@@ -8,7 +8,6 @@ import org.vadere.simulator.projects.dataprocessing.outputfile.OutputFile;
import org.vadere.simulator.utils.reflection.ReflectionHelper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* Base Test for all Processors.
......@@ -51,10 +50,14 @@ public abstract class ProcessorTest {
}
int l = processorTestEnv.getSimStates().size();
p.postLoop(processorTestEnv.getSimStates().get(l - 1));
processorTestEnv.getOutputFile().write();
OutputFile outputFile = processorTestEnv.getOutputFile();
outputFile.write();
// NOTE: these are the column names that have the additional information of the data processor ID.
assertEquals(processorTestEnv.getHeader(), outputFile.getHeaderLine());
String header = String.join(processorTestEnv.getDelimiter(), p.getHeaders());
assertTrue(processorTestEnv.getHeader().contains(header));
if (header.equals("")){
assertEquals(processorTestEnv.getExpectedOutputAsList(), processorTestEnv.getOutput(0));
} else {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment