
Commit fd7953ae authored by Benedikt Zoennchen

Merge branch 'issue#242' into 'master'

fix issue #242.

Closes #242

See merge request !66
parents a303cac0 ea3b983c
Pipeline #122557 passed with stages in 114 minutes and 28 seconds
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Note\n",
"You can find the required data on the Nextcloud data/Paperdaten/2019/TGF2019-vadere"
]
},
{
"cell_type": "code",
"execution_count": null,
......@@ -16,7 +24,11 @@
"import matplotlib.pyplot as plt\n",
"from matplotlib.lines import Line2D\n",
"import seaborn as sns\n",
"sns.set(style=\"ticks\")\n",
"import functools\n",
"import operator\n",
"sns.set_context(\"poster\")\n",
"sns.set(style=\"whitegrid\", font_scale=1.8)\n",
"\n",
"\n",
"from IPython.core.display import display, HTML\n",
"display(HTML('<style>.container { width:100% !important; }</style>'))"
......@@ -58,19 +70,16 @@
"metadata": {},
"outputs": [],
"source": [
"file = \"./data/TrajectoryMetric/trajectories_simulation.txt\"\n",
"f = open(file, \"r\")\n",
"header = f.readline();\n",
"trajectories = dict({});\n",
"\n",
"for row in f:\n",
" s = row.split(\" \");\n",
" pedId = int(s[0]);\n",
" footsteps = json.loads(s[1]);\n",
" trajectories[pedId] = footsteps[0]['footSteps'];\n",
"\n",
"ptrajectories = trajectories_to_dataframe(trajectories)\n",
"ptrajectories.head()"
"def load_simulation_data(file):\n",
" f = open(file, \"r\")\n",
" header = f.readline();\n",
" trajectories = dict({});\n",
" for row in f:\n",
" s = row.split(\" \");\n",
" pedId = int(s[0]);\n",
" footsteps = json.loads(s[1]);\n",
" trajectories[pedId] = footsteps[0]['footSteps']; \n",
" return trajectories_to_dataframe(trajectories)"
]
},
{
......@@ -102,8 +111,8 @@
" lastX = None\n",
" lastY = None\n",
" for row in data.itertuples():\n",
" endX = row.x / 100 + 18.7\n",
" endY = row.y / 100 + 4.2\n",
" endX = row.x / 100.0 + 18.7\n",
" endY = row.y / 100.0 + 4.2\n",
" startTime = row.timeStep / fps - 1/fps\n",
" endTime = row.timeStep / fps\n",
" if last_ped_id is None or last_ped_id != row.pedestrianId:\n",
......@@ -112,15 +121,15 @@
" distance = np.nan\n",
" velocity = np.nan\n",
" else:\n",
" startX = lastX / 100 + 18.7\n",
" startY = lastY / 100 + 4.2\n",
" startX = lastX / 100.0 + 18.7\n",
" startY = lastY / 100.0 + 4.2\n",
" distance = np.sqrt(np.square(endX - startX) + np.square(endY - startY))\n",
" velocity = distance / (endTime - startTime)\n",
" rows.append([row.pedestrianId, startX, startY, endX, endY, startTime, endTime, distance, velocity])\n",
" last_ped_id = row.pedestrianId\n",
" lastX = row.x\n",
" lastY = row.y\n",
" \n",
" rows.append([row.pedestrianId, startX, startY, endX, endY, startTime, endTime, distance, velocity])\n",
" dataframe = pd.DataFrame(rows, columns=['pedestrianId', 'startX', 'startY', 'endX', 'endY','startTime','endTime','distance','velocity'])\n",
" return dataframe\n",
" \n",
......@@ -146,44 +155,13 @@
" return trajectories"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#times = np.linspace(4,10,10)\n",
"#euclid_d(get_trajectory(1), get_trajectory(1), times)\n",
"#to_trajectories(load_experiment(real_file))[1]\n",
"\n",
"real_file = \"./data/TrajectoryMetric/KO/ko-240-120-240/ko-240-120-240_combined_MB.txt\"\n",
"trajectoriesReal = load_experiment(real_file)\n",
"#trajectoriesReal = to_trajectories(data)\n",
"trajectoriesReal.query('pedestrianId == 1').head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Convert DataFrame to postvis DataFrame"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def to_postVis(df):\n",
" simTimeStep = 0.4\n",
" fps = 16\n",
" df['timeStep'] = np.ceil(df['endTime'] / (1/fps)).astype(np.int)\n",
" df['x'] = df['endX']\n",
" df['y'] = df['endY']\n",
" df['simTime'] = df['endTime']\n",
" df = df.drop(columns=['startX','startY','endX','endY','startTime', 'endTime']) \n",
" return df"
"# Load all data\n",
"\n",
"The following code loads the experiment data as well as the simulated data and transforms everything into the same format (data frame). The simulated trajectories are cut with respect to the camera bounds of the experiment."
]
},
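{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of how the repeated load-and-cut calls in the next cell could be collapsed into a loop. It assumes the file-naming scheme of the simulation files and the cut bounds (`cut_minX`, `cut_maxX`, `cut_maxY`) computed below; the helper name `load_and_cut_all` is hypothetical."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical helper (sketch): load every scenario file of one model and cut it to the camera bounds.\n",
"# Assumes load_simulation_data, cut and the cut bounds from the next cell are already defined.\n",
"def load_and_cut_all(path_template, scenarios, cut_minX, cut_maxX, cut_maxY):\n",
"    frames = {}\n",
"    for s in scenarios:\n",
"        df = load_simulation_data(path_template.format(s))\n",
"        frames[s] = cut(df, cut_minX, cut_maxX, cut_maxY)\n",
"    return frames\n",
"\n",
"# Usage sketch:\n",
"# scenarios = ['050', '060', '080', '100', '120', '150', '240']\n",
"# osm = load_and_cut_all(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-{}-240.txt\",\n",
"#                        scenarios, cut_minX, cut_maxX, cut_maxY)\n",
"# trajectoriesOSM = pd.concat(list(osm.values()), ignore_index=True)"
]
},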
{
......@@ -192,29 +170,86 @@
"metadata": {},
"outputs": [],
"source": [
"to_postVis(trajectoriesReal).to_csv('expteriment_2.trajectories',index=False,sep=' ')\n",
"to_postVis(trajectoriesReal).head(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Calculate evacution time"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Evacuation time = endTime - startTime"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Real data"
"#times = np.linspace(4,10,10)\n",
"#euclid_d(get_trajectory(1), get_trajectory(1), times)\n",
"#to_trajectories(load_experiment(real_file))[1]\n",
"\n",
"trajectories240o050o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-050-240/ko-240-050-240_combined_MB.txt\")\n",
"trajectories240o060o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-060-240/ko-240-060-240_combined_MB.txt\")\n",
"trajectories240o080o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-080-240/ko-240-080-240_combined_MB.txt\")\n",
"trajectories240o100o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-100-240/ko-240-100-240_combined_MB.txt\")\n",
"trajectories240o120o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-120-240/ko-240-120-240_combined_MB.txt\")\n",
"trajectories240o150o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-150-240/ko-240-150-240_combined_MB.txt\")\n",
"trajectories240o240o240 = load_experiment(\"./T-junction-experiment-trajectories-files/KO/ko-240-240-240/ko-240-240-240_combined_MB.txt\")\n",
"\n",
"trajectoriesReal = pd.concat([trajectories240o050o240, trajectories240o060o240, trajectories240o080o240, \n",
" trajectories240o100o240, trajectories240o120o240, trajectories240o150o240, \n",
" trajectories240o240o240], ignore_index=True)\n",
"\n",
"# trajectories starting from left\n",
"cut_minX = trajectoriesReal[trajectoriesReal[\"endX\"] < 15].groupby([\"pedestrianId\"])[\"startX\"].min().max() + 0.12\n",
"\n",
"# trajectories starting from right\n",
"cut_maxX = trajectoriesReal[trajectoriesReal[\"endX\"] > 21].groupby([\"pedestrianId\"])[\"startX\"].max().min() - 0.2\n",
"\n",
"# trajectories ending at top\n",
"cut_maxY = trajectoriesReal.groupby([\"pedestrianId\"])[\"endY\"].max().min() - 0.168\n",
"\n",
"trajectories240o050o240 = cut(trajectories240o050o240, cut_minX, cut_maxX, cut_maxY)\n",
"trajectories240o060o240 = cut(trajectories240o060o240, cut_minX, cut_maxX, cut_maxY)\n",
"trajectories240o080o240 = cut(trajectories240o080o240, cut_minX, cut_maxX, cut_maxY)\n",
"trajectories240o100o240 = cut(trajectories240o100o240, cut_minX, cut_maxX, cut_maxY)\n",
"trajectories240o120o240 = cut(trajectories240o120o240, cut_minX, cut_maxX, cut_maxY)\n",
"trajectories240o150o240 = cut(trajectories240o150o240, cut_minX, cut_maxX, cut_maxY)\n",
"trajectories240o240o240 = cut(trajectories240o240o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o050o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-050-240.txt\")\n",
"osm_trajectories240o050o240 = cut(osm_trajectories240o050o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o060o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-060-240.txt\")\n",
"osm_trajectories240o060o240 = cut(osm_trajectories240o060o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o080o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-080-240.txt\")\n",
"osm_trajectories240o080o240 = cut(osm_trajectories240o080o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o100o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-100-240.txt\")\n",
"osm_trajectories240o100o240 = cut(osm_trajectories240o100o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o120o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-120-240.txt\")\n",
"osm_trajectories240o120o240 = cut(osm_trajectories240o120o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o150o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-150-240.txt\")\n",
"osm_trajectories240o150o240 = cut(osm_trajectories240o150o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"osm_trajectories240o240o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-OSM-240-240-240.txt\")\n",
"osm_trajectories240o240o240 = cut(osm_trajectories240o240o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"trajectoriesOSM = pd.concat([osm_trajectories240o050o240, osm_trajectories240o060o240, osm_trajectories240o080o240, osm_trajectories240o100o240, osm_trajectories240o120o240, osm_trajectories240o150o240, osm_trajectories240o240o240], ignore_index=True)\n",
"\n",
"bhm_trajectories240o050o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-050-240.txt\")\n",
"bhm_trajectories240o050o240 = cut(bhm_trajectories240o050o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"bhm_trajectories240o060o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-060-240.txt\")\n",
"bhm_trajectories240o060o240 = cut(bhm_trajectories240o060o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"bhm_trajectories240o080o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-080-240.txt\")\n",
"bhm_trajectories240o080o240 = cut(bhm_trajectories240o080o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"bhm_trajectories240o100o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-100-240.txt\")\n",
"bhm_trajectories240o100o240 = cut(bhm_trajectories240o100o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"bhm_trajectories240o120o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-120-240.txt\")\n",
"bhm_trajectories240o120o240 = cut(bhm_trajectories240o120o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"bhm_trajectories240o150o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-150-240.txt\")\n",
"bhm_trajectories240o150o240 = cut(bhm_trajectories240o150o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"bhm_trajectories240o240o240 = load_simulation_data(\"./T-junction-sim-trajectory-files/trajectories-BHM-240-240-240.txt\")\n",
"bhm_trajectories240o240o240 = cut(bhm_trajectories240o240o240, cut_minX, cut_maxX, cut_maxY)\n",
"\n",
"trajectoriesBHM = pd.concat([bhm_trajectories240o050o240, bhm_trajectories240o060o240, bhm_trajectories240o080o240, \n",
" bhm_trajectories240o100o240, bhm_trajectories240o120o240, bhm_trajectories240o150o240, \n",
" bhm_trajectories240o240o240], ignore_index=True)"
]
},
{
......@@ -226,35 +261,39 @@
"# Sum up all measured time deltas of a pedestrian to get the final evacuation time\n",
"copy = trajectoriesReal.copy(deep=True)\n",
"copy[\"timeDelta\"] = copy[\"endTime\"] - copy[\"startTime\"]\n",
"evacuation_time = copy.groupby([\"pedestrianId\"])[\"timeDelta\"].sum()\n",
"\n",
"# trajectories starting from left\n",
"cut_minX = trajectoriesReal[trajectoriesReal[\"endX\"] < 15].groupby([\"pedestrianId\"])[\"endX\"].min().max()\n",
"\n",
"# trajectories starting from right\n",
"cut_maxX = trajectoriesReal[trajectoriesReal[\"endX\"] > 21].groupby([\"pedestrianId\"])[\"endX\"].max().min()\n",
"cut_maxY = trajectoriesReal.groupby([\"pedestrianId\"])[\"endY\"].max().min()\n",
"\n",
"et = copy.groupby([\"pedestrianId\"])[\"timeDelta\"].sum()\n",
"print(\"Evacuation time (real data)\")\n",
"print(\"- mean: {:.2f} [s]\".format(evacuation_time.mean()))\n",
"print(\"- std: {:.2f} [s]\".format(evacuation_time.std()))\n",
"print(\"- min: {:.2f} [s]\".format(evacuation_time.min()))\n",
"print(\"- max: {:.2f} [s]\".format(evacuation_time.max()))\n",
"print(\"- mean: {:.2f} [s]\".format(et.mean()))\n",
"print(\"- std: {:.2f} [s]\".format(et.std()))\n",
"print(\"- min: {:.2f} [s]\".format(et.min()))\n",
"print(\"- max: {:.2f} [s]\".format(et.max()))\n",
"print(\"- minX: {:.2f} [m]\".format(cut_minX))\n",
"print(\"- maxX: {:.2f} [m]\".format(cut_maxX))\n",
"print(\"- maxY: {:.2f} [m]\".format(cut_maxY))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Convert DataFrame to postvis DataFrame"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"rightX = trajectoriesReal[trajectoriesReal.endX < 16].groupby([\"pedestrianId\"])[\"endX\"].min().max()\n",
"leftX = trajectoriesReal[trajectoriesReal.endX > 16].groupby([\"pedestrianId\"])[\"endX\"].max().min()\n",
"topY = trajectoriesReal.groupby([\"pedestrianId\"])[\"endY\"].max().min()\n",
"topY"
"def to_postVis(df):\n",
" simTimeStep = 0.4\n",
" fps = 16\n",
" df['timeStep'] = np.ceil(df['endTime'] / (1/fps)).astype(np.int)\n",
" df['x'] = df['endX']\n",
" df['y'] = df['endY']\n",
" df['simTime'] = df['endTime']\n",
" df = df.drop(columns=['startX','startY','endX','endY','startTime', 'endTime']) \n",
" return df"
]
},
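{
"cell_type": "markdown",
"metadata": {},
"source": [
"A short usage sketch for `to_postVis`: it assumes `trajectoriesReal` from the loading cell above and writes the converted DataFrame to a whitespace-separated file readable by the Vadere PostVis; the output file name is arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: convert the concatenated experiment trajectories and export them for PostVis.\n",
"# Assumes trajectoriesReal from the loading cell above; 'experiment.trajectories' is an arbitrary file name.\n",
"postvis_df = to_postVis(trajectoriesReal.copy(deep=True))\n",
"postvis_df.to_csv('experiment.trajectories', index=False, sep=' ')\n",
"postvis_df.head(10)"
]
},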
{
......@@ -290,7 +329,7 @@
" return trajectories.query(query)\n",
"\n",
"def get_trajectories(t, trajectories):\n",
" return trajectories[np.logical_and(trajectories.startTime <= t, trajectories.endTime >= t)]\n",
" return trajectories[np.logical_and(trajectories.startTime <= t, trajectories.endTime > t)]\n",
"\n",
"def get_pedestrianIds(trajectories):\n",
" return trajectories['pedestrianId'].unique()\n",
......@@ -346,6 +385,11 @@
" #assert (i1 is np.nan and i2 is not np.nan) or (i1 is not np.nan and i2 is np.nan)\n",
" y = ymax if i2 is np.nan or (i1 is not np.nan and i1 < i2) else ymin\n",
" i = i1 if y == ymax else i2\n",
" if i is np.nan:\n",
" print(\"i1:\"+str(i1))\n",
" print(\"i2:\"+str(i2))\n",
" \n",
" assert i is not np.nan\n",
" #print(i)\n",
" # cut the footstep at the tail to exactly fit xmin or xmax\n",
" fs = trajectory.loc[i]\n",
......@@ -372,12 +416,12 @@
"\n",
"def cuttail_trajectory_by(trajectory, xmin, xmax):\n",
" #i1 = trajectory[np.logical_and(trajectory.endX >= xmax, trajectory.startX < xmax)].index.max()\n",
" i1 = trajectory[np.logical_or(trajectory.endX >= xmax, trajectory.startX is np.nan)].index.max()\n",
" i2 = trajectory[np.logical_or(trajectory.endX <= xmin, trajectory.startX is np.nan)].index.max()\n",
" i1 = trajectory[trajectory.startX >= xmax].index.max()\n",
" i2 = trajectory[trajectory.startX <= xmin].index.max()\n",
" #assert (i1 is np.nan and i2 is not np.nan) or (i1 is not np.nan and i2 is np.nan)\n",
" x = xmax if i2 is np.nan or (i1 is not np.nan and i1 > i2) else xmin\n",
" i = i1 if x == xmax else i2\n",
" i = i+1\n",
" assert i is not np.nan\n",
" # cut the footstep at the tail to exactly fit xmin or xmax\n",
" fs = trajectory.loc[i]\n",
" start = np.array([fs[\"startX\"], fs[\"startY\"]])\n",
......@@ -388,21 +432,16 @@
" velocity = fs[\"velocity\"]\n",
" d = end - start\n",
" if abs(fs[\"endX\"] - fs[\"startX\"]) > 0.00001:\n",
" r = (x - fs[\"startX\"]) / (fs[\"endX\"] - fs[\"startX\"])\n",
" end = start + (d * r)\n",
" r = (fs[\"endX\"] - x) / (fs[\"endX\"] - fs[\"startX\"])\n",
" start = end - (d * r)\n",
" time = fs[\"endTime\"] - fs[\"startTime\"]\n",
" endTime = fs[\"startTime\"] + (time * r)\n",
" startTime = fs[\"endTime\"] - (time * r)\n",
" distance = np.linalg.norm(end - start)\n",
" if abs(endTime - startTime) < 0.00001:\n",
" if distance < 0.00001:\n",
" velocity = 0\n",
" else:\n",
" raise exception\n",
" else:\n",
" velocity = distance / (endTime - startTime)\n",
" velocity = distance / (endTime - startTime)\n",
" assert startTime <= endTime\n",
" \n",
" df = trajectory.loc[i+1:]\n",
" llist = [[fs[\"pedestrianId\"],fs[\"startX\"],fs[\"startY\"],fs[\"startTime\"],end[0],end[1],endTime,distance,velocity]]\n",
" llist = [[fs[\"pedestrianId\"],start[0],start[1],startTime,fs[\"endX\"],fs[\"endY\"],fs[\"endTime\"],distance,velocity]]\n",
" df_tail = pd.DataFrame(llist, columns=['pedestrianId','startX','startY','startTime','endX','endY','endTime','distance','velocity'])\n",
" df_tail = df_tail.append(df, ignore_index=True)\n",
" return df_tail\n",
......@@ -421,20 +460,10 @@
" df = df.append(cuttail_trajectory_by(get_trajectory(pedId, trajectories), xmin, xmax), ignore_index=True)\n",
" return df\n",
"\n",
"def cut(trajectories):\n",
"def cut(trajectories, cut_minX, cut_maxX, cut_maxY):\n",
" df = cuttail_by(trajectories, cut_minX, cut_maxX)\n",
" df = cuthead_by(df, -1000, cut_maxY)\n",
" return df\n",
"\n",
"traj = get_trajectory(2, trajectoriesReal)\n",
"ts = traj[traj.endX > 22].index.max()\n",
"ts\n",
"#cut(trajectoriesReal)\n",
"cuttail_by(trajectoriesReal, cut_minX, cut_maxX).head()\n",
"#cuthead_by(trajectoriesReal, 0, 4).tail()\n",
"#traj.loc[100]\n",
"#trajectoriesReal.tail()\n",
"trajectoriesReal.head()"
" return df"
]
},
{
......@@ -463,6 +492,20 @@
"def mean_velocity_at(t, trajectories):\n",
" return get_trajectories(t, trajectories)['velocity'].mean()\n",
"\n",
"def evacuation_times(trajectories):\n",
" pedIds = get_pedestrianIds(trajectories)\n",
" rows = []\n",
" for pedId in pedIds:\n",
" evacTime = evacuation_time(pedId, trajectories)\n",
" rows.append([pedId, evacTime])\n",
" return pd.DataFrame(rows, columns=['pedestrianId', 'evacuationTime'])\n",
" \n",
"def evacuation_time(pedId, trajectories):\n",
" traj = get_trajectory(pedId, trajectories)\n",
" start = traj.iloc[0]['endTime']\n",
" end = traj.iloc[len(traj)-1]['endTime']\n",
" return end - start\n",
"\n",
"def trajectory_length(trajectory):\n",
" \"\"\"Euclidean length of a trajectory.\"\"\"\n",
" dx = trajectory['startX']-trajectory['endX']\n",
......@@ -602,26 +645,16 @@
"metadata": {},
"outputs": [],
"source": [
"#start_time(get_trajectory(1, ptrajectories))\n",
"#max_start_time(ptrajectories)\n",
"#end_time(get_trajectory(1, ptrajectories))\n",
"#foot_step_length(get_footstep(get_trajectory(1, ptrajectories), 0))\n",
"#trajectory_length(get_trajectory(1, ptrajectories))\n",
"#trajectory_speed(get_trajectory(1, ptrajectories))\n",
"#cutTraj.mask(cutTraj['startTime'] <= 4 and 4 > cutTraj['endTime'])\n",
"#start_time(get_trajectory(1, ptrajectories))\n",
"#trajectories_position(ptrajectories, [1,2,3,4]).head()\n",
"trajPos1 = trajectories_position(get_trajectory(2, ptrajectories), [1,2,3,4,5,6,8,9,10,11,12,13])\n",
"trajPos2 = trajectories_position(get_trajectory(7, ptrajectories), [1,2,3,4,5,6,8,9,10,11,12,13])\n",
"trajPos1 = trajectories_position(get_trajectory(2, osm_trajectories240o120o240), [1,2,3,4,5,6,8,9,10,11,12,13])\n",
"trajPos2 = trajectories_position(get_trajectory(7, osm_trajectories240o120o240), [1,2,3,4,5,6,8,9,10,11,12,13])\n",
"trajPos1 = trajPos1[~np.isnan(trajPos1.x)]\n",
"trajPos2 = trajPos2[~np.isnan(trajPos2.x)]\n",
"euclid_path_length(trajPos1, trajPos2)\n",
"euclid_len(ptrajectories,0,10000)\n",
"#print(total_inter_agent(ptrajectories, ptrajectories, [1,2]))\n",
"euclid_len(osm_trajectories240o120o240,0,10000)\n",
"t = 0.5\n",
"ttraj = ptrajectories[np.logical_and(ptrajectories.startTime <= t, ptrajectories.endTime >= t)]\n",
"#ptrajectories[\"velocity\"] = numpy.linalg.norm(\n",
"get_trajectories(0.5, ptrajectories).head()"
"ttraj = osm_trajectories240o120o240[np.logical_and(osm_trajectories240o120o240.startTime <= t, osm_trajectories240o120o240.endTime >= t)]\n",
"get_trajectories(0.5, osm_trajectories240o120o240).head()\n",
"#osm_trajectories240o120o240"
]
},
{
......@@ -655,23 +688,6 @@
" return match"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we cut all trajectory data such each left trajectory starts at the same $x$-coordinate and each right trajectory starts at the same $x$-coordinate. In addition each trajectory ends a the same $y$-coordinate."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"c_real_trajectories = cut(trajectoriesReal)\n",
"c_sim_trajecotories = cut(ptrajectories)"
]
},
{
"cell_type": "markdown",
"metadata": {},
......@@ -696,39 +712,401 @@
" c = current_palette[2]\n",
" else:\n",
" c = current_palette[0]\n",
" return x, y, Line2D(x, y, color=c, linewidth=0.3)\n",
" return x, y, Line2D(x, y, color=c, linewidth=0.4)\n",
"\n",
"def add_lines(trajectories, xleft, ax):\n",
" grouped = trajectories.groupby(['pedestrianId'])\n",
" for name, group in grouped:\n",
" x, y, line = to_line(group, xleft)\n",
" ax.add_line(line)"
" ax.add_line(line)\n",
" \n",
"def contains(x,y,rect):\n",
" #ma = mpl.patches.Rectangle((16.3,6.0), 2.4, 2.0)\n",
" return x >= rect.get_x() and y >= rect.get_y() and x <= rect.get_x() + rect.get_width() and y <= rect.get_y() + rect.get_height()\n",
"\n",
"def filter_by_time_and_place(t, rect, trajectories):\n",
" \"\"\"returns a subset of trajectories i.e. at most one footstep for each pedestrian / agent such that the footstep the position (x,y) is the position of the\n",
" agent at the time t contained in the rectanlge rect. Two new colums will be added for x and y.\"\"\"\n",
" traj = get_trajectories(t, trajectories)\n",
" #TODO: this is very very memory expensive!\n",
" traj.loc[:,'x'] = traj.loc[:,'startX'] + (traj.loc[:,'endX'] - traj.loc[:,'startX']) * (t - traj.loc[:,'startTime']) / (traj.loc[:,'endTime'] - traj.loc[:,'startTime'])\n",
" traj.loc[:,'y'] = traj.loc[:,'startY'] + (traj.loc[:,'endY'] - traj.loc[:,'startY']) * (t - traj.loc[:,'startTime']) / (traj.loc[:,'endTime'] - traj.loc[:,'startTime'])\n",
" #traj.loc[:,'x'] = traj.loc[:,'startX']\n",
" #traj.loc[:,'y'] = traj.loc[:,'startY']\n",
" traj = traj[traj.apply(lambda x: contains(x['x'], x['y'],rect), axis=1)]\n",
" return traj\n",
"\n",
"def density_velocity(t, rect, trajectories):\n",
" area = rect.get_width() * rect.get_height()\n",
" traj = filter_by_time_and_place(t, rect, trajectories)\n",
" meanVelocity = traj.loc[:,'velocity'].mean();\n",
" number_of_peds = len(traj)\n",
" traj = None\n",
" #gc.collect()\n",
" if number_of_peds == 0:\n",
" return np.nan, np.nan\n",
" else:\n",
" return number_of_peds / area, meanVelocity\n",
"\n",
"def density(t, rect, trajectories):\n",
" area = rect.get_width() * rect.get_height()\n",
" traj = filter_by_time_and_place(t, rect, trajectories)\n",
" number_of_peds = len(traj)\n",
" traj = None\n",
" #gc.collect()\n",
" if number_of_peds == 0:\n",
" return np.nan\n",
" else:\n",
" return number_of_peds / area"
]
},
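{
"cell_type": "markdown",
"metadata": {},
"source": [
"An illustrative sketch of how `density_velocity` can be evaluated on a grid of points in time to obtain a density and mean-speed series for one measurement area. The rectangle matches the front measurement area (`measurementArea_front`) used in the plotting cell below; the time range is arbitrary and assumes `osm_trajectories240o150o240` has already been loaded and cut."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: density and mean velocity in the front measurement area over time.\n",
"# The rectangle corresponds to measurementArea_front below; choose the time range within the duration of the run.\n",
"rect_front = mpl.patches.Rectangle((16.3, 6.0), 2.4, 2.0)\n",
"times = np.linspace(5, 40, 36)\n",
"dv = [density_velocity(t, rect_front, osm_trajectories240o150o240) for t in times]\n",
"df_dv = pd.DataFrame(dv, columns=['density', 'meanVelocity'])\n",
"df_dv['time'] = times\n",
"df_dv.head()"
]
},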
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"x_vcenter = 17.5\n",
"y_vcenter = 5.2\n",
"\n",
"fig_trajectories = plt.figure(figsize=(10,10))\n",
"ax1_trajectories = fig_trajectories.add_subplot(121)\n",
"add_lines(c_real_trajectories, 16, ax1_trajectories)\n",
"ax1_trajectories.set_xlim(x_vcenter-5, x_vcenter+6)\n",
"ax1_trajectories.set_ylim(y_vcenter-4, y_vcenter+4)\n",
"xmin = x_vcenter-4\n",
"xmax = x_vcenter+5\n",
"ymin = y_vcenter-3.2\n",
"ymax = y_vcenter+3\n",
"\n",
"alp = 0.549020\n",
"measurementArea_front = mpl.patches.Rectangle((16.3,6.0), 2.4, 2.0, color='r', alpha = alp)\n",
"measurementArea_left = mpl.patches.Rectangle((14.2,1.8), 2.0, 2.4, color='r', alpha = alp)\n",
"measurementArea_right = mpl.patches.Rectangle((19.7,1.8), 2.0, 2.4, color='r', alpha = alp)\n",
"\n",
"fig_trajectories = plt.figure(figsize=(20,20))\n",
"ax1_trajectories = fig_trajectories.add_subplot(131)\n",
"ax1_trajectories.add_patch(measurementArea_left)\n",
"ax1_trajectories.add_patch(measurementArea_right)\n",
"ax1_trajectories.add_patch(measurementArea_front)\n",
"add_lines(trajectories240o150o240, 16, ax1_trajectories)\n",
"ax1_trajectories.set_title(\"Experiment\")\n",
"ax1_trajectories.set_xlim(xmin, xmax)\n",
"ax1_trajectories.set_ylim(ymin, ymax)\n",
"ax1_trajectories.set_aspect(1)\n",
"\n",
"ax2_trajectories = fig_trajectories.add_subplot(122, sharey=ax1)\n",
"add_lines(c_sim_trajecotories, 16, ax2_trajectories)\n",
"ax2_trajectories = fig_trajectories.add_subplot(132, sharey=ax1_trajectories)\n",
"ax2_trajectories.add_patch(mpl.patches.Rectangle((16.3,6.0), 2.4, 2.0, color='r', alpha = alp))\n",
"ax2_trajectories.add_patch(mpl.patches.Rectangle((14.2,1.8), 2.0, 2.4, color='r', alpha = alp))\n",
"ax2_trajectories.add_patch(mpl.patches.Rectangle((19.7,1.8), 2.0, 2.4, color='r', alpha = alp))\n",
"add_lines(osm_trajectories240o150o240, 16, ax2_trajectories)\n",
"plt.setp(ax2_trajectories.get_yticklabels(), visible=False)\n",
"ax2_trajectories.set_xlim(x_vcenter-5, x_vcenter+6)\n",
"ax2_trajectories.set_ylim(y_vcenter-4, y_vcenter+4)\n",
"ax2_trajectories.set_title(\"OSM\")\n",
"ax2_trajectories.set_xlim(xmin, xmax)\n",
"ax2_trajectories.set_ylim(ymin, ymax)\n",
"ax2_trajectories.set_aspect(1)\n",
"\n",
"plt.show()"
"ax3_trajectories = fig_trajectories.add_subplot(133, sharey=ax2_trajectories)\n",
"ax3_trajectories.add_patch(mpl.patches.Rectangle((16.3,6.0), 2.4, 2.0, color='r', alpha = alp))\n",
"ax3_trajectories.add_patch(mpl.patches.Rectangle((14.2,1.8), 2.0, 2.4, color='r', alpha = alp))\n",
"ax3_trajectories.add_patch(mpl.patches.Rectangle((19.7,1.8), 2.0, 2.4, color='r', alpha = alp))\n",
"add_lines(bhm_trajectories240o150o240, 16, ax3_trajectories)\n",
"plt.setp(ax3_trajectories.get_yticklabels(), visible=False)\n",
"ax3_trajectories.set_title(\"BHM\")\n",
"ax3_trajectories.set_xlim(xmin, xmax)\n",
"ax3_trajectories.set_ylim(ymin, ymax)\n",
"ax3_trajectories.set_aspect(1)\n",
"plt.savefig('./trajectories.pdf', bbox_inches='tight')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Computation of evacuation times"
]
},
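{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell labels the evacuation times of each scenario and model explicitly. The same table could also be built with a small loop, as in the following sketch, which assumes the cut trajectory DataFrames from the loading cell above (only the 50 cm scenario is spelled out; the other widths follow analogously)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: build the labelled evacuation-time table with a loop instead of repeating three lines per scenario.\n",
"# Only the 50 cm scenario is listed; extend the dict analogously for 60, 80, 100, 120, 150 and 240.\n",
"datasets = {\n",
"    ('Experiment', '$b_{in} = 50$'): trajectories240o050o240,\n",
"    ('OSM', '$b_{in} = 50$'): osm_trajectories240o050o240,\n",
"    ('BHM', '$b_{in} = 50$'): bhm_trajectories240o050o240,\n",
"}\n",
"frames = []\n",
"for (model, scenario), traj in datasets.items():\n",
"    et = evacuation_times(traj)\n",
"    et['scenario'] = scenario\n",
"    et['model'] = model\n",
"    frames.append(et)\n",
"evac_times = pd.concat(frames, ignore_index=True)\n",
"evac_times.head()"
]
},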
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#240-050-240\n",
"evacTime240o050o240 = evacuation_times(trajectories240o050o240)\n",
"evacTime240o050o240['scenario'] = '$b_{in} = 50$'\n",
"evacTime240o050o240['model'] = 'Experiment'\n",
"\n",
"osm_evacTime240o050o240 = evacuation_times(osm_trajectories240o050o240)\n",
"osm_evacTime240o050o240['scenario'] = '$b_{in} = 50$'\n",
"osm_evacTime240o050o240['model'] = 'OSM'\n",
"\n",
"bhm_evacTime240o050o240 = evacuation_times(bhm_trajectories240o050o240)\n",
"bhm_evacTime240o050o240['scenario'] = '$b_{in} = 50$'\n",
"bhm_evacTime240o050o240['model'] = 'BHM'\n",
"\n",
"#240-060-240\n",
"evacTime240o060o240 = evacuation_times(trajectories240o060o240)\n",
"evacTime240o060o240['scenario'] = '$b_{in} = 60$'\n",
"evacTime240o060o240['model'] = 'Experiment'\n",
"\n",
"osm_evacTime240o060o240 = evacuation_times(osm_trajectories240o060o240)\n",
"osm_evacTime240o060o240['scenario'] = '$b_{in} = 60$'\n",
"osm_evacTime240o060o240['model'] = 'OSM'\n",
"\n",
"bhm_evacTime240o060o240 = evacuation_times(bhm_trajectories240o060o240)\n",
"bhm_evacTime240o060o240['scenario'] = '$b_{in} = 60$'\n",
"bhm_evacTime240o060o240['model'] = 'BHM'\n",
"\n",
"#240-080-240\n",
"evacTime240o080o240 = evacuation_times(trajectories240o080o240)\n",
"evacTime240o080o240['scenario'] = '$b_{in} = 80$'\n",
"evacTime240o080o240['model'] = 'Experiment'\n",
"\n",
"osm_evacTime240o080o240 = evacuation_times(osm_trajectories240o080o240)\n",
"osm_evacTime240o080o240['scenario'] = '$b_{in} = 80$'\n",
"osm_evacTime240o080o240['model'] = 'OSM'\n",
"\n",
"bhm_evacTime240o080o240 = evacuation_times(bhm_trajectories240o080o240)\n",
"bhm_evacTime240o080o240['scenario'] = '$b_{in} = 80$'\n",
"bhm_evacTime240o080o240['model'] = 'BHM'\n",
"\n",