diff --git a/Pipfile b/Pipfile index b723d01..d6d268f 100644 --- a/Pipfile +++ b/Pipfile @@ -6,6 +6,14 @@ verify_ssl = true [dev-packages] [packages] +pandas = "*" +numpy = "*" +ipython = "*" +altair = "*" +vegascope = "*" +jupyter = "*" +pyqt5 = "*" +vega = "*" [requires] python_version = "3.7" diff --git a/benchmark-idcbs-hour-long.ipynb b/benchmark-idcbs-hour-long.ipynb new file mode 100644 index 0000000..56fb3fe --- /dev/null +++ b/benchmark-idcbs-hour-long.ipynb @@ -0,0 +1,1149 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "import altair as alt\n", + "from altair import datum, expr\n", + "alt.data_transformers.enable('json')\n", + "alt.data_transformers.disable_max_rows()\n", + "alt.renderers.enable('notebook')\n", + "\n", + "filenames_and_names_tuples = [('benchmark_cbsh_no_lpa_timeout_3600.csv', 'CBS',\n", + " 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600-in-container.csv', 'IDCBS',\n", + " True, 3600, ),\n", + " ('benchmark_ecbsh_1_01_no_lpa.csv', 'ECBS(1.01)', \n", + " 'benchmark_ecbsh_1_01_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv', 'ECBS(1.01) w/ LCA-jumping',\n", + " 'benchmark_ecbsh_1_05_no_lpa.csv', 'ECBS(1.05)', \n", + " 'benchmark_ecbsh_1_05_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv', 'ECBS(1.05) w/ LCA-jumping', \n", + "# 'benchmark_cbsh_no_lpa.csv', 'CBS',\n", + "# 'benchmark_ecbsh_1_00_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv', 'CBS w/ LCA-jumping', \n", + " True, 60),\n", + " ]\n", + "filenames_and_names_tuple = filenames_and_names_tuples[0]\n", + "\n", + "only_first_scenario = filenames_and_names_tuple[-2]\n", + "data_parts = []\n", + "solver_names = []\n", + "for i in range(int(len(filenames_and_names_tuple) / 2 - 1)):\n", + " data_part = 
pd.read_csv(filenames_and_names_tuple[2*i],\n", + " na_values=['=NA()'], # Additional #NA values\n", + " )\n", + " data_part.solver = filenames_and_names_tuple[2*i + 1]\n", + " solver_names.append(filenames_and_names_tuple[2*i + 1])\n", + " data_parts.append(data_part)\n", + " \n", + "# cbs = pd.read_csv(filenames_and_names_tuple[0],\n", + "# na_values=['=NA()'], # Additional #NA values\n", + "# )\n", + "# baseline_name = filenames_and_names_tuple[2]\n", + "# cbs.solver = baseline_name\n", + "# idcbs_lpa = pd.read_csv(filenames_and_names_tuple[1], na_values=['=NA()'])\n", + "# improved_name = filenames_and_names_tuple[3]\n", + "# idcbs_lpa.solver = improved_name\n", + "# data = pd.concat([cbs, idcbs_lpa])\n", + "data = pd.concat(data_parts)\n", + "data.Runtime.fillna(data['Wall Runtime'], inplace=True) # When memory runs out we only have the wall runtime (timed externally by the script)\n", + "#data['Max Mem (kB)'].replace(to_replace={'8g': 8000000}, inplace=True)\n", + "data.drop(\n", + " columns=set(data.columns) - {'Runtime', 'instance', 'solver', 'HL Expanded', 'HL Generated',\n", + " 'Num of Agents', 'MDD Time', 'HL Heuristic Time', 'Up&Down Time',\n", + " 'HL runtime', 'LL runtime', 'CAT Time', 'HL Node Verification Time',\n", + " 'Max Mem (kB)', 'Cost'\n", + " }, inplace=True) # Drop columns we aren't using to speed things up later\n", + "data['timeout'] = filenames_and_names_tuple[-1]\n", + "data['mem_usage_cap'] = 8000 # In mB\n", + "map_open_cells_counts = { # From the benchmarks page\n", + " 'ost003d':13214,\n", + " 'den502d':27235,\n", + " 'den520d':28178,\n", + " 'brc202d':43151,\n", + " 'empty-8-8':64,\n", + " 'empty-16-16':256,\n", + " 'empty-32-32':1024,\n", + " 'empty-48-48':2304,\n", + " 'random-32-32-10':922,\n", + " 'random-32-32-20':819,\n", + " 'random-64-64-10':3687,\n", + " 'random-64-64-20':3270,\n", + " 'maze-128-128-1':8191,\n", + " 'maze-128-128-2':10858,\n", + " 'maze-128-128-10':14818,\n", + " 'maze-32-32-2':666,\n", + " 
'room-32-32-4':682,\n", + " 'room-64-64-8':3232,\n", + " 'room-64-64-16':3648,\n", + " 'den312d':2445,\n", + " 'orz900d':96603,\n", + " 'ht_chantry':7461,\n", + " 'ht_mansion_n':8959,\n", + " 'lak303d':14784,\n", + " 'lt_gallowstemplar_n':10021,\n", + " 'w_woundedcoast':34020,\n", + " 'Berlin_1_256':47540,\n", + " 'Boston_0_256':47768,\n", + " 'Paris_1_256':47240,\n", + " 'warehouse-10-20-10-2-1':5669,\n", + " 'warehouse-10-20-10-2-2':9776,\n", + " 'warehouse-20-40-10-2-1':22599,\n", + " 'warehouse-20-40-10-2-2':38756,\n", + "}\n", + "map_groups = {\n", + " 'ost003d':'Games',\n", + " 'den502d':'Games',\n", + " 'den520d':'Games',\n", + " 'brc202d':'Games',\n", + " 'empty-8-8':'Empty',\n", + " 'empty-16-16':'Empty',\n", + " 'empty-32-32':'Empty',\n", + " 'empty-48-48':'Empty',\n", + " 'random-32-32-10':'Random',\n", + " 'random-32-32-20':'Random',\n", + " 'random-64-64-10':'Random',\n", + " 'random-64-64-20':'Random',\n", + " 'maze-128-128-1':'Mazes',\n", + " 'maze-128-128-2':'Mazes',\n", + " 'maze-128-128-10':'Mazes',\n", + " 'maze-32-32-2':'Mazes',\n", + " 'room-32-32-4':'Rooms',\n", + " 'room-64-64-8':'Rooms',\n", + " 'room-64-64-16':'Rooms',\n", + " 'den312d':'Games',\n", + " 'orz900d':'Games',\n", + " 'ht_chantry':'Games',\n", + " 'ht_mansion_n':'Games',\n", + " 'lak303d':'Games',\n", + " 'lt_gallowstemplar_n':'Games',\n", + " 'w_woundedcoast':'Games',\n", + " 'Berlin_1_256':'City',\n", + " 'Boston_0_256':'City',\n", + " 'Paris_1_256':'City',\n", + " 'warehouse-10-20-10-2-1':'Warehouse',\n", + " 'warehouse-10-20-10-2-2':'Warehouse',\n", + " 'warehouse-20-40-10-2-1':'Warehouse',\n", + " 'warehouse-20-40-10-2-2':'Warehouse',\n", + "}\n", + "data['open_map_cells'] = data['instance'].apply(lambda x: x.rsplit('-', 2)[0]).str.partition('scen/')[2].map(map_open_cells_counts)\n", + "data['map_group'] = data['instance'].apply(lambda x: x.rsplit('-', 2)[0]).str.partition('scen/')[2].map(map_groups)\n", + "data['mem_usage_per_second'] = data['Max Mem (kB)'] / 
data['Runtime']\n", + "data.rename(columns={'HL Expanded': 'HL_Expanded'}, inplace=True)\n", + "data.rename(columns={'HL Generated': 'HL_Generated'}, inplace=True)\n", + "data.rename(columns={'Max Mem (kB)': 'Max_Mem'}, inplace=True)\n", + "data.Max_Mem /= 1000_000 # Gigabytes (https://en.wikipedia.org/wiki/Mebibyte)\n", + "\n", + "instance_dropdown = alt.binding_select(options=[None] + list(data.instance.unique()), )\n", + "instance_selection = alt.selection_single(fields=['instance'], bind=instance_dropdown, name='Selected',\n", + "# empty='none', # When enabled the chart is drawn in size 0x0 and isn't updated later\n", + " )\n", + "\n", + "runtime_nearest_x = alt.selection(type='single', nearest=True, on='mouseover',\n", + " fields=['Num of Agents'], empty='none')\n", + "\n", + "\n", + "runtime_chart = alt.Chart().mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents',\n", + " type='ordinal',\n", + " scale=alt.Scale(\n", + "# domain=(2,XXX), Set Values here\n", + " ),\n", + " axis=alt.Axis(titleFontSize=14, \n", + " #labelFontWeight=alt.FontWeight(500)\n", + " #labelFontStyle=alt.FontStyle('bold')\n", + " labelFontSize=14, \n", + " ) # labelFontWeight=400 is normal, 700 is bold, multiples of 100 only\n", + " ),\n", + " alt.Y('Runtime',\n", + " type='quantitative',\n", + " scale=alt.Scale(\n", + "# domain=(0,3650), # Doesn't work well with log scale\n", + " clamp=True,\n", + " type='log',\n", + " ),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Runtime (s)', titleFontSize=14,\n", + " labelFontSize=14\n", + " ), # Force the title not to include \",timeout\" from the other layer\n", + " #impute=alt.ImputeParams(value=36000),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver\",\n", + " title='',\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=200,\n", + " symbolStrokeWidth=3, labelLimit=600, orient='bottom',\n", + " offset=0,)))\n", + "# .transform_filter(datum.Runtime<3600)\\\n", 
+ "# .transform_filter(datum.Runtime>=1) Doesn't work well with impute if one of the solvers is above and the other isn't\n", + "\n", + "\n", + "\n", + "runtime_timeout_rule = alt.Chart()\\\n", + " .mark_rule(color='firebrick')\\\n", + " .encode(\n", + " y=alt.Y(\n", + " 'timeout',\n", + " ),\n", + " size=alt.SizeValue(2)\n", + " )\n", + "\n", + "# runtime_timeout_text = alt.Chart()\\\n", + "# .mark_text(\n", + "# align='right',\n", + "# baseline='middle',\n", + "# dx=0,\n", + "# dy=-5, # Slightly above the line\n", + "# size=8,\n", + "# fontWeight=200, # Half of weight of fontStyle='normal'\n", + "# )\\\n", + "# .encode(\n", + "# text=alt.value('timeout'),\n", + "# color=alt.value('black')\n", + "# )\n", + "\n", + "# Transparent selectors across the chart. This is what tells us\n", + "# the x-value of the cursor when it isn't immediately on a line.\n", + "runtime_selectors = alt.Chart().mark_point().encode(\n", + " x='Num of Agents:O',\n", + " opacity=alt.value(0),\n", + ").add_selection( # Make the selection select \n", + " runtime_nearest_x\n", + ")\n", + "\n", + "runtime_nearest_points = runtime_chart.mark_point()\\\n", + " .encode(opacity=alt.condition(runtime_nearest_x, alt.value(1), alt.value(0)),)\\\n", + " .transform_filter(runtime_nearest_x)\n", + "\n", + "runtime_nearest_text = runtime_nearest_points.mark_text(align='left', dx=5, dy=-5).encode(\n", + " text=alt.condition(runtime_nearest_x, 'Runtime:Q', alt.value(''))\n", + ")\n", + "\n", + "runtime_nearest_rule = runtime_nearest_text.mark_rule(color='gray')\\\n", + " .encode(x='Num of Agents:O')\\\n", + " .transform_filter(runtime_nearest_x)\\\n", + " .add_selection(instance_selection) # Add the selection on the latest mark of the data (it may only be added once)\n", + "\n", + "runtime_chart = alt.layer(runtime_chart,\n", + " runtime_timeout_rule,\n", + "# runtime_timeout_text,\n", + " runtime_selectors,\n", + " runtime_nearest_points,\n", + " runtime_nearest_text,\n", + " runtime_nearest_rule,\n", + " 
data=data)\\\n", + " .transform_filter(instance_selection)\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "mem_usage_chart = alt.Chart()\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14,\n", + " )\n", + " ),\n", + " alt.Y('Max_Mem',\n", + " type='quantitative',\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Memory Usage (GB)',\n", + " titleFontSize=14, \n", + " labelFontSize=14, \n", + " ),\n", + " scale=alt.Scale(type='log',)\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver\",\n", + " title='',\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=200,\n", + " symbolStrokeWidth=3, labelLimit=600, orient='bottom',\n", + " offset=0,)))\\\n", + " .transform_filter(instance_selection)\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "mem_usage_cap_rule = alt.Chart()\\\n", + " .mark_rule(color='firebrick')\\\n", + " .encode(\n", + " y=alt.Y(\n", + " 'mem_usage_cap',\n", + " type='quantitative',\n", + " ),\n", + " size=alt.SizeValue(2)\n", + " )\n", + "\n", + "mem_usage_chart = alt.layer(mem_usage_chart,\n", + " mem_usage_cap_rule,\n", + " data=data\n", + " )\n", + "\n", + "hl_expanded_chart = alt.Chart(data)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14, \n", + " )\n", + " ),\n", + " alt.Y('HL_Expanded',\n", + " type='quantitative',\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='HL Expanded Nodes', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " #scale=alt.Scale(type='log',\n", + " #domain=(0, 1000), clamp=True,\n", + " # ),\n", + " #impute=alt.ImputeParams(value=10000), # There isn't a way in vegalite 3, which altair is based on, to impute with a non-preset function\n", + " ),\n", 
+ " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title='Solver'\n", + " title='', titleFontSize=16,\n", + " labelFontSize=16, symbolSize=200,\n", + " symbolStrokeWidth=3, labelLimit=600, orient='bottom',\n", + " offset=0,)))\\\n", + " .transform_filter(instance_selection)\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "# .transform_filter(datum.Runtime<60)\\\n", + "\n", + "# hl_expanded_points = hl_expanded_chart.mark_point()\\\n", + "# .encode(opacity=alt.condition(nearest, alt.value(1), alt.value(0)))\\\n", + "# .add_selection(nearest)\n", + "# hl_expanded_chart = alt.layer(hl_expanded_chart, hl_expanded_points)\n", + "\n", + "hl_generated_chart = alt.Chart(data)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14, \n", + " )\n", + " ),\n", + " alt.Y('HL_Generated',\n", + " type='quantitative',\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='HL Generated Nodes', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " scale=alt.Scale(type='log',\n", + " #domain=(0, 1000), clamp=True,\n", + " ),\n", + " #impute=alt.ImputeParams(value=10000),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(title='', labelFontSize=16, symbolSize=200,\n", + " symbolStrokeWidth=3, labelLimit=600, orient='bottom',\n", + " offset=0,)))\\\n", + " .transform_filter(instance_selection)\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "# .transform_filter(datum.Runtime<60)\\\n", + "# .interactive() not good\n", + "\n", + "# TODO: Add selectors and rules for the bottom two charts\n", + "\n", + "joint_chart = alt.vconcat(runtime_chart, mem_usage_chart,\n", + "# hl_expanded_chart,\n", + " hl_generated_chart)\n", + "joint_chart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "all_success_row_groups 
= []\n", + "success_rate_rows = []\n", + "from collections import Counter\n", + "successfully_solved = Counter()\n", + "num_problem_instances = num_all_failures = num_actual_instances = 0\n", + "max_num_agents = data['Num of Agents'].max()\n", + "solver_names_set = set(solver_names)\n", + "for instance, instance_rows in data.groupby(['instance',]):\n", + " if only_first_scenario and not instance.endswith('-1.scen'):\n", + " print(f'Skipping scenario {instance}')\n", + " continue\n", + " print(f'scenario {instance}')\n", + " map_size = instance_rows['open_map_cells'].values[0]\n", + " map_group = instance_rows['map_group'].values[0]\n", + " for num_agents in range(2, max_num_agents + 1):\n", + " num_problem_instances += 1\n", + " instance_num_agents_rows = instance_rows.loc[instance_rows['Num of Agents'] == num_agents]\n", + " if all(instance_num_agents_rows.Cost.values < 0): # Also works if there are zero rows\n", + " num_all_failures += 1\n", + " if len(instance_num_agents_rows) != 0:\n", + " num_actual_instances += 1\n", + " instance_num_agents_success_rows = instance_num_agents_rows.loc[instance_num_agents_rows.Cost >= 0]\n", + " instance_num_agents_explicit_failures_rows = instance_num_agents_rows.loc[instance_num_agents_rows.Cost < 0]\n", + " if len(instance_num_agents_success_rows) == len(solver_names):\n", + " all_success_row_groups.append(instance_num_agents_success_rows)\n", + " for solver_name in list(instance_num_agents_success_rows.solver.values):\n", + " success_rate_rows.append((num_agents, instance, 1, map_size, solver_name, map_group, None))\n", + " successfully_solved[solver_name] += 1\n", + " for index, row in instance_num_agents_explicit_failures_rows.iterrows():\n", + " success_rate_rows.append((num_agents, instance, 0, map_size, row.solver, map_group, {-2:'Memory failure', -1:'Timeout failure'}[row.Cost]))\n", + " for solver_name in solver_names_set - set(instance_num_agents_success_rows.solver.values) - 
set(instance_num_agents_explicit_failures_rows):\n", + " success_rate_rows.append((num_agents, instance, 0, map_size, solver_name, map_group, 'Implicit failure'))\n", + "all_success_data = pd.concat(all_success_row_groups)\n", + "success_rate_data = pd.DataFrame(success_rate_rows, columns=('Num of Agents', 'instance', 'success', 'map_size', 'solver', 'map_group', 'failure_reason'))\n", + "num_all_success_groups = len(all_success_row_groups)\n", + "del all_success_row_groups\n", + "del success_rate_rows\n", + "print(f'num_problem_instances={num_problem_instances}, '\n", + " f'all solved: {num_all_success_groups}, '\n", + " f'none solved:{num_all_failures}, '\n", + " f'actual instances: {num_actual_instances}'\n", + ")\n", + "for solver_name in solver_names:\n", + " print(f'{solver_name} solved: {successfully_solved[solver_name]}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "success_rate_order = {\n", + " 'ECBS(1.05) w/ LCA-jumping': 1,\n", + " 'ECBS(1.01) w/ LCA-jumping': 2,\n", + " 'ECBS(1.05)': 3,\n", + " 'ECBS(1.01)': 4,\n", + " 'CBS w/ LCA-jumping': 5,\n", + " 'CBS': 6,\n", + " \n", + " 'IDCBS': 101,\n", + " 'CBS': 102,\n", + "}\n", + "success_rate_data['success_rate_order'] = success_rate_data['solver'].map(success_rate_order)\n", + "\n", + "success_rate_by_agents = alt.Chart(success_rate_data)\\\n", + " .mark_line(strokeWidth=5)\\\n", + " .encode(alt.X('Num of Agents', type='quantitative',\n", + "# bin=alt.BinParams(step=5),\n", + " axis=alt.Axis(titleFontSize=16, \n", + " labelFontSize=16,\n", + "# title='Num of Agents (5-agent bins)',\n", + " title='Num of Agents',\n", + " titlePadding=-3,\n", + " ),\n", + "# scale=alt.Scale(type='log')\n", + " ),\n", + " alt.Y('success', type='quantitative', aggregate='mean',\n", + " #domain=(0, 3650), clamp=True),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'),\n", + " title='Success Rate',\n", + " titleFontSize=16,\n", + " labelFontSize=16,\n", + 
" )\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(title='',\n", + " #title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=200, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='right',\n", + " offset=0, #symbolType='circle',\n", + " #symbolFillColor=,\n", + " ),\n", + " sort=alt.Sort(field='success_rate_order', op='mean'),\n", + " ),\n", + " )\\\n", + " .properties(\n", + " width=750,\n", + " height=180,\n", + " )\n", + "\n", + "success_rate_by_map_size = alt.Chart(success_rate_data)\\\n", + " .mark_line(strokeWidth=2.5)\\\n", + " .encode(alt.X('map_size', type='quantitative', bin=alt.BinParams(step=1500),\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14,\n", + " title='#Open (Unblocked) Map Cells',\n", + " titlePadding=0,\n", + " ),\n", + "# scale=alt.Scale(type='log')\n", + " ),\n", + " alt.Y('success', type='quantitative', aggregate='mean',\n", + " #domain=(0, 3650), clamp=True),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Success Rate', titleFontSize=14,\n", + " labelFontSize=14,\n", + " )\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(title='',\n", + " #title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=200, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='bottom',\n", + " offset=0,\n", + " ),\n", + " sort=alt.Order(field='success_rate_order', op='mean')),\n", + " \n", + " )\\\n", + " .properties(\n", + " width=500,\n", + " height=750,\n", + " )\n", + "\n", + "# success_rate = alt.vconcat(success_rate_by_agents, success_rate_by_map_size)\n", + "# success_rate_by_map_size\n", + "success_rate_by_agents\n", + "# success_rate" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rates = []\n", + "print('group,', end='')\n", + "for solver_name in solver_names:\n", + " print(f'{solver_name} solved,', end='')\n", + "for solver_name in 
solver_names:\n", + " print(f'{solver_name} timeouts,', end='')\n", + "for solver_name in solver_names:\n", + " print(f'{solver_name} memory failures,', end='')\n", + "for solver_name in solver_names:\n", + " print(f'{solver_name} implicit failures,', end='')\n", + "print()\n", + "for group, rows in success_rate_data.groupby(['map_group']):\n", + " print(f'{group},', end='')\n", + " for solver_name in solver_names:\n", + " print(f'{len(rows.loc[(rows.solver == solver_name) & (rows.success > 0)])},', end='')\n", + " for solver_name in solver_names:\n", + " print(f'{len(rows.loc[(rows.solver == solver_name) & (rows.failure_reason == \"Timeout failure\")])},', end='')\n", + " for solver_name in solver_names:\n", + " print(f'{len(rows.loc[(rows.solver == solver_name) & (rows.failure_reason == \"Memory failure\")])},', end='')\n", + " for solver_name in solver_names:\n", + " print(f'{len(rows.loc[(rows.solver == solver_name) & (rows.failure_reason == \"Implicit failure\")])},', end='')\n", + " print()\n", + " rates.append(alt.Chart(rows)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='quantitative', bin=alt.BinParams(step=5),\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14,\n", + " title='Num of Agents',\n", + " #titlePadding=0, # Works but not pretty\n", + " ),\n", + " ),\n", + " alt.Y('success', type='quantitative', aggregate='mean',\n", + " #domain=(0, 3650), clamp=True),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Success Rate', titleFontSize=14,\n", + " labelFontSize=14,\n", + " )\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(title='',\n", + " #title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=200, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='bottom',\n", + " offset=0, #symbolType='circle',\n", + " #symbolFillColor=,\n", + " ),\n", + " sort=alt.Sort(field='success_rate_order', op='mean')),\n", + " )\\\n", + " 
.properties(\n", + " width=750,\n", + " height=200,\n", + " title=f'{group}'\n", + " ))\n", + " \n", + "alt.vconcat(*rates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# All data:\n", + "average_runtime = alt.Chart(all_success_data)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=16, \n", + " labelFontSize=16,\n", + " titlePadding=-3,\n", + " ),\n", + " ),\n", + " alt.Y('Runtime', type='quantitative', aggregate='mean',\n", + " scale=alt.Scale(type='log',\n", + "# domain=(0, 1800), clamp=True,\n", + " ),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Average Runtime (s)',\n", + " titleFontSize=16,\n", + " labelFontSize=16,\n", + " )\n", + " #impute=alt.ImputeParams(value=36000),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(title='',\n", + " #title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='right',\n", + "# offset=0,\n", + " )))\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "# runtime_timeout_rule = alt.Chart(all_success_data)\\\n", + "# .mark_rule(color='firebrick')\\\n", + "# .encode(\n", + "# y=alt.Y(\n", + "# 'timeout',\n", + "# ),\n", + "# size=alt.SizeValue(2)\n", + "# )\n", + "\n", + "# average_runtime = alt.layer(average_runtime, runtime_timeout_rule)\\\n", + "# .properties(\n", + "# width=400,\n", + "# height=250\n", + "# )\n", + "\n", + "average_mem_usage = alt.Chart(all_success_data)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14, \n", + " ),\n", + " ),\n", + " alt.Y('Max_Mem', type='quantitative', aggregate='median',\n", + "# scale=alt.Scale(type='log',),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), 
title='Median Memory Usage (GB)', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " #impute=alt.ImputeParams(value=36000),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='bottom', offset=0,)))\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "average_expanded = alt.Chart(all_success_data)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14, \n", + " ),\n", + " ),\n", + " alt.Y('HL_Expanded', type='quantitative', aggregate='median',\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Median Expanded Nodes', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + "# scale=alt.Scale(type='log',),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='bottom', offset=0)))\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "average_generated = alt.Chart(all_success_data)\\\n", + " .mark_line(strokeWidth=2.5)\\\n", + " .encode(alt.X('Num of Agents', type='ordinal',\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14,\n", + " titlePadding=-3,\n", + " ),\n", + " ),\n", + " alt.Y('HL_Generated', type='quantitative', aggregate='mean',\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Average Generated Nodes', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " scale=alt.Scale(type='log',),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(title='',\n", + " #title=\"Solver:\",\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + " 
labelLimit=600, orient='bottom', offset=0, )))\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "alt.vconcat(average_runtime,\n", + "# average_mem_usage,\n", + "# average_expanded,\n", + "# average_generated,\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(f'#agents,#instances,', end='')\n", + "for solver_name in solver_names:\n", + " print(f'{solver_name} average #generated,', end='')\n", + "print()\n", + "for num_agents, rows in all_success_data.groupby(['Num of Agents', ]):\n", + " print(f'{num_agents},{int(len(rows) / len(solver_names))},', end='')\n", + " for solver_name in solver_names:\n", + " print(f\"{np.average(rows.loc[rows.solver==solver_name]['HL_Generated'])},\", end='')\n", + " print()\n", + "# print(rows)\n", + "# break\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#data['log_hl_generated'] = np.log(data['HL_Generated'])\n", + "average_mem_usage_by_generated = alt.Chart(data)\\\n", + " .mark_line(strokeWidth=2.5)\\\n", + " .encode(alt.X('HL_Generated', bin=alt.BinParams(step=100000),\n", + "# 'log_hl_generated', bin=alt.BinParams(step=0.100000),\n", + " type='quantitative', \n", + "# scale=alt.Scale(type='log'),\n", + " axis=alt.Axis(title='HL Generated Nodes', titleFontSize=14, \n", + " labelFontSize=14, \n", + " ),\n", + " ),\n", + " alt.Y('Max_Mem', type='quantitative', aggregate='mean',\n", + "# scale=alt.Scale(type='log',),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'), title='Average Memory Usage (GB)', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " #impute=alt.ImputeParams(value=36000),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver:\",\n", + " title='',\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + " labelLimit=600, 
orient='bottom',)))\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "average_mem_usage_by_time = alt.Chart(data)\\\n", + " .mark_line(strokeWidth=5)\\\n", + " .encode(alt.X('Runtime', type='quantitative',\n", + " bin=alt.BinParams(step=120),\n", + " axis=alt.Axis(title='Final Runtime (120-second bins)',\n", + " titleFontSize=16, \n", + " labelFontSize=16,\n", + " titlePadding=-2,\n", + " ),\n", + " ),\n", + " alt.Y('Max_Mem', type='quantitative', aggregate='mean',\n", + "# scale=alt.Scale(type='log',),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'),\n", + " title='Average Memory Usage (GB)',\n", + " titleFontSize=16,\n", + " labelFontSize=16,\n", + " ),\n", + " #impute=alt.ImputeParams(value=36000),\n", + " ),\n", + " alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver:\",\n", + " title='',\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + " labelLimit=600, orient='right',\n", + "# offset=0,\n", + " ),\n", + " ),\n", + " )\\\n", + " .properties(\n", + " width=400,\n", + " height=200*22./18*22/21.5,\n", + " )\n", + "\n", + "average_mem_usage_by_map_size = alt.Chart(data)\\\n", + " .mark_line(strokeWidth=4)\\\n", + " .encode(alt.X('open_map_cells', type='quantitative', #bin=alt.BinParams(step=60),\n", + " scale=alt.Scale(type='log'), # Doesn't do anything: zero=False, domain=[64,100000], nice=True,\n", + " axis=alt.Axis(title='#Open (Unblocked) Map Cells', titleFontSize=14, \n", + " labelFontSize=14, \n", + " ),\n", + " ),\n", + " alt.Y('Max_Mem',\n", + " #'mem_usage_per_second',\n", + " type='quantitative', aggregate='mean',\n", + "# scale=alt.Scale(type='log',),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'),\n", + " title='Average Memory Usage (GB)',\n", + "# title='Median Memory Usage Per Second (kB/s)',\n", + " titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " #impute=alt.ImputeParams(value=36000),\n", + " ),\n", + " 
alt.Color('solver', type='nominal',\n", + " legend=alt.Legend(#title=\"Solver:\",\n", + " title='',\n", + " titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500, symbolStrokeWidth=3,\n", + "# offset=0,\n", + " labelLimit=600, orient='bottom',)))\\\n", + " .properties(\n", + " width=400,\n", + " height=250\n", + " )\n", + "\n", + "#alt.vconcat(average_mem_usage_by_generated, average_mem_usage_by_time, average_mem_usage_by_map_size)\n", + "average_mem_usage_by_time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "runtime_components_data = data.loc[(data.Cost != -1) & (data.Cost != -2)].melt(\n", + " id_vars=set(data.columns) - {'HL runtime', 'LL runtime',\n", + " 'Up&Down Time',\n", + " 'MDD Time', 'HL Heuristic Time',\n", + " 'CAT Time', 'HL Node Verification Time', #'FAILED',\n", + " },\n", + " value_vars=['HL runtime', 'LL runtime',\n", + " 'Up&Down Time',\n", + " 'MDD Time', 'HL Heuristic Time',\n", + " 'CAT Time', 'HL Node Verification Time', #'FAILED',\n", + " ],\n", + " var_name='Component',\n", + " value_name='Component_Runtime')\n", + "\n", + "\n", + "runtime_components_data['Component_Runtime_Per_Generated_Node'] = runtime_components_data['Component_Runtime'] / runtime_components_data['HL_Generated'] * 1000\n", + "# runtime_components_data['Component_Runtime_Per_Expanded_Node'] = runtime_components_data['Component_Runtime'] / runtime_components_data['HL_Expanded'] * 1000 # HL_Expanded can be zero - another reason to use HL_Generated\n", + "\n", + "# Better names\n", + "runtime_components_data.Component.replace({'HL runtime': 'High-level Work',\n", + " 'LL runtime': 'Path Finding',\n", + " 'MDD Time': 'Building MDDs',\n", + " 'HL Heuristic Time': 'HL Heuristic',\n", + " 'CAT Time': 'Building CAT',\n", + " 'HL Node Verification Time': 'Finding Conflicts',\n", + " 'Up&Down Time': 'LCA-Jumping'\n", + " }, inplace=True)\n", + "\n", + "components_instance_dropdown = 
alt.binding_select(options=[None] + list(runtime_components_data.instance.unique()))\n", + "components_instance_selection = alt.selection_single(fields=['instance'],\n", + " bind=components_instance_dropdown, name='Instance',\n", + "# empty='none', # See top cell for why this isn't enabled\n", + " )\n", + "\n", + "layer_order_top_to_bottom = {\n", + " 'Building CAT': 7, # First because it's shrunk the most\n", + " 'LCA-Jumping': 6,\n", + " 'Finding Conflicts': 5, # Very thin anyway so order not so important\n", + " 'High-level Work': 4,\n", + " 'HL Heuristic': 3, # On top of MDD Time because it builds on it\n", + " 'Building MDDs': 2,\n", + " 'Path Finding': 1, # Last because it's the biggest and shrinks a lot, also, MDD work is on top of it\n", + "}\n", + "runtime_components_data['layer_order'] = runtime_components_data['Component'].map(layer_order_top_to_bottom)\n", + "\n", + "stacked_runtime_charts = []\n", + "for solver_name in solver_names:\n", + " stacked_runtime_charts.append(alt.Chart(runtime_components_data)\\\n", + " .mark_area()\\\n", + " .encode(alt.X('Num of Agents',\n", + " type='ordinal',\n", + " # scale=alt.Scale(\n", + " # domain=list(range(2,79)), #TODO: Check if this can be set interactively, if yes, try to connect a trigger to update the domain based on that of the other chart\n", + " # ),\n", + " axis=alt.Axis(titleFontSize=14, \n", + " labelFontSize=14, \n", + " ),\n", + " ),\n", + " alt.Y(\n", + " #'Component_Runtime',\n", + " 'Component_Runtime_Per_Generated_Node',\n", + " type='quantitative',\n", + " # scale=alt.Scale(domain=(0, 3650), clamp=True,),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'),\n", + " title='Time Per Generated Node (ms)', titleFontSize=14,\n", + " labelFontSize=14,\n", + " ),\n", + " ),\n", + " alt.Color('Component', type='nominal', scale=alt.Scale(\n", + " range=[\n", + " \"#e41a1c\",\n", + " # \"#4daf4a\",\n", + " \"#377eb8\",\n", + " \"#984ea3\",\n", + " \"#ff7f00\",\n", + " # \"#ffff33\",\n", + " 
\"#f781bf\",\n", + " \"#a65628\",\n", + " \"#999999\",\n", + " ],\n", + " # scheme='category10',\n", + " #scheme='set1',\n", + " #scheme='tableau20'\n", + " ),\n", + " legend=alt.Legend(title=\"Component\", titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500,\n", + " symbolStrokeWidth=3, labelLimit=600, orient='right',\n", + " values=list(layer_order_top_to_bottom.keys()),\n", + " ),\n", + " ),\n", + " order=alt.Order(field='layer_order', type='ordinal'),\n", + " )\\\n", + " .transform_filter(components_instance_selection)\\\n", + " .transform_filter(datum.solver == solver_name)\\\n", + " .properties(\n", + " width=400,\n", + " height=250,\n", + " title=f'{solver_name} Runtime Components'\n", + " ))\n", + " \n", + " \n", + "from functools import reduce\n", + "import operator\n", + "# joint_chart = reduce(operator.and_, stacked_runtime_charts)\\\n", + "# .add_selection(components_instance_selection)\\\n", + "# .resolve_scale(x='shared', y='shared', )\n", + "joint_chart = reduce(operator.or_, stacked_runtime_charts)\\\n", + " .add_selection(components_instance_selection)\\\n", + " .resolve_scale(x='shared', y='shared', )\n", + "\n", + "joint_chart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "all_success_runtime_components_data = all_success_data.melt(\n", + " id_vars=set(data.columns) - {'HL runtime', 'LL runtime',\n", + " 'Up&Down Time',\n", + " 'MDD Time', 'HL Heuristic Time',\n", + " 'CAT Time', 'HL Node Verification Time',\n", + " },\n", + " value_vars=['HL runtime', 'LL runtime',\n", + " 'Up&Down Time',\n", + " 'MDD Time', 'HL Heuristic Time',\n", + " 'CAT Time', 'HL Node Verification Time',\n", + " ],\n", + " var_name='Component',\n", + " value_name='Component_Runtime')\n", + "\n", + "\n", + "all_success_runtime_components_data['Component_Runtime_Per_Generated_Node'] = all_success_runtime_components_data['Component_Runtime'] / all_success_runtime_components_data['HL_Generated'] * 
1000\n", + "\n", + "\n", + "# Better names\n", + "all_success_runtime_components_data.Component.replace({'HL runtime': 'High-level Work',\n", + " 'LL runtime': 'Path Finding',\n", + " 'MDD Time': 'Building MDDs',\n", + " 'HL Heuristic Time': 'HL Heuristic',\n", + " 'CAT Time': 'Building CAT',\n", + " 'HL Node Verification Time': 'Finding Conflicts',\n", + " 'Up&Down Time': 'LCA-Jumping'\n", + " }, inplace=True)\n", + "\n", + "layer_order_top_to_bottom = {\n", + " 'Building CAT': 7, # First because it's shrunk the most\n", + " 'LCA-Jumping': 6, # Very thin anyway\n", + " 'Finding Conflicts': 5, # Very thin anyway so order not so important\n", + " 'High-level Work': 4,\n", + " 'HL Heuristic': 3, # On top of MDD Time because it builds on it\n", + " 'Building MDDs': 2,\n", + " 'Path Finding': 1, # Last because it's the biggest and shrinks a lot, also, MDD work is on top of it\n", + "}\n", + "all_success_runtime_components_data['layer_order'] = all_success_runtime_components_data['Component'].map(layer_order_top_to_bottom)\n", + "\n", + "\n", + "stacked_runtime_charts = []\n", + "for solver_name in solver_names:\n", + " stacked_runtime_charts.append(alt.Chart(all_success_runtime_components_data)\\\n", + " .mark_area()\\\n", + " .encode(alt.X('Num of Agents',\n", + " type='quantitative',\n", + "# bin=alt.BinParams(step=5),\n", + " scale=alt.Scale(\n", + " domain=(2,110), #TODO: Check if this can be set interactively, if yes, try to connect a trigger to update the domain based on that of the other chart\n", + " clamp=True,\n", + " ),\n", + " axis=alt.Axis(titleFontSize=16, \n", + " labelFontSize=16,\n", + " titlePadding=-2,\n", + " ),\n", + " ),\n", + " alt.Y(\n", + " #'Component_Runtime',\n", + " 'Component_Runtime_Per_Generated_Node', aggregate='median',\n", + " type='quantitative',\n", + " scale=alt.Scale(domain=(0, 22), clamp=True,),\n", + " axis=alt.Axis(orient=alt.AxisOrient('left'),\n", + " title='Median Time Per Gen. 
Node (ms)',\n", + " titleFontSize=16,\n", + " labelFontSize=16,\n", + " titlePadding=0,\n", + " ),\n", + " ),\n", + " alt.Color('Component', type='nominal', scale=alt.Scale(\n", + " range=[\n", + " \"#e41a1c\",\n", + "# \"#4daf4a\",\n", + " \"#377eb8\",\n", + " \"#984ea3\",\n", + " \"#ff7f00\",\n", + "# \"#ffff33\",\n", + " \"#f781bf\",\n", + " \"#a65628\",\n", + " \"#999999\",\n", + " ],\n", + "# scheme='category10',\n", + " #scheme='set1',\n", + " #scheme='tableau20'\n", + " ),\n", + " legend=alt.Legend(title=\"Component\", titleFontSize=16,\n", + " labelFontSize=16, symbolSize=500,\n", + " symbolStrokeWidth=3, labelLimit=600, orient='right',\n", + " values=list(layer_order_top_to_bottom.keys()),\n", + " ),\n", + " ),\n", + " order=alt.Order(field='layer_order', type='ordinal'),\n", + " )\\\n", + " .transform_filter(datum.solver == solver_name)\\\n", + " .properties(\n", + " width=400,\n", + " height=250,\n", + " title=f'{solver_name} Runtime Breakdown',\n", + " )\n", + " )\n", + " \n", + " \n", + "\n", + "from functools import reduce\n", + "import operator\n", + "# joint_chart = reduce(operator.and_, stacked_runtime_charts)\\\n", + "# .resolve_scale(x='shared', y='shared', )\n", + "joint_chart = reduce(operator.or_, stacked_runtime_charts)\\\n", + " .resolve_scale(x='shared', y='shared', )\\\n", + " .configure_title(fontSize=20)\n", + "\n", + "joint_chart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data.loc[data['solver']==solver_names[0]]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data.columns" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "runtime_components_data.loc[runtime_components_data['instance'] == '/scen/den520d-random-2.scen']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"runtime_components_data.loc[runtime_components_data.solver==solver_names[1]]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.average(\n", + "all_success_data.loc[(all_success_data['solver']==solver_names[1]) & (all_success_data['Num of Agents']==70)].HL_Generated)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data.loc[data.Runtime>3800]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.1" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/run_on_benchmark_instances.py b/run_on_benchmark_instances.py new file mode 100644 index 0000000..e33fcfd --- /dev/null +++ b/run_on_benchmark_instances.py @@ -0,0 +1,243 @@ +from itertools import product +import subprocess +from os.path import basename, exists, join, abspath +from os import getcwd +import time +import csv + +#build = 'debug' +build = 'release' +timeout_seconds = 3600 +in_container = True +mem_limit = '8g' +seed = 123 +use_heuristic = True +focal_w = 1#.50 +child_pref_budget = 5 +max_child_pref_options = 20 +prefer_f_cardinal = False +prefer_goal_conflicts = False +skip_according_to_output_file = True + +agents_dir = abspath('agents') +maps_dir = abspath('maps') +scen_dir = abspath('scen') +# output_file_name = 'benchmark_cbsh_no_lpa.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_timeout_3600.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_prefer_goal_conflicts.csv' +# output_file_name = 
'benchmark_cbsh_no_lpa_goal_conflict_aware_heuristic.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_goal_conflict_aware_heuristic_prefer_goal_conflicts.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_goal_conflict_aware_heuristic_prefer_f_cardinal.csv' +# output_file_name = 'benchmark_cbsh_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_ecbsh_1_50_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_ecbsh_1_05_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_ecbsh_1_01_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_ecbsh_1_00_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600.csv' +# output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600-in-container.csv' +output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600-in-container-with-conflict-choice-fix.csv' +# output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600-in-container-with-conflict-choice-fix2.csv' +# output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600-in-container-with-conflict-choice-fix3.csv' +# output_file_name = 'benchmark_ecbsh_1_05_no_lpa.csv' +# output_file_name = 'benchmark_ecbsh_1_01_no_lpa.csv' +# output_file_name = 'benchmark_cbsh_lpa_up_and_down_with_lpmdd_and_path_repair_goal_conflict_heuristic.csv' +# output_file_name = 'benchmark_cbsh_lpa_up_and_down_5_with_lpmdd_and_path_repair_goal_conflict_heuristic_prefer_f_cardinals.csv' +# output_file_name = 'dao_cbsh_no_lpa_fcardinal.csv' +#output_file_name = 
'dao_cbsh_no_lpa_latest_conflict.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_0_budget.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_0_budget_latest_conflict.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_5_budget_latest_conflict.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_5_budget_min_tree_distance_20_options_.csv' +#output_file_name = 'dao_cbsh_lpa.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_0_budget.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_0_budget_not_latest_conflict.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_20_budget.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_5_budget_not_latest_conflict.csv' +#output_file_name = 'dao_ecbs_no_h_1_05_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_ecbsh_1_05_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_ecbs_no_h_1_05_no_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_ecbsh_1_05_no_lpa_up_and_down_5_budget.csv' +# output_file_name = 'dao_ecbsh_1_05_no_lpa_up_and_down_5_budget_latest_conflict.csv' +#output_file_name = 'dao_ecbsh_1_05_lpa.csv' +#output_file_name = 'dao_ecbsh_1_05_no_lpa.csv' +#output_file_name = 'dao_idcbsh_no_lpa.csv' +#output_file_name = 'dao_idcbsh_no_lpa_latest_conflict.csv' +#output_file_name = 'dao_idcbsh_lpa.csv' +#output_file_name = 'dao_ecbs_no_h_1_05_no_lpa.csv' +output_dir = getcwd() +output_file_path = join(output_dir, output_file_name) +# executable_name = "ECBSH_no_lpa" +# executable_name = "ECBSH_no_lpa_goal_conflict_heuristic" +#executable_name = "ECBSH_no_lpa_latest_conflict" +#executable_name = "ECBSH_no_lpa_up_and_down" +# executable_name = "ECBSH_no_lpa_up_and_down_latest_conflict" +# executable_name = "ECBSH_lpa" +# executable_name = "ECBSH_lpa_not_latest_conflict" +#executable_name = "ECBSH_lpa_up_and_down" +# executable_name = 
"ECBSH_lpa_up_and_down_with_lpmdd_and_path_repair" +# executable_name = "ECBSH_lpa_up_and_down_with_lpmdd_and_path_repair_goal_conflict_heuristic" +#executable_name = "ECBSH_lpa_up_and_down_not_latest_conflict" +# executable_name = "IDCBSH_no_lpa" +# executable_name = "IDCBSH_no_lpa_latest_conflict" +# executable_name = "IDCBSH_lpa" +# executable_name = "IDCBSH_lpa_not_latest_conflict" +executable_name = "IDECBSH_lpa_with_lpmdd_and_path_repair" +executable_path = join('/lpa', executable_name) + +map_names = ( + 'ost003d', + 'den502d', + 'den520d', + 'brc202d', + 'empty-8-8', + 'empty-16-16', + 'empty-32-32', + 'empty-48-48', + 'random-32-32-10', + 'random-32-32-20', + 'random-64-64-10', + # 'random-64-64-20', + # 'maze-128-128-1', + # 'maze-128-128-2', + # 'maze-128-128-10', + # 'maze-32-32-2', + # 'room-32-32-4', + # 'room-64-64-8', + # 'room-64-64-16', + # 'den312d', + # 'orz900d', + # 'ht_chantry', + # 'ht_mansion_n', + # 'lak303d', + # 'lt_gallowstemplar_n', + # 'w_woundedcoast', + # 'Berlin_1_256', + # 'Boston_0_256', + # 'Paris_1_256', + # 'warehouse-10-20-10-2-1', + # 'warehouse-10-20-10-2-2', + # 'warehouse-20-40-10-2-1', + # 'warehouse-20-40-10-2-2', + ) +# USC maps (with .agents files): +# 'kiva_0.map', +# 'roundabout_2.map', +# 'maze_3.map', +# 'maze_2.map', +# 'roundabout_3.map', +# 'roundabout_1.map', +# 'roundabout_4.map', +# 'roundabout_5.map', +# 'maze_1_2.map', +# 'maze_1.map', +# 'maze_1_3.map', +# 'maze_4.map', + +last_scen_type = None +last_map_name = None +last_scen_index = None +last_num_agents = None +last_cost = None +if exists(output_file_path) and skip_according_to_output_file: + with open(output_file_path) as f: + output_csv = csv.DictReader(f) + lines = list(output_csv) + last_line = lines[-1] + del output_csv + del lines + instance_file = last_line['instance'] + assert instance_file.endswith('.scen') + instance_name = instance_file[:-len('.scen')] + last_scen_type = instance_name.rsplit('-', 2)[-2] + last_map_name = 
instance_name.rsplit('-', 2)[-3].partition('scen/')[2] + last_scen_index = int(instance_name.rsplit('-', 2)[-1]) + last_num_agents = int(last_line['Num of Agents']) + last_cost = int(last_line['Cost']) + print(f'Existing file ends with:') + print(f'instance={instance_file} - last_scen_type={last_scen_type}, last_map_name={last_map_name}, last_scen_index={last_scen_index}') + + +for split_strategy, scen_index, map_name, scen_type in product( + ( + #'MVC_BASED', + 'NON_DISJOINT', + ), range(1, 26), map_names, + ( + 'even', + 'random', + )): + # for split_strategy in ('WIDTH', ): # Looks like the best one + + num_agents_start = 2 + + # Skip instances we've already done. TODO: support multiple split strategies + if last_scen_type is not None: + if scen_type == last_scen_type and map_name == last_map_name and scen_index == last_scen_index: + last_scen_type = None + last_map_name = None + last_scen_index = None + num_agents_start = last_num_agents + 1 + last_num_agents = None + if last_cost >= 0: + print(f'Resuming the run, starting from {num_agents_start}') + else: + num_agents_start = 2 + print(f'Skipping ({scen_index},{map_name},{scen_type})') + print(f'Resuming the run') + continue + else: + print(f'Skipping ({scen_index},{map_name},{scen_type})') + continue + + map_file_name = f'{map_name}.map' + map_file_path = join(maps_dir, map_file_name) + if not exists(map_file_path): + print(f'No map file {map_file_path}') + continue + scen_file_name = f'{map_name}-{scen_type}-{scen_index}.scen' + scen_file_path = join(scen_dir, scen_file_name) + if not exists(scen_file_path): + print(f'No scenario file {scen_file_path}') + continue + for num_agents in range(num_agents_start, 2000): # We stop on the first failure + # GLOG_logtostderr=1 ./cmake-... 
+ # docker run --memory-swap=10g to avoid swapping + + if in_container: + cmd = f'docker run --rm -it --net=host --memory={mem_limit} --memory-swap={mem_limit} -v {scen_dir}:/scen ' \ + f'-v {maps_dir}:/maps -v {output_dir}:/output ' \ + f'search/mapf:cbs-lpa ' \ + f'{executable_path} -m "/maps/{map_file_name}" ' \ + f'-a "/scen/{scen_file_name}" ' \ + f'-k {num_agents} ' \ + f'-o /output/{output_file_name} -p {split_strategy} --screen 0 --seed {seed} ' \ + f'--cutoffTime={timeout_seconds} --verbosity 0 --heuristic {1 if use_heuristic else 0} ' \ + f'--focalW {focal_w} --childPrefBudget {child_pref_budget} --maxChildPrefOptions {max_child_pref_options} ' \ + f'--prefer_f_cardinal {1 if prefer_f_cardinal else 0} ' \ + f'--prefer_goal_conflicts {1 if prefer_goal_conflicts else 0}' + else: + cmd = f'./{executable_name} -m "maps/{map_file_name}" ' \ + f'-a "scen/{scen_file_name}" ' \ + f'-k {num_agents} ' \ + f'-o {output_file_name} -p {split_strategy} --screen 0 --seed {seed} ' \ + f'--cutoffTime={timeout_seconds} --verbosity 0 --heuristic {1 if use_heuristic else 0} ' \ + f'--focalW {focal_w} --childPrefBudget {child_pref_budget} --maxChildPrefOptions {max_child_pref_options} ' \ + f'--prefer_f_cardinal {1 if prefer_f_cardinal else 0} ' \ + f'--prefer_goal_conflicts {1 if prefer_goal_conflicts else 0}' + + print(time.strftime('%Y-%m-%dT%H:%M:%S: ') + cmd) + start_time = time.time() + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + if e.returncode == 1: # Solution not found - probably due to a timeout. 
No point in adding more agents + break + elif e.returncode == 137: # Killed by the cgroup's OOM killer + with open(output_file_path, 'a') as f: + f.write('-2,' + '=NA(),' * 23 + f'{time.time() - start_time},=NA(),{mem_limit},same as above,/scen/{scen_file_name},{num_agents}\n') + break + else: + raise diff --git a/run_on_benchmark_instances_missing_fields.py b/run_on_benchmark_instances_missing_fields.py new file mode 100644 index 0000000..0b72b1f --- /dev/null +++ b/run_on_benchmark_instances_missing_fields.py @@ -0,0 +1,205 @@ +from itertools import product +import subprocess +from os.path import basename, exists, join, abspath +from os import getcwd +import time +import csv + +#build = 'debug' +build = 'release' +timeout_seconds = 3600 +mem_limit = '8g' +seed = 123 +use_heuristic = True +focal_w = 1#.01 +child_pref_budget = 5 +max_child_pref_options = 20 +prefer_f_cardinal = False +prefer_goal_conflicts = False + +agents_dir = abspath('agents') +maps_dir = abspath('maps') +scen_dir = abspath('scen') +# output_file_name = 'benchmark_cbsh_no_lpa.csv' +output_file_name = 'benchmark_cbsh_no_lpa_timeout_3600.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_prefer_goal_conflicts.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_goal_conflict_aware_heuristic.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_goal_conflict_aware_heuristic_prefer_goal_conflicts.csv' +# output_file_name = 'benchmark_cbsh_no_lpa_goal_conflict_aware_heuristic_prefer_f_cardinal.csv' +# output_file_name = 'benchmark_cbsh_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_ecbsh_1_05_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_ecbsh_1_01_lpa_up_and_down_5_child_pref_budget_20_pref_options_with_lpmdd_and_path_repair.csv' +# output_file_name = 'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair.csv' +# output_file_name = 
'benchmark_idcbsh_lpa_with_lpmdd_and_path_repair_timeout_3600.csv' +# output_file_name = 'benchmark_ecbsh_1_05_no_lpa.csv' +# output_file_name = 'benchmark_ecbsh_1_01_no_lpa.csv' +# output_file_name = 'benchmark_cbsh_lpa_up_and_down_with_lpmdd_and_path_repair_goal_conflict_heuristic.csv' +# output_file_name = 'benchmark_cbsh_lpa_up_and_down_5_with_lpmdd_and_path_repair_goal_conflict_heuristic_prefer_f_cardinals.csv' +# output_file_name = 'dao_cbsh_no_lpa_fcardinal.csv' +#output_file_name = 'dao_cbsh_no_lpa_latest_conflict.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_0_budget.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_0_budget_latest_conflict.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_5_budget_latest_conflict.csv' +#output_file_name = 'dao_cbsh_no_lpa_up_and_down_5_budget_min_tree_distance_20_options_.csv' +#output_file_name = 'dao_cbsh_lpa.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_0_budget.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_0_budget_not_latest_conflict.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_20_budget.csv' +#output_file_name = 'dao_cbsh_lpa_up_and_down_5_budget_not_latest_conflict.csv' +#output_file_name = 'dao_ecbs_no_h_1_05_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_ecbsh_1_05_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_ecbs_no_h_1_05_no_lpa_up_and_down_5_budget.csv' +#output_file_name = 'dao_ecbsh_1_05_no_lpa_up_and_down_5_budget.csv' +# output_file_name = 'dao_ecbsh_1_05_no_lpa_up_and_down_5_budget_latest_conflict.csv' +#output_file_name = 'dao_ecbsh_1_05_lpa.csv' +#output_file_name = 'dao_ecbsh_1_05_no_lpa.csv' +#output_file_name = 'dao_idcbsh_no_lpa.csv' +#output_file_name = 'dao_idcbsh_no_lpa_latest_conflict.csv' +#output_file_name = 'dao_idcbsh_lpa.csv' +#output_file_name = 'dao_ecbs_no_h_1_05_no_lpa.csv' +output_dir = getcwd() 
+output_file_path = join(output_dir, output_file_name) +executable_name = "ECBSH_no_lpa" +# executable_name = "ECBSH_no_lpa_goal_conflict_heuristic" +#executable_name = "ECBSH_no_lpa_latest_conflict" +#executable_name = "ECBSH_no_lpa_up_and_down" +# executable_name = "ECBSH_no_lpa_up_and_down_latest_conflict" +# executable_name = "ECBSH_lpa" +# executable_name = "ECBSH_lpa_not_latest_conflict" +#executable_name = "ECBSH_lpa_up_and_down" +# executable_name = "ECBSH_lpa_up_and_down_with_lpmdd_and_path_repair" +# executable_name = "ECBSH_lpa_up_and_down_with_lpmdd_and_path_repair_goal_conflict_heuristic" +#executable_name = "ECBSH_lpa_up_and_down_not_latest_conflict" +# executable_name = "IDCBSH_no_lpa" +# executable_name = "IDCBSH_no_lpa_latest_conflict" +# executable_name = "IDCBSH_lpa" +# executable_name = "IDCBSH_lpa_not_latest_conflict" +# executable_name = "IDECBSH_lpa_with_lpmdd_and_path_repair" +executable_path = join('/lpa', executable_name) + +map_names = ( + 'ost003d', + 'den502d', + 'den520d', + 'brc202d', + 'empty-8-8', + 'empty-16-16', + 'empty-32-32', + 'empty-48-48', + 'random-32-32-10', + 'random-32-32-20', + 'random-64-64-10', + 'random-64-64-20', + 'maze-128-128-1', + 'maze-128-128-2', + 'maze-128-128-4', + 'maze-128-128-10', + 'maze-32-32-2', + 'room-32-32-4', + 'room-64-64-8', + 'room-64-64-16', + 'den312d', + 'orz900d', + 'ht_chantry', + 'ht_mansion_n', + 'lak303d', + 'lt_gallowstemplar_n', + 'w_woundedcoast', + 'Berlin_1_256', + 'Boston_0_256', + 'Paris_1_256', + 'warehouse-10-20-10-2-1', + 'warehouse-10-20-10-2-2', + 'warehouse-20-40-10-2-1', + 'warehouse-20-40-10-2-2', + ) +# USC maps (with .agents files): +# 'kiva_0.map', +# 'roundabout_2.map', +# 'maze_3.map', +# 'maze_2.map', +# 'roundabout_3.map', +# 'roundabout_1.map', +# 'roundabout_4.map', +# 'roundabout_5.map', +# 'maze_1_2.map', +# 'maze_1.map', +# 'maze_1_3.map', +# 'maze_4.map', + + +with open(output_file_path) as f: + output_csv = csv.DictReader(f) + for line in output_csv: + if 
line['Max Mem (kB)'] != '' and line['Wall Runtime'] != '#N/A': + continue + + instance_file = line['instance'] + assert instance_file.endswith('.scen') + instance_name = instance_file[:-len('.scen')] + scen_type = instance_name.rsplit('-', 2)[-2] + map_name = instance_name.rsplit('-', 2)[-3][len('/scen/'):] + scen_index = int(instance_name.rsplit('-', 2)[-1]) + num_agents = int(line['Num of Agents']) + print(f'found line with missing memory usage data or wall runtime:') + print(f'instance={instance_file} - last_scen_type={scen_type}, last_map_name={map_name}, last_scen_index={scen_index}') + + split_strategy = 'NON_DISJOINT' + # for , scen_index, map_name, scen_type in product( + # ( + # #'MVC_BASED', + # , + # ), range(1, 26), map_names, ('even', 'random')): + # # for split_strategy in ('WIDTH', ): # Looks like the best one + # + # num_agents_start = 2 + + map_file_name = f'{map_name}.map' + map_file_path = join(maps_dir, map_file_name) + if not exists(map_file_path): + continue + scen_file_name = f'{map_name}-{scen_type}-{scen_index}.scen' + scen_file_path = join(scen_dir, scen_file_name) + if not exists(scen_file_path): + continue + + # GLOG_logtostderr=1 ./cmake-... 
+ # docker run --memory-swap=10g to avoid swapping + + cmd = f'docker run --rm -it --net=host --memory={mem_limit} --memory-swap={mem_limit} -v {scen_dir}:/scen ' \ + f'-v {maps_dir}:/maps -v {output_dir}:/output ' \ + f'search/mapf:cbs-lpa ' \ + f'{executable_path} -m "/maps/{map_file_name}" ' \ + f'-a "/scen/{scen_file_name}" ' \ + f'-k {num_agents} ' \ + f'-o /output/{output_file_name} -p {split_strategy} --screen 0 --seed {seed} ' \ + f'--cutoffTime={timeout_seconds} --verbosity 0 --heuristic {1 if use_heuristic else 0} ' \ + f'--focalW {focal_w} --childPrefBudget {child_pref_budget} --maxChildPrefOptions {max_child_pref_options} ' \ + f'--prefer_f_cardinal {1 if prefer_f_cardinal else 0} ' \ + f'--prefer_goal_conflicts {1 if prefer_goal_conflicts else 0}' + + # cmd = f'./{executable_name} -m "maps/{map_file_name}" ' \ + # f'-a "scen/{scen_file_name}" ' \ + # f'-k {num_agents} ' \ + # f'-o {output_file_name} -p {split_strategy} --screen 0 --seed {seed} ' \ + # f'--cutoffTime={timeout_seconds} --verbosity 0 --heuristic {1 if use_heuristic else 0} ' \ + # f'--focalW {focal_w} ' \ + # f'--childPrefBudget {child_pref_budget} ' \ + # f'--maxChildPrefOptions {max_child_pref_options} ' \ + # f'--prefer_f_cardinal {1 if prefer_f_cardinal else 0} ' \ + # f'--prefer_goal_conflicts {1 if prefer_goal_conflicts else 0}' + + print(time.strftime('%Y-%m-%dT%H:%M:%S: ') + cmd) + start_time = time.time() + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + if e.returncode == 1: # Solution not found - probably due to a timeout. No point in adding more agents + pass + elif e.returncode == 137: # Killed by the OOM killer + with open(output_file_path, 'a') as f: + f.write('-2,' + '=NA(),' * 23 + f'{time.time() - start_time},=NA(),{mem_limit},same as above,/scen/{scen_file_name},{num_agents}\n') + else: + raise