diff --git a/.gitignore b/.gitignore
index d31fe08c68..5264d4d9da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,9 @@ __pycache__/
/experiments/issue*/data/
/misc/.tox/
/misc/autodoc/downward-xmlrpc.secret
+*.cc.o
+/src/bin
+/src/CMakeFiles
+/src/Makefile
+/src/search/Makefile
+/src/search/CMakeFiles
\ No newline at end of file
diff --git a/src/javascript/README.md b/src/javascript/search/README.md
similarity index 100%
rename from src/javascript/README.md
rename to src/javascript/search/README.md
diff --git a/src/javascript/configure_cmake.sh b/src/javascript/search/configure_cmake.sh
similarity index 97%
rename from src/javascript/configure_cmake.sh
rename to src/javascript/search/configure_cmake.sh
index f52a247f96..d1de955ad4 100755
--- a/src/javascript/configure_cmake.sh
+++ b/src/javascript/search/configure_cmake.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-cd "$(dirname "$0")/../"
+cd "$(dirname "$0")/../../"
if [ $# -eq 0 ]; then
echo "No arguments provided"
echo "Run either with "
diff --git a/src/javascript/index.html b/src/javascript/search/index.html
similarity index 100%
rename from src/javascript/index.html
rename to src/javascript/search/index.html
diff --git a/src/javascript/usage_example.js b/src/javascript/search/usage_example.js
similarity index 96%
rename from src/javascript/usage_example.js
rename to src/javascript/search/usage_example.js
index 7f4b66a907..ef6baea3de 100644
--- a/src/javascript/usage_example.js
+++ b/src/javascript/search/usage_example.js
@@ -18,7 +18,7 @@ downwardscript.onload = function() {
let stream = FS.open('output.sas', 'w+');
FS.write(stream, data, 0, data.length, 0);
FS.close(stream);
- console.log('wrote to output.sas');
+ console.log('wrote to ' + PATH_TO_INPUT);
}
}
inputXHR.send();
diff --git a/src/javascript/translator/README.md b/src/javascript/translator/README.md
new file mode 100644
index 0000000000..d4c219d170
--- /dev/null
+++ b/src/javascript/translator/README.md
@@ -0,0 +1,18 @@
+# Translator in the browser
+We offer to build the translator as a Python wheel.
+This is done by executing the `setup.py` script.
+**Pyodide** can then be used to load the python wheel in the browser.
+
+## How to execute the translator using pyodide
+The following steps are demonstrated in `usage_example.html` and `usage_example.js`.
+- in your javascript file, load the current pyodide version
+- load pyodide's *micropip* package
+- load the *.pddl* files into two javascript strings
+- use micropip to install the translator-wheel created by `setup.py` into the python environment
+- store the pddl strings to the browser's virtual filesystem using python
+- import and execute the translator in python:
+ ```Python
+from translator.translate import run
+run(["domain.pddl", "problem.pddl", "--sas-file", "output.sas"])
+ ```
+- load the result from the python environment to the javascript environment for further use
\ No newline at end of file
diff --git a/src/javascript/translator/exampleFiles/domain.pddl b/src/javascript/translator/exampleFiles/domain.pddl
new file mode 100644
index 0000000000..4fa3829928
--- /dev/null
+++ b/src/javascript/translator/exampleFiles/domain.pddl
@@ -0,0 +1,34 @@
+(define (domain gripper-strips)
+ (:predicates (room ?r)
+ (ball ?b)
+ (gripper ?g)
+ (at-robby ?r)
+ (at ?b ?r)
+ (free ?g)
+ (carry ?o ?g))
+
+ (:action move
+ :parameters (?from ?to)
+ :precondition (and (room ?from) (room ?to) (at-robby ?from))
+ :effect (and (at-robby ?to)
+ (not (at-robby ?from))))
+
+
+
+ (:action pick
+ :parameters (?obj ?room ?gripper)
+ :precondition (and (ball ?obj) (room ?room) (gripper ?gripper)
+ (at ?obj ?room) (at-robby ?room) (free ?gripper))
+ :effect (and (carry ?obj ?gripper)
+ (not (at ?obj ?room))
+ (not (free ?gripper))))
+
+
+ (:action drop
+ :parameters (?obj ?room ?gripper)
+ :precondition (and (ball ?obj) (room ?room) (gripper ?gripper)
+ (carry ?obj ?gripper) (at-robby ?room))
+ :effect (and (at ?obj ?room)
+ (free ?gripper)
+ (not (carry ?obj ?gripper)))))
+
diff --git a/src/javascript/translator/exampleFiles/problem.pddl b/src/javascript/translator/exampleFiles/problem.pddl
new file mode 100644
index 0000000000..c518fed473
--- /dev/null
+++ b/src/javascript/translator/exampleFiles/problem.pddl
@@ -0,0 +1,22 @@
+(define (problem strips-gripper-x-1)
+ (:domain gripper-strips)
+ (:objects rooma roomb ball4 ball3 ball2 ball1 left right)
+ (:init (room rooma)
+ (room roomb)
+ (ball ball4)
+ (ball ball3)
+ (ball ball2)
+ (ball ball1)
+ (at-robby rooma)
+ (free left)
+ (free right)
+ (at ball4 rooma)
+ (at ball3 rooma)
+ (at ball2 rooma)
+ (at ball1 rooma)
+ (gripper left)
+ (gripper right))
+ (:goal (and (at ball4 roomb)
+ (at ball3 roomb)
+ (at ball2 roomb)
+ (at ball1 roomb))))
\ No newline at end of file
diff --git a/src/javascript/translator/setup.py b/src/javascript/translator/setup.py
new file mode 100755
index 0000000000..12b1689a9f
--- /dev/null
+++ b/src/javascript/translator/setup.py
@@ -0,0 +1,35 @@
+#! /usr/bin/env python3
+import os
+import sys
+
+sys.argv.append("bdist_wheel")
+
+home = os.getcwd()
+
+# go to translator root folder
+os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "translate"))
+
+# setup for bdist_wheel
+from setuptools import setup, find_packages
+setup(
+ name = 'translator',
+ version='1.0',
+ # Use one of the below approaches to define package and/or module names:
+
+ #if there are only handful of modules placed in root directory, and no packages/directories exist then can use below syntax
+# packages=[''], #have to import modules directly in code after installing this wheel, like import mod2 (respective file name in this case is mod2.py) - no direct use of distribution name while importing
+
+ #can list down each package names - no need to keep __init__.py under packages / directories
+ packages=['translator', 'translator.pddl', 'translator.pddl_parser'], #importing is like: from package1 import mod2, or import package1.mod2 as m2
+
+ # this approach automatically finds out all directories (packages) - those must contain a file named __init__.py (can be empty)
+ # packages=find_packages(), #include/exclude arguments take * as wildcard, . for any sub-package names
+)
+
+# move files to home directory and remove unnecessary files
+import shutil
+shutil.rmtree('build/')
+shutil.rmtree('translator.egg-info/')
+for file in os.listdir('dist'):
+ shutil.move('dist/' + file, os.path.join(home, file))
+shutil.rmtree('dist/')
diff --git a/src/javascript/translator/usage_example.html b/src/javascript/translator/usage_example.html
new file mode 100644
index 0000000000..c7ce496551
--- /dev/null
+++ b/src/javascript/translator/usage_example.html
@@ -0,0 +1,43 @@
+
+
+
+
+ Translator
+
+
+
+
+Translator in the browser
+Usage example
+
+
+
+
+
+
+
Domain:
+ ...
+
+
+
Problem:
+ ...
+
+
+
+
+
+
+
+
+
+
diff --git a/src/javascript/translator/usage_example.js b/src/javascript/translator/usage_example.js
new file mode 100644
index 0000000000..31120b75d4
--- /dev/null
+++ b/src/javascript/translator/usage_example.js
@@ -0,0 +1,85 @@
+var domain;
+var problem;
+var pyodide;
+
+let loadFile = async (path) => {
+ return new Promise(resolve => {
+ console.log(`requesting ${path}`)
+ let request = new XMLHttpRequest()
+ request.open('GET', path)
+ request.responseType = 'text';
+ request.onload = () => {
+ result = request.responseText;
+ console.log(result);
+ resolve(result);
+ }
+ request.send();
+ })
+}
+
+let pythonCode_InstallTranslator = `
+import micropip
+micropip.install("translator-1.0-py3-none-any.whl")
+`
+let pythonCode_storePDDLToFilesystem = `
+import js
+problem = js.window.problem
+domain = js.window.domain
+
+with open('domain.pddl', 'w') as file:
+ file.write(domain)
+
+with open('problem.pddl', 'w') as file:
+ file.write(problem)
+
+`
+let pythonCode_runTranslator = `
+from translator.translate import run
+run(["domain.pddl", "problem.pddl", "--sas-file", "output.sas"])
+
+with open('output.sas', 'r') as file:
+ output_sas = file.read()
+`
+
+let showSourceFiles = (domain, problem) => {
+ domainHolder = document.querySelector("div[class='domain']").children[1];
+ problemHolder = document.querySelector("div[class='problem']").children[1];
+ domainHolder.innerText = domain;
+ problemHolder.innerText = problem;
+}
+
+let main = async () => {
+ let installTranslator = async () => {
+ return pyodide.runPython(pythonCode_InstallTranslator)
+ }
+ let storePDDLToFilesystem = async () => {
+ return pyodide.runPython(pythonCode_storePDDLToFilesystem)
+ }
+ let runTranslator = async () => {
+ return pyodide.runPython(pythonCode_runTranslator)
+ }
+
+ // load pyodide
+ pyodide = await loadPyodide({ indexURL : "https://cdn.jsdelivr.net/pyodide/v0.18.1/full/" });
+
+ // load micropip
+ await pyodide.loadPackage('micropip');
+
+ // load pddl files
+ domain = await loadFile('exampleFiles/domain.pddl');
+ problem = await loadFile('exampleFiles/problem.pddl');
+ showSourceFiles(domain, problem);
+
+ // run python
+ await installTranslator();
+ await storePDDLToFilesystem();
+ await runTranslator();
+
+ // read result
+ let r = pyodide.globals.output_sas;
+ console.log(r);
+ resultHolder = document.querySelector("div[class='result']").children[0];
+ resultHolder.innerText = r;
+};
+
+main();
diff --git a/src/translate/translate.py b/src/translate/translate.py
index fa96cc74d1..4fb21fcddd 100755
--- a/src/translate/translate.py
+++ b/src/translate/translate.py
@@ -1,735 +1,4 @@
-#! /usr/bin/env python3
-
-
-import os
+#! /usr/bin/env python3
+from translator.translate import run
import sys
-import traceback
-
-def python_version_supported():
- return sys.version_info >= (3, 6)
-
-if not python_version_supported():
- sys.exit("Error: Translator only supports Python >= 3.6.")
-
-
-from collections import defaultdict
-from copy import deepcopy
-from itertools import product
-
-import axiom_rules
-import fact_groups
-import instantiate
-import normalize
-import options
-import pddl
-import pddl_parser
-import sas_tasks
-import signal
-import simplify
-import timers
-import tools
-import variable_order
-
-# TODO: The translator may generate trivial derived variables which are always
-# true, for example if there ia a derived predicate in the input that only
-# depends on (non-derived) variables which are detected as always true.
-# Such a situation was encountered in the PSR-STRIPS-DerivedPredicates domain.
-# Such "always-true" variables should best be compiled away, but it is
-# not clear what the best place to do this should be. Similar
-# simplifications might be possible elsewhere, for example if a
-# derived variable is synonymous with another variable (derived or
-# non-derived).
-
-DEBUG = False
-
-
-## For a full list of exit codes, please see driver/returncodes.py. Here,
-## we only list codes that are used by the translator component of the planner.
-TRANSLATE_OUT_OF_MEMORY = 20
-TRANSLATE_OUT_OF_TIME = 21
-
-simplified_effect_condition_counter = 0
-added_implied_precondition_counter = 0
-
-
-def strips_to_sas_dictionary(groups, assert_partial):
- dictionary = {}
- for var_no, group in enumerate(groups):
- for val_no, atom in enumerate(group):
- dictionary.setdefault(atom, []).append((var_no, val_no))
- if assert_partial:
- assert all(len(sas_pairs) == 1
- for sas_pairs in dictionary.values())
- return [len(group) + 1 for group in groups], dictionary
-
-
-def translate_strips_conditions_aux(conditions, dictionary, ranges):
- condition = {}
- for fact in conditions:
- if fact.negated:
- # we handle negative conditions later, because then we
- # can recognize when the negative condition is already
- # ensured by a positive condition
- continue
- for var, val in dictionary.get(fact, ()):
- # The default () here is a bit of a hack. For goals (but
- # only for goals!), we can get static facts here. They
- # cannot be statically false (that would have been
- # detected earlier), and hence they are statically true
- # and don't need to be translated.
- # TODO: This would not be necessary if we dealt with goals
- # in the same way we deal with operator preconditions etc.,
- # where static facts disappear during grounding. So change
- # this when the goal code is refactored (also below). (**)
- if (condition.get(var) is not None and
- val not in condition.get(var)):
- # Conflicting conditions on this variable: Operator invalid.
- return None
- condition[var] = {val}
-
- def number_of_values(var_vals_pair):
- var, vals = var_vals_pair
- return len(vals)
-
- for fact in conditions:
- if fact.negated:
- ## Note: here we use a different solution than in Sec. 10.6.4
- ## of the thesis. Compare the last sentences of the third
- ## paragraph of the section.
- ## We could do what is written there. As a test case,
- ## consider Airport ADL tasks with only one airport, where
- ## (occupied ?x) variables are encoded in a single variable,
- ## and conditions like (not (occupied ?x)) do occur in
- ## preconditions.
- ## However, here we avoid introducing new derived predicates
- ## by treat the negative precondition as a disjunctive
- ## precondition and expanding it by "multiplying out" the
- ## possibilities. This can lead to an exponential blow-up so
- ## it would be nice to choose the behaviour as an option.
- done = False
- new_condition = {}
- atom = pddl.Atom(fact.predicate, fact.args) # force positive
- for var, val in dictionary.get(atom, ()):
- # see comment (**) above
- poss_vals = set(range(ranges[var]))
- poss_vals.remove(val)
-
- if condition.get(var) is None:
- assert new_condition.get(var) is None
- new_condition[var] = poss_vals
- else:
- # constrain existing condition on var
- prev_possible_vals = condition.get(var)
- done = True
- prev_possible_vals.intersection_update(poss_vals)
- if len(prev_possible_vals) == 0:
- # Conflicting conditions on this variable:
- # Operator invalid.
- return None
-
- if not done and len(new_condition) != 0:
- # we did not enforce the negative condition by constraining
- # an existing condition on one of the variables representing
- # this atom. So we need to introduce a new condition:
- # We can select any from new_condition and currently prefer the
- # smallest one.
- candidates = sorted(new_condition.items(), key=number_of_values)
- var, vals = candidates[0]
- condition[var] = vals
-
- def multiply_out(condition): # destroys the input
- sorted_conds = sorted(condition.items(), key=number_of_values)
- flat_conds = [{}]
- for var, vals in sorted_conds:
- if len(vals) == 1:
- for cond in flat_conds:
- cond[var] = vals.pop() # destroys the input here
- else:
- new_conds = []
- for cond in flat_conds:
- for val in vals:
- new_cond = deepcopy(cond)
- new_cond[var] = val
- new_conds.append(new_cond)
- flat_conds = new_conds
- return flat_conds
-
- return multiply_out(condition)
-
-
-def translate_strips_conditions(conditions, dictionary, ranges,
- mutex_dict, mutex_ranges):
- if not conditions:
- return [{}] # Quick exit for common case.
-
- # Check if the condition violates any mutexes.
- if translate_strips_conditions_aux(conditions, mutex_dict,
- mutex_ranges) is None:
- return None
-
- return translate_strips_conditions_aux(conditions, dictionary, ranges)
-
-
-def translate_strips_operator(operator, dictionary, ranges, mutex_dict,
- mutex_ranges, implied_facts):
- conditions = translate_strips_conditions(operator.precondition, dictionary,
- ranges, mutex_dict, mutex_ranges)
- if conditions is None:
- return []
- sas_operators = []
- for condition in conditions:
- op = translate_strips_operator_aux(operator, dictionary, ranges,
- mutex_dict, mutex_ranges,
- implied_facts, condition)
- if op is not None:
- sas_operators.append(op)
- return sas_operators
-
-
-def negate_and_translate_condition(condition, dictionary, ranges, mutex_dict,
- mutex_ranges):
- # condition is a list of lists of literals (DNF)
- # the result is the negation of the condition in DNF in
- # finite-domain representation (a list of dictionaries that map
- # variables to values)
- negation = []
- if [] in condition: # condition always satisfied
- return None # negation unsatisfiable
- for combination in product(*condition):
- cond = [l.negate() for l in combination]
- cond = translate_strips_conditions(cond, dictionary, ranges,
- mutex_dict, mutex_ranges)
- if cond is not None:
- negation.extend(cond)
- return negation if negation else None
-
-
-def translate_strips_operator_aux(operator, dictionary, ranges, mutex_dict,
- mutex_ranges, implied_facts, condition):
-
- # collect all add effects
- effects_by_variable = defaultdict(lambda: defaultdict(list))
- # effects_by_variables: var -> val -> list(FDR conditions)
- add_conds_by_variable = defaultdict(list)
- for conditions, fact in operator.add_effects:
- eff_condition_list = translate_strips_conditions(conditions, dictionary,
- ranges, mutex_dict,
- mutex_ranges)
- if eff_condition_list is None: # Impossible condition for this effect.
- continue
- for var, val in dictionary[fact]:
- effects_by_variable[var][val].extend(eff_condition_list)
- add_conds_by_variable[var].append(conditions)
-
- # collect all del effects
- del_effects_by_variable = defaultdict(lambda: defaultdict(list))
- for conditions, fact in operator.del_effects:
- eff_condition_list = translate_strips_conditions(conditions, dictionary,
- ranges, mutex_dict,
- mutex_ranges)
- if eff_condition_list is None: # Impossible condition for this effect.
- continue
- for var, val in dictionary[fact]:
- del_effects_by_variable[var][val].extend(eff_condition_list)
-
- # add effect var=none_of_those for all del effects with the additional
- # condition that the deleted value has been true and no add effect triggers
- for var in del_effects_by_variable:
- no_add_effect_condition = negate_and_translate_condition(
- add_conds_by_variable[var], dictionary, ranges, mutex_dict,
- mutex_ranges)
- if no_add_effect_condition is None: # there is always an add effect
- continue
- none_of_those = ranges[var] - 1
- for val, conds in del_effects_by_variable[var].items():
- for cond in conds:
- # add guard
- if var in cond and cond[var] != val:
- continue # condition inconsistent with deleted atom
- cond[var] = val
- # add condition that no add effect triggers
- for no_add_cond in no_add_effect_condition:
- new_cond = dict(cond)
- # This is a rather expensive step. We try every no_add_cond
- # with every condition of the delete effect and discard the
- # overal combination if it is unsatisfiable. Since
- # no_add_effect_condition is precomputed it can contain many
- # no_add_conds in which a certain literal occurs. So if cond
- # plus the literal is already unsatisfiable, we still try
- # all these combinations. A possible optimization would be
- # to re-compute no_add_effect_condition for every delete
- # effect and to unfold the product(*condition) in
- # negate_and_translate_condition to allow an early break.
- for cvar, cval in no_add_cond.items():
- if cvar in new_cond and new_cond[cvar] != cval:
- # the del effect condition plus the deleted atom
- # imply that some add effect on the variable
- # triggers
- break
- new_cond[cvar] = cval
- else:
- effects_by_variable[var][none_of_those].append(new_cond)
-
- return build_sas_operator(operator.name, condition, effects_by_variable,
- operator.cost, ranges, implied_facts)
-
-
-def build_sas_operator(name, condition, effects_by_variable, cost, ranges,
- implied_facts):
- if options.add_implied_preconditions:
- implied_precondition = set()
- for fact in condition.items():
- implied_precondition.update(implied_facts[fact])
- prevail_and_pre = dict(condition)
- pre_post = []
- for var, effects_on_var in effects_by_variable.items():
- orig_pre = condition.get(var, -1)
- added_effect = False
- for post, eff_conditions in effects_on_var.items():
- pre = orig_pre
- # if the effect does not change the variable value, we ignore it
- if pre == post:
- continue
- eff_condition_lists = [sorted(eff_cond.items())
- for eff_cond in eff_conditions]
- if ranges[var] == 2:
- # Apply simplifications for binary variables.
- if prune_stupid_effect_conditions(var, post,
- eff_condition_lists,
- effects_on_var):
- global simplified_effect_condition_counter
- simplified_effect_condition_counter += 1
- if (options.add_implied_preconditions and pre == -1 and
- (var, 1 - post) in implied_precondition):
- global added_implied_precondition_counter
- added_implied_precondition_counter += 1
- pre = 1 - post
- for eff_condition in eff_condition_lists:
- # we do not need to represent a precondition as effect condition
- # and we do not want to keep an effect whose condition contradicts
- # a pre- or prevail condition
- filtered_eff_condition = []
- eff_condition_contradicts_precondition = False
- for variable, value in eff_condition:
- if variable in prevail_and_pre:
- if prevail_and_pre[variable] != value:
- eff_condition_contradicts_precondition = True
- break
- else:
- filtered_eff_condition.append((variable, value))
- if eff_condition_contradicts_precondition:
- continue
- pre_post.append((var, pre, post, filtered_eff_condition))
- added_effect = True
- if added_effect:
- # the condition on var is not a prevail condition but a
- # precondition, so we remove it from the prevail condition
- condition.pop(var, -1)
- if not pre_post: # operator is noop
- return None
- prevail = list(condition.items())
- return sas_tasks.SASOperator(name, prevail, pre_post, cost)
-
-
-def prune_stupid_effect_conditions(var, val, conditions, effects_on_var):
- ## (IF THEN := ) is a conditional effect.
- ## is guaranteed to be a binary variable.
- ## is in DNF representation (list of lists).
- ##
- ## We simplify by applying two rules:
- ## 1. Conditions of the form "var = dualval" where var is the
- ## effect variable and dualval != val can be omitted.
- ## (If var != dualval, then var == val because it is binary,
- ## which means that in such situations the effect is a no-op.)
- ## The condition can only be omitted if there is no effect
- ## producing dualval (see issue736).
- ## 2. If conditions contains any empty list, it is equivalent
- ## to True and we can remove all other disjuncts.
- ##
- ## returns True when anything was changed
- if conditions == [[]]:
- return False # Quick exit for common case.
- assert val in [0, 1]
- dual_val = 1 - val
- dual_fact = (var, dual_val)
- if dual_val in effects_on_var:
- return False
- simplified = False
- for condition in conditions:
- # Apply rule 1.
- while dual_fact in condition:
- # print "*** Removing dual condition"
- simplified = True
- condition.remove(dual_fact)
- # Apply rule 2.
- if not condition:
- conditions[:] = [[]]
- simplified = True
- break
- return simplified
-
-
-def translate_strips_axiom(axiom, dictionary, ranges, mutex_dict, mutex_ranges):
- conditions = translate_strips_conditions(axiom.condition, dictionary,
- ranges, mutex_dict, mutex_ranges)
- if conditions is None:
- return []
- if axiom.effect.negated:
- [(var, _)] = dictionary[axiom.effect.positive()]
- effect = (var, ranges[var] - 1)
- else:
- [effect] = dictionary[axiom.effect]
- axioms = []
- for condition in conditions:
- axioms.append(sas_tasks.SASAxiom(condition.items(), effect))
- return axioms
-
-
-def translate_strips_operators(actions, strips_to_sas, ranges, mutex_dict,
- mutex_ranges, implied_facts):
- result = []
- for action in actions:
- sas_ops = translate_strips_operator(action, strips_to_sas, ranges,
- mutex_dict, mutex_ranges,
- implied_facts)
- result.extend(sas_ops)
- return result
-
-
-def translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
- mutex_ranges):
- result = []
- for axiom in axioms:
- sas_axioms = translate_strips_axiom(axiom, strips_to_sas, ranges,
- mutex_dict, mutex_ranges)
- result.extend(sas_axioms)
- return result
-
-
-def dump_task(init, goals, actions, axioms, axiom_layer_dict):
- old_stdout = sys.stdout
- with open("output.dump", "w") as dump_file:
- sys.stdout = dump_file
- print("Initial state")
- for atom in init:
- print(atom)
- print()
- print("Goals")
- for goal in goals:
- print(goal)
- for action in actions:
- print()
- print("Action")
- action.dump()
- for axiom in axioms:
- print()
- print("Axiom")
- axiom.dump()
- print()
- print("Axiom layers")
- for atom, layer in axiom_layer_dict.items():
- print("%s: layer %d" % (atom, layer))
- sys.stdout = old_stdout
-
-
-def translate_task(strips_to_sas, ranges, translation_key,
- mutex_dict, mutex_ranges, mutex_key,
- init, goals,
- actions, axioms, metric, implied_facts):
- with timers.timing("Processing axioms", block=True):
- axioms, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goals,
- options.layer_strategy)
-
- if options.dump_task:
- # Remove init facts that don't occur in strips_to_sas: they're constant.
- nonconstant_init = filter(strips_to_sas.get, init)
- dump_task(nonconstant_init, goals, actions, axioms, axiom_layer_dict)
-
- init_values = [rang - 1 for rang in ranges]
- # Closed World Assumption: Initialize to "range - 1" == Nothing.
- for fact in init:
- pairs = strips_to_sas.get(fact, []) # empty for static init facts
- for var, val in pairs:
- curr_val = init_values[var]
- if curr_val != ranges[var] - 1 and curr_val != val:
- assert False, "Inconsistent init facts! [fact = %s]" % fact
- init_values[var] = val
- init = sas_tasks.SASInit(init_values)
-
- goal_dict_list = translate_strips_conditions(goals, strips_to_sas, ranges,
- mutex_dict, mutex_ranges)
- if goal_dict_list is None:
- # "None" is a signal that the goal is unreachable because it
- # violates a mutex.
- return unsolvable_sas_task("Goal violates a mutex")
-
- assert len(goal_dict_list) == 1, "Negative goal not supported"
- ## we could substitute the negative goal literal in
- ## normalize.substitute_complicated_goal, using an axiom. We currently
- ## don't do this, because we don't run into this assertion, if the
- ## negative goal is part of finite domain variable with only two
- ## values, which is most of the time the case, and hence refrain from
- ## introducing axioms (that are not supported by all heuristics)
- goal_pairs = list(goal_dict_list[0].items())
- if not goal_pairs:
- return solvable_sas_task("Empty goal")
- goal = sas_tasks.SASGoal(goal_pairs)
-
- operators = translate_strips_operators(actions, strips_to_sas, ranges,
- mutex_dict, mutex_ranges,
- implied_facts)
- axioms = translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
- mutex_ranges)
-
- axiom_layers = [-1] * len(ranges)
- for atom, layer in axiom_layer_dict.items():
- assert layer >= 0
- [(var, val)] = strips_to_sas[atom]
- axiom_layers[var] = layer
- variables = sas_tasks.SASVariables(ranges, axiom_layers, translation_key)
- mutexes = [sas_tasks.SASMutexGroup(group) for group in mutex_key]
- return sas_tasks.SASTask(variables, mutexes, init, goal,
- operators, axioms, metric)
-
-
-def trivial_task(solvable):
- variables = sas_tasks.SASVariables(
- [2], [-1], [["Atom dummy(val1)", "Atom dummy(val2)"]])
- # We create no mutexes: the only possible mutex is between
- # dummy(val1) and dummy(val2), but the preprocessor would filter
- # it out anyway since it is trivial (only involves one
- # finite-domain variable).
- mutexes = []
- init = sas_tasks.SASInit([0])
- if solvable:
- goal_fact = (0, 0)
- else:
- goal_fact = (0, 1)
- goal = sas_tasks.SASGoal([goal_fact])
- operators = []
- axioms = []
- metric = True
- return sas_tasks.SASTask(variables, mutexes, init, goal,
- operators, axioms, metric)
-
-def solvable_sas_task(msg):
- print("%s! Generating solvable task..." % msg)
- return trivial_task(solvable=True)
-
-def unsolvable_sas_task(msg):
- print("%s! Generating unsolvable task..." % msg)
- return trivial_task(solvable=False)
-
-def pddl_to_sas(task):
- with timers.timing("Instantiating", block=True):
- (relaxed_reachable, atoms, actions, axioms,
- reachable_action_params) = instantiate.explore(task)
-
- if not relaxed_reachable:
- return unsolvable_sas_task("No relaxed solution")
-
- # HACK! Goals should be treated differently.
- if isinstance(task.goal, pddl.Conjunction):
- goal_list = task.goal.parts
- else:
- goal_list = [task.goal]
- for item in goal_list:
- assert isinstance(item, pddl.Literal)
-
- with timers.timing("Computing fact groups", block=True):
- groups, mutex_groups, translation_key = fact_groups.compute_groups(
- task, atoms, reachable_action_params)
-
- with timers.timing("Building STRIPS to SAS dictionary"):
- ranges, strips_to_sas = strips_to_sas_dictionary(
- groups, assert_partial=options.use_partial_encoding)
-
- with timers.timing("Building dictionary for full mutex groups"):
- mutex_ranges, mutex_dict = strips_to_sas_dictionary(
- mutex_groups, assert_partial=False)
-
- if options.add_implied_preconditions:
- with timers.timing("Building implied facts dictionary..."):
- implied_facts = build_implied_facts(strips_to_sas, groups,
- mutex_groups)
- else:
- implied_facts = {}
-
- with timers.timing("Building mutex information", block=True):
- if options.use_partial_encoding:
- mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
- else:
- # With our current representation, emitting complete mutex
- # information for the full encoding can incur an
- # unacceptable (quadratic) blowup in the task representation
- # size. See issue771 for details.
- print("using full encoding: between-variable mutex information skipped.")
- mutex_key = []
-
- with timers.timing("Translating task", block=True):
- sas_task = translate_task(
- strips_to_sas, ranges, translation_key,
- mutex_dict, mutex_ranges, mutex_key,
- task.init, goal_list, actions, axioms, task.use_min_cost_metric,
- implied_facts)
-
- print("%d effect conditions simplified" %
- simplified_effect_condition_counter)
- print("%d implied preconditions added" %
- added_implied_precondition_counter)
-
- if options.filter_unreachable_facts:
- with timers.timing("Detecting unreachable propositions", block=True):
- try:
- simplify.filter_unreachable_propositions(sas_task)
- except simplify.Impossible:
- return unsolvable_sas_task("Simplified to trivially false goal")
- except simplify.TriviallySolvable:
- return solvable_sas_task("Simplified to empty goal")
-
- if options.reorder_variables or options.filter_unimportant_vars:
- with timers.timing("Reordering and filtering variables", block=True):
- variable_order.find_and_apply_variable_order(
- sas_task, options.reorder_variables,
- options.filter_unimportant_vars)
-
- return sas_task
-
-
-def build_mutex_key(strips_to_sas, groups):
- assert options.use_partial_encoding
- group_keys = []
- for group in groups:
- group_key = []
- for fact in group:
- represented_by = strips_to_sas.get(fact)
- if represented_by:
- assert len(represented_by) == 1
- group_key.append(represented_by[0])
- else:
- print("not in strips_to_sas, left out:", fact)
- group_keys.append(group_key)
- return group_keys
-
-
-def build_implied_facts(strips_to_sas, groups, mutex_groups):
- ## Compute a dictionary mapping facts (FDR pairs) to lists of FDR
- ## pairs implied by that fact. In other words, in all states
- ## containing p, all pairs in implied_facts[p] must also be true.
- ##
- ## There are two simple cases where a pair p implies a pair q != p
- ## in our FDR encodings:
- ## 1. p and q encode the same fact
- ## 2. p encodes a STRIPS proposition X, q encodes a STRIPS literal
- ## "not Y", and X and Y are mutex.
- ##
- ## The first case cannot arise when we use partial encodings, and
- ## when we use full encodings, I don't think it would give us any
- ## additional information to exploit in the operator translation,
- ## so we only use the second case.
- ##
- ## Note that for a pair q to encode a fact "not Y", Y must form a
- ## fact group of size 1. We call such propositions Y "lonely".
-
- ## In the first step, we compute a dictionary mapping each lonely
- ## proposition to its variable number.
- lonely_propositions = {}
- for var_no, group in enumerate(groups):
- if len(group) == 1:
- lonely_prop = group[0]
- assert strips_to_sas[lonely_prop] == [(var_no, 0)]
- lonely_propositions[lonely_prop] = var_no
-
- ## Then we compute implied facts as follows: for each mutex group,
- ## check if prop is lonely (then and only then "not prop" has a
- ## representation as an FDR pair). In that case, all other facts
- ## in this mutex group imply "not prop".
- implied_facts = defaultdict(list)
- for mutex_group in mutex_groups:
- for prop in mutex_group:
- prop_var = lonely_propositions.get(prop)
- if prop_var is not None:
- prop_is_false = (prop_var, 1)
- for other_prop in mutex_group:
- if other_prop is not prop:
- for other_fact in strips_to_sas[other_prop]:
- implied_facts[other_fact].append(prop_is_false)
-
- return implied_facts
-
-
-def dump_statistics(sas_task):
- print("Translator variables: %d" % len(sas_task.variables.ranges))
- print("Translator derived variables: %d" %
- len([layer for layer in sas_task.variables.axiom_layers
- if layer >= 0]))
- print("Translator facts: %d" % sum(sas_task.variables.ranges))
- print("Translator goal facts: %d" % len(sas_task.goal.pairs))
- print("Translator mutex groups: %d" % len(sas_task.mutexes))
- print("Translator total mutex groups size: %d" %
- sum(mutex.get_encoding_size() for mutex in sas_task.mutexes))
- print("Translator operators: %d" % len(sas_task.operators))
- print("Translator axioms: %d" % len(sas_task.axioms))
- print("Translator task size: %d" % sas_task.get_encoding_size())
- try:
- peak_memory = tools.get_peak_memory_in_kb()
- except Warning as warning:
- print(warning)
- else:
- print("Translator peak memory: %d KB" % peak_memory)
-
-
-def main():
- timer = timers.Timer()
- with timers.timing("Parsing", True):
- task = pddl_parser.open(
- domain_filename=options.domain, task_filename=options.task)
-
- with timers.timing("Normalizing task"):
- normalize.normalize(task)
-
- if options.generate_relaxed_task:
- # Remove delete effects.
- for action in task.actions:
- for index, effect in reversed(list(enumerate(action.effects))):
- if effect.literal.negated:
- del action.effects[index]
-
- sas_task = pddl_to_sas(task)
- dump_statistics(sas_task)
-
- with timers.timing("Writing output"):
- with open(options.sas_file, "w") as output_file:
- sas_task.output(output_file)
- print("Done! %s" % timer)
-
-
-def handle_sigxcpu(signum, stackframe):
- print()
- print("Translator hit the time limit")
- # sys.exit() is not safe to be called from within signal handlers, but
- # os._exit() is.
- os._exit(TRANSLATE_OUT_OF_TIME)
-
-
-if __name__ == "__main__":
- try:
- signal.signal(signal.SIGXCPU, handle_sigxcpu)
- except AttributeError:
- print("Warning! SIGXCPU is not available on your platform. "
- "This means that the planner cannot be gracefully terminated "
- "when using a time limit, which, however, is probably "
- "supported on your platform anyway.")
- try:
- # Reserve about 10 MB of emergency memory.
- # https://stackoverflow.com/questions/19469608/
- emergency_memory = b"x" * 10**7
- main()
- except MemoryError:
- del emergency_memory
- print()
- print("Translator ran out of memory, traceback:")
- print("=" * 79)
- traceback.print_exc(file=sys.stdout)
- print("=" * 79)
- sys.exit(TRANSLATE_OUT_OF_MEMORY)
+run(sys.argv[1:])
diff --git a/src/translate/tests/__init__.py b/src/translate/translator/__init__.py
similarity index 100%
rename from src/translate/tests/__init__.py
rename to src/translate/translator/__init__.py
diff --git a/src/translate/axiom_rules.py b/src/translate/translator/axiom_rules.py
similarity index 99%
rename from src/translate/axiom_rules.py
rename to src/translate/translator/axiom_rules.py
index 036ee93596..ce45908090 100644
--- a/src/translate/axiom_rules.py
+++ b/src/translate/translator/axiom_rules.py
@@ -1,7 +1,7 @@
-import options
-import pddl
-import sccs
-import timers
+from . import options
+from . import pddl
+from . import sccs
+from . import timers
from collections import defaultdict
from itertools import chain
diff --git a/src/translate/build_model.py b/src/translate/translator/build_model.py
similarity index 99%
rename from src/translate/build_model.py
rename to src/translate/translator/build_model.py
index 2fed5a9eb8..5bb6685636 100755
--- a/src/translate/build_model.py
+++ b/src/translate/translator/build_model.py
@@ -4,8 +4,8 @@
import sys
import itertools
-import pddl
-import timers
+from . import pddl
+from . import timers
from functools import reduce
def convert_rules(prog):
diff --git a/src/translate/constraints.py b/src/translate/translator/constraints.py
similarity index 100%
rename from src/translate/constraints.py
rename to src/translate/translator/constraints.py
diff --git a/src/translate/fact_groups.py b/src/translate/translator/fact_groups.py
similarity index 98%
rename from src/translate/fact_groups.py
rename to src/translate/translator/fact_groups.py
index 3925e58bdd..bf28f761e3 100644
--- a/src/translate/fact_groups.py
+++ b/src/translate/translator/fact_groups.py
@@ -1,7 +1,7 @@
-import invariant_finder
-import options
-import pddl
-import timers
+from . import invariant_finder
+from . import options
+from . import pddl
+from . import timers
DEBUG = False
diff --git a/src/translate/graph.py b/src/translate/translator/graph.py
similarity index 100%
rename from src/translate/graph.py
rename to src/translate/translator/graph.py
diff --git a/src/translate/greedy_join.py b/src/translate/translator/greedy_join.py
similarity index 98%
rename from src/translate/greedy_join.py
rename to src/translate/translator/greedy_join.py
index bb548d75c4..d8919eaadd 100644
--- a/src/translate/greedy_join.py
+++ b/src/translate/translator/greedy_join.py
@@ -1,7 +1,7 @@
import sys
-import pddl
-import pddl_to_prolog
+from . import pddl
+from . import pddl_to_prolog
class OccurrencesTracker:
"""Keeps track of the number of times each variable appears
diff --git a/src/translate/instantiate.py b/src/translate/translator/instantiate.py
similarity index 97%
rename from src/translate/instantiate.py
rename to src/translate/translator/instantiate.py
index 01904a8521..a4cc8f4b6f 100755
--- a/src/translate/instantiate.py
+++ b/src/translate/translator/instantiate.py
@@ -3,10 +3,10 @@
from collections import defaultdict
-import build_model
-import pddl_to_prolog
-import pddl
-import timers
+from . import build_model
+from . import pddl_to_prolog
+from . import pddl
+from . import timers
def get_fluent_facts(task, model):
fluent_predicates = set()
diff --git a/src/translate/invariant_finder.py b/src/translate/translator/invariant_finder.py
similarity index 98%
rename from src/translate/invariant_finder.py
rename to src/translate/translator/invariant_finder.py
index e167f12bfb..9af2d25fa7 100755
--- a/src/translate/invariant_finder.py
+++ b/src/translate/translator/invariant_finder.py
@@ -5,10 +5,10 @@
import itertools
import time
-import invariants
-import options
-import pddl
-import timers
+from . import invariants
+from . import options
+from . import pddl
+from . import timers
class BalanceChecker:
def __init__(self, task, reachable_action_params):
diff --git a/src/translate/invariants.py b/src/translate/translator/invariants.py
similarity index 99%
rename from src/translate/invariants.py
rename to src/translate/translator/invariants.py
index 81cb8adda8..7710b95f66 100644
--- a/src/translate/invariants.py
+++ b/src/translate/translator/invariants.py
@@ -1,9 +1,9 @@
from collections import defaultdict
import itertools
-import constraints
-import pddl
-import tools
+from . import constraints
+from . import pddl
+from . import tools
# Notes:
# All parts of an invariant always use all non-counted variables
diff --git a/src/translate/normalize.py b/src/translate/translator/normalize.py
similarity index 99%
rename from src/translate/normalize.py
rename to src/translate/translator/normalize.py
index 375dc67e9c..78718670a6 100755
--- a/src/translate/normalize.py
+++ b/src/translate/translator/normalize.py
@@ -2,7 +2,7 @@
import copy
-import pddl
+from . import pddl
class ConditionProxy:
def clone_owner(self):
diff --git a/src/translate/options.py b/src/translate/translator/options.py
similarity index 96%
rename from src/translate/options.py
rename to src/translate/translator/options.py
index 091e8e1c83..fa0c42566b 100644
--- a/src/translate/options.py
+++ b/src/translate/translator/options.py
@@ -2,7 +2,7 @@
import sys
-def parse_args():
+def parse_args(args):
argparser = argparse.ArgumentParser()
argparser.add_argument(
"domain", help="path to domain pddl file")
@@ -57,7 +57,7 @@ def parse_args():
help="How to assign layers to derived variables. 'min' attempts to put as "
"many variables into the same layer as possible, while 'max' puts each variable "
"into its own layer unless it is part of a cycle.")
- return argparser.parse_args()
+ return argparser.parse_args(args)
def copy_args_to_module(args):
@@ -66,9 +66,7 @@ def copy_args_to_module(args):
module_dict[key] = value
-def setup():
- args = parse_args()
+def setup(args):
+ args = parse_args(args)
copy_args_to_module(args)
-
-setup()
diff --git a/src/translate/pddl/__init__.py b/src/translate/translator/pddl/__init__.py
similarity index 100%
rename from src/translate/pddl/__init__.py
rename to src/translate/translator/pddl/__init__.py
diff --git a/src/translate/pddl/actions.py b/src/translate/translator/pddl/actions.py
similarity index 100%
rename from src/translate/pddl/actions.py
rename to src/translate/translator/pddl/actions.py
diff --git a/src/translate/pddl/axioms.py b/src/translate/translator/pddl/axioms.py
similarity index 100%
rename from src/translate/pddl/axioms.py
rename to src/translate/translator/pddl/axioms.py
diff --git a/src/translate/pddl/conditions.py b/src/translate/translator/pddl/conditions.py
similarity index 100%
rename from src/translate/pddl/conditions.py
rename to src/translate/translator/pddl/conditions.py
diff --git a/src/translate/pddl/effects.py b/src/translate/translator/pddl/effects.py
similarity index 100%
rename from src/translate/pddl/effects.py
rename to src/translate/translator/pddl/effects.py
diff --git a/src/translate/pddl/f_expression.py b/src/translate/translator/pddl/f_expression.py
similarity index 100%
rename from src/translate/pddl/f_expression.py
rename to src/translate/translator/pddl/f_expression.py
diff --git a/src/translate/pddl/functions.py b/src/translate/translator/pddl/functions.py
similarity index 100%
rename from src/translate/pddl/functions.py
rename to src/translate/translator/pddl/functions.py
diff --git a/src/translate/pddl/pddl_types.py b/src/translate/translator/pddl/pddl_types.py
similarity index 100%
rename from src/translate/pddl/pddl_types.py
rename to src/translate/translator/pddl/pddl_types.py
diff --git a/src/translate/pddl/predicates.py b/src/translate/translator/pddl/predicates.py
similarity index 100%
rename from src/translate/pddl/predicates.py
rename to src/translate/translator/pddl/predicates.py
diff --git a/src/translate/pddl/tasks.py b/src/translate/translator/pddl/tasks.py
similarity index 100%
rename from src/translate/pddl/tasks.py
rename to src/translate/translator/pddl/tasks.py
diff --git a/src/translate/pddl_parser/__init__.py b/src/translate/translator/pddl_parser/__init__.py
similarity index 100%
rename from src/translate/pddl_parser/__init__.py
rename to src/translate/translator/pddl_parser/__init__.py
diff --git a/src/translate/pddl_parser/lisp_parser.py b/src/translate/translator/pddl_parser/lisp_parser.py
similarity index 100%
rename from src/translate/pddl_parser/lisp_parser.py
rename to src/translate/translator/pddl_parser/lisp_parser.py
diff --git a/src/translate/pddl_parser/parsing_functions.py b/src/translate/translator/pddl_parser/parsing_functions.py
similarity index 99%
rename from src/translate/pddl_parser/parsing_functions.py
rename to src/translate/translator/pddl_parser/parsing_functions.py
index fdc0b9dc80..56c94e201e 100644
--- a/src/translate/pddl_parser/parsing_functions.py
+++ b/src/translate/translator/pddl_parser/parsing_functions.py
@@ -1,7 +1,7 @@
import sys
-import graph
-import pddl
+from .. import graph
+from .. import pddl
def parse_typed_list(alist, only_variables=False,
diff --git a/src/translate/pddl_parser/pddl_file.py b/src/translate/translator/pddl_parser/pddl_file.py
similarity index 98%
rename from src/translate/pddl_parser/pddl_file.py
rename to src/translate/translator/pddl_parser/pddl_file.py
index 294a0b4d85..c344f2dc72 100644
--- a/src/translate/pddl_parser/pddl_file.py
+++ b/src/translate/translator/pddl_parser/pddl_file.py
@@ -1,4 +1,4 @@
-import options
+from .. import options
from . import lisp_parser
from . import parsing_functions
diff --git a/src/translate/pddl_to_prolog.py b/src/translate/translator/pddl_to_prolog.py
similarity index 98%
rename from src/translate/pddl_to_prolog.py
rename to src/translate/translator/pddl_to_prolog.py
index fee70f7c3b..db1edb2b70 100755
--- a/src/translate/pddl_to_prolog.py
+++ b/src/translate/translator/pddl_to_prolog.py
@@ -3,9 +3,10 @@
import itertools
-import normalize
-import pddl
-import timers
+from . import normalize
+from . import pddl
+from . import timers
+
class PrologProgram:
def __init__(self):
@@ -36,7 +37,7 @@ def normalize(self):
self.split_duplicate_arguments()
self.convert_trivial_rules()
def split_rules(self):
- import split_rules
+ from . import split_rules
# Splits rules whose conditions can be partitioned in such a way that
# the parts have disjoint variable sets, then split n-ary joins into
# a number of binary joins, introducing new pseudo-predicates for the
diff --git a/src/translate/regression-tests/README b/src/translate/translator/regression-tests/README
similarity index 100%
rename from src/translate/regression-tests/README
rename to src/translate/translator/regression-tests/README
diff --git a/src/translate/regression-tests/issue34-domain.pddl b/src/translate/translator/regression-tests/issue34-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue34-domain.pddl
rename to src/translate/translator/regression-tests/issue34-domain.pddl
diff --git a/src/translate/regression-tests/issue34-problem.pddl b/src/translate/translator/regression-tests/issue34-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue34-problem.pddl
rename to src/translate/translator/regression-tests/issue34-problem.pddl
diff --git a/src/translate/regression-tests/issue405-domain.pddl b/src/translate/translator/regression-tests/issue405-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue405-domain.pddl
rename to src/translate/translator/regression-tests/issue405-domain.pddl
diff --git a/src/translate/regression-tests/issue405-problem.pddl b/src/translate/translator/regression-tests/issue405-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue405-problem.pddl
rename to src/translate/translator/regression-tests/issue405-problem.pddl
diff --git a/src/translate/regression-tests/issue49-falsegoal-domain.pddl b/src/translate/translator/regression-tests/issue49-falsegoal-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue49-falsegoal-domain.pddl
rename to src/translate/translator/regression-tests/issue49-falsegoal-domain.pddl
diff --git a/src/translate/regression-tests/issue49-falsegoal-problem.pddl b/src/translate/translator/regression-tests/issue49-falsegoal-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue49-falsegoal-problem.pddl
rename to src/translate/translator/regression-tests/issue49-falsegoal-problem.pddl
diff --git a/src/translate/regression-tests/issue49-orig-domain.pddl b/src/translate/translator/regression-tests/issue49-orig-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue49-orig-domain.pddl
rename to src/translate/translator/regression-tests/issue49-orig-domain.pddl
diff --git a/src/translate/regression-tests/issue49-orig-problem.pddl b/src/translate/translator/regression-tests/issue49-orig-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue49-orig-problem.pddl
rename to src/translate/translator/regression-tests/issue49-orig-problem.pddl
diff --git a/src/translate/regression-tests/issue49-truegoal-domain.pddl b/src/translate/translator/regression-tests/issue49-truegoal-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue49-truegoal-domain.pddl
rename to src/translate/translator/regression-tests/issue49-truegoal-domain.pddl
diff --git a/src/translate/regression-tests/issue49-truegoal-problem.pddl b/src/translate/translator/regression-tests/issue49-truegoal-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue49-truegoal-problem.pddl
rename to src/translate/translator/regression-tests/issue49-truegoal-problem.pddl
diff --git a/src/translate/regression-tests/issue58-domain.pddl b/src/translate/translator/regression-tests/issue58-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue58-domain.pddl
rename to src/translate/translator/regression-tests/issue58-domain.pddl
diff --git a/src/translate/regression-tests/issue58-problem.pddl b/src/translate/translator/regression-tests/issue58-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue58-problem.pddl
rename to src/translate/translator/regression-tests/issue58-problem.pddl
diff --git a/src/translate/regression-tests/issue7-domain.pddl b/src/translate/translator/regression-tests/issue7-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue7-domain.pddl
rename to src/translate/translator/regression-tests/issue7-domain.pddl
diff --git a/src/translate/regression-tests/issue7-problem.pddl b/src/translate/translator/regression-tests/issue7-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue7-problem.pddl
rename to src/translate/translator/regression-tests/issue7-problem.pddl
diff --git a/src/translate/regression-tests/issue73-domain.pddl b/src/translate/translator/regression-tests/issue73-domain.pddl
similarity index 100%
rename from src/translate/regression-tests/issue73-domain.pddl
rename to src/translate/translator/regression-tests/issue73-domain.pddl
diff --git a/src/translate/regression-tests/issue73-problem.pddl b/src/translate/translator/regression-tests/issue73-problem.pddl
similarity index 100%
rename from src/translate/regression-tests/issue73-problem.pddl
rename to src/translate/translator/regression-tests/issue73-problem.pddl
diff --git a/src/translate/sas_tasks.py b/src/translate/translator/sas_tasks.py
similarity index 100%
rename from src/translate/sas_tasks.py
rename to src/translate/translator/sas_tasks.py
diff --git a/src/translate/sccs.py b/src/translate/translator/sccs.py
similarity index 100%
rename from src/translate/sccs.py
rename to src/translate/translator/sccs.py
diff --git a/src/translate/simplify.py b/src/translate/translator/simplify.py
similarity index 99%
rename from src/translate/simplify.py
rename to src/translate/translator/simplify.py
index 43236814b0..6057960045 100644
--- a/src/translate/simplify.py
+++ b/src/translate/translator/simplify.py
@@ -26,7 +26,7 @@
from collections import defaultdict
from itertools import count
-import sas_tasks
+from . import sas_tasks
DEBUG = False
diff --git a/src/translate/split_rules.py b/src/translate/translator/split_rules.py
similarity index 84%
rename from src/translate/split_rules.py
rename to src/translate/translator/split_rules.py
index 4a0d3e1f39..fdabfb60f6 100644
--- a/src/translate/split_rules.py
+++ b/src/translate/translator/split_rules.py
@@ -2,14 +2,14 @@
# components" (where to conditions are related if they share a variabe) into
# several rules, one for each connected component and one high-level rule.
-from pddl_to_prolog import Rule, get_variables
-import graph
-import greedy_join
-import pddl
+from . import pddl_to_prolog
+from . import graph
+from . import greedy_join
+from . import pddl
def get_connected_conditions(conditions):
agraph = graph.Graph(conditions)
- var_to_conditions = {var: [] for var in get_variables(conditions)}
+ var_to_conditions = {var: [] for var in pddl_to_prolog.get_variables(conditions)}
for cond in conditions:
for var in cond.args:
if var[0] == "?":
@@ -23,9 +23,9 @@ def get_connected_conditions(conditions):
def project_rule(rule, conditions, name_generator):
predicate = next(name_generator)
- effect_variables = set(rule.effect.args) & get_variables(conditions)
+ effect_variables = set(rule.effect.args) & pddl_to_prolog.get_variables(conditions)
effect = pddl.Atom(predicate, sorted(effect_variables))
- projected_rule = Rule(conditions, effect)
+ projected_rule = pddl_to_prolog.Rule(conditions, effect)
return projected_rule
def split_rule(rule, name_generator):
@@ -53,7 +53,7 @@ def split_rule(rule, name_generator):
conditions = ([proj_rule.effect for proj_rule in projected_rules] +
trivial_conditions)
- combining_rule = Rule(conditions, rule.effect)
+ combining_rule = pddl_to_prolog.Rule(conditions, rule.effect)
if len(conditions) >= 2:
combining_rule.type = "product"
else:
diff --git a/src/translate/translator/tests/__init__.py b/src/translate/translator/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/translate/tests/test_normalization.py b/src/translate/translator/tests/test_normalization.py
similarity index 100%
rename from src/translate/tests/test_normalization.py
rename to src/translate/translator/tests/test_normalization.py
diff --git a/src/translate/tests/test_scripts.py b/src/translate/translator/tests/test_scripts.py
similarity index 100%
rename from src/translate/tests/test_scripts.py
rename to src/translate/translator/tests/test_scripts.py
diff --git a/src/translate/timers.py b/src/translate/translator/timers.py
similarity index 100%
rename from src/translate/timers.py
rename to src/translate/translator/timers.py
diff --git a/src/translate/tools.py b/src/translate/translator/tools.py
similarity index 100%
rename from src/translate/tools.py
rename to src/translate/translator/tools.py
diff --git a/src/translate/translator/translate.py b/src/translate/translator/translate.py
new file mode 100755
index 0000000000..87c165c27f
--- /dev/null
+++ b/src/translate/translator/translate.py
@@ -0,0 +1,736 @@
+#! /usr/bin/env python3
+
+
+import os
+import sys
+import traceback
+
+def python_version_supported():
+ return sys.version_info >= (3, 6)
+
+if not python_version_supported():
+ sys.exit("Error: Translator only supports Python >= 3.6.")
+
+
+from collections import defaultdict
+from copy import deepcopy
+from itertools import product
+
+from . import axiom_rules
+from . import fact_groups
+from . import instantiate
+from . import normalize
+from . import options
+from . import pddl
+from . import pddl_parser
+from . import sas_tasks
+import signal
+from . import simplify
+from . import timers
+from . import tools
+from . import variable_order
+
+# TODO: The translator may generate trivial derived variables which are always
+# true, for example if there is a derived predicate in the input that only
+# depends on (non-derived) variables which are detected as always true.
+# Such a situation was encountered in the PSR-STRIPS-DerivedPredicates domain.
+# Such "always-true" variables should best be compiled away, but it is
+# not clear what the best place to do this should be. Similar
+# simplifications might be possible elsewhere, for example if a
+# derived variable is synonymous with another variable (derived or
+# non-derived).
+
+DEBUG = False
+
+
+## For a full list of exit codes, please see driver/returncodes.py. Here,
+## we only list codes that are used by the translator component of the planner.
+TRANSLATE_OUT_OF_MEMORY = 20
+TRANSLATE_OUT_OF_TIME = 21
+
+simplified_effect_condition_counter = 0
+added_implied_precondition_counter = 0
+
+
+def strips_to_sas_dictionary(groups, assert_partial):
+ dictionary = {}
+ for var_no, group in enumerate(groups):
+ for val_no, atom in enumerate(group):
+ dictionary.setdefault(atom, []).append((var_no, val_no))
+ if assert_partial:
+ assert all(len(sas_pairs) == 1
+ for sas_pairs in dictionary.values())
+ return [len(group) + 1 for group in groups], dictionary
+
+
+def translate_strips_conditions_aux(conditions, dictionary, ranges):
+ condition = {}
+ for fact in conditions:
+ if fact.negated:
+ # we handle negative conditions later, because then we
+ # can recognize when the negative condition is already
+ # ensured by a positive condition
+ continue
+ for var, val in dictionary.get(fact, ()):
+ # The default () here is a bit of a hack. For goals (but
+ # only for goals!), we can get static facts here. They
+ # cannot be statically false (that would have been
+ # detected earlier), and hence they are statically true
+ # and don't need to be translated.
+ # TODO: This would not be necessary if we dealt with goals
+ # in the same way we deal with operator preconditions etc.,
+ # where static facts disappear during grounding. So change
+ # this when the goal code is refactored (also below). (**)
+ if (condition.get(var) is not None and
+ val not in condition.get(var)):
+ # Conflicting conditions on this variable: Operator invalid.
+ return None
+ condition[var] = {val}
+
+ def number_of_values(var_vals_pair):
+ var, vals = var_vals_pair
+ return len(vals)
+
+ for fact in conditions:
+ if fact.negated:
+ ## Note: here we use a different solution than in Sec. 10.6.4
+ ## of the thesis. Compare the last sentences of the third
+ ## paragraph of the section.
+ ## We could do what is written there. As a test case,
+ ## consider Airport ADL tasks with only one airport, where
+ ## (occupied ?x) variables are encoded in a single variable,
+ ## and conditions like (not (occupied ?x)) do occur in
+ ## preconditions.
+ ## However, here we avoid introducing new derived predicates
+ ## by treating the negative precondition as a disjunctive
+ ## precondition and expanding it by "multiplying out" the
+ ## possibilities. This can lead to an exponential blow-up so
+ ## it would be nice to choose the behaviour as an option.
+ done = False
+ new_condition = {}
+ atom = pddl.Atom(fact.predicate, fact.args) # force positive
+ for var, val in dictionary.get(atom, ()):
+ # see comment (**) above
+ poss_vals = set(range(ranges[var]))
+ poss_vals.remove(val)
+
+ if condition.get(var) is None:
+ assert new_condition.get(var) is None
+ new_condition[var] = poss_vals
+ else:
+ # constrain existing condition on var
+ prev_possible_vals = condition.get(var)
+ done = True
+ prev_possible_vals.intersection_update(poss_vals)
+ if len(prev_possible_vals) == 0:
+ # Conflicting conditions on this variable:
+ # Operator invalid.
+ return None
+
+ if not done and len(new_condition) != 0:
+ # we did not enforce the negative condition by constraining
+ # an existing condition on one of the variables representing
+ # this atom. So we need to introduce a new condition:
+ # We can select any from new_condition and currently prefer the
+ # smallest one.
+ candidates = sorted(new_condition.items(), key=number_of_values)
+ var, vals = candidates[0]
+ condition[var] = vals
+
+ def multiply_out(condition): # destroys the input
+ sorted_conds = sorted(condition.items(), key=number_of_values)
+ flat_conds = [{}]
+ for var, vals in sorted_conds:
+ if len(vals) == 1:
+ for cond in flat_conds:
+ cond[var] = vals.pop() # destroys the input here
+ else:
+ new_conds = []
+ for cond in flat_conds:
+ for val in vals:
+ new_cond = deepcopy(cond)
+ new_cond[var] = val
+ new_conds.append(new_cond)
+ flat_conds = new_conds
+ return flat_conds
+
+ return multiply_out(condition)
+
+
+def translate_strips_conditions(conditions, dictionary, ranges,
+ mutex_dict, mutex_ranges):
+ if not conditions:
+ return [{}] # Quick exit for common case.
+
+ # Check if the condition violates any mutexes.
+ if translate_strips_conditions_aux(conditions, mutex_dict,
+ mutex_ranges) is None:
+ return None
+
+ return translate_strips_conditions_aux(conditions, dictionary, ranges)
+
+
+def translate_strips_operator(operator, dictionary, ranges, mutex_dict,
+ mutex_ranges, implied_facts):
+ conditions = translate_strips_conditions(operator.precondition, dictionary,
+ ranges, mutex_dict, mutex_ranges)
+ if conditions is None:
+ return []
+ sas_operators = []
+ for condition in conditions:
+ op = translate_strips_operator_aux(operator, dictionary, ranges,
+ mutex_dict, mutex_ranges,
+ implied_facts, condition)
+ if op is not None:
+ sas_operators.append(op)
+ return sas_operators
+
+
+def negate_and_translate_condition(condition, dictionary, ranges, mutex_dict,
+ mutex_ranges):
+ # condition is a list of lists of literals (DNF)
+ # the result is the negation of the condition in DNF in
+ # finite-domain representation (a list of dictionaries that map
+ # variables to values)
+ negation = []
+ if [] in condition: # condition always satisfied
+ return None # negation unsatisfiable
+ for combination in product(*condition):
+ cond = [l.negate() for l in combination]
+ cond = translate_strips_conditions(cond, dictionary, ranges,
+ mutex_dict, mutex_ranges)
+ if cond is not None:
+ negation.extend(cond)
+ return negation if negation else None
+
+
+def translate_strips_operator_aux(operator, dictionary, ranges, mutex_dict,
+ mutex_ranges, implied_facts, condition):
+
+ # collect all add effects
+ effects_by_variable = defaultdict(lambda: defaultdict(list))
+ # effects_by_variables: var -> val -> list(FDR conditions)
+ add_conds_by_variable = defaultdict(list)
+ for conditions, fact in operator.add_effects:
+ eff_condition_list = translate_strips_conditions(conditions, dictionary,
+ ranges, mutex_dict,
+ mutex_ranges)
+ if eff_condition_list is None: # Impossible condition for this effect.
+ continue
+ for var, val in dictionary[fact]:
+ effects_by_variable[var][val].extend(eff_condition_list)
+ add_conds_by_variable[var].append(conditions)
+
+ # collect all del effects
+ del_effects_by_variable = defaultdict(lambda: defaultdict(list))
+ for conditions, fact in operator.del_effects:
+ eff_condition_list = translate_strips_conditions(conditions, dictionary,
+ ranges, mutex_dict,
+ mutex_ranges)
+ if eff_condition_list is None: # Impossible condition for this effect.
+ continue
+ for var, val in dictionary[fact]:
+ del_effects_by_variable[var][val].extend(eff_condition_list)
+
+ # add effect var=none_of_those for all del effects with the additional
+ # condition that the deleted value has been true and no add effect triggers
+ for var in del_effects_by_variable:
+ no_add_effect_condition = negate_and_translate_condition(
+ add_conds_by_variable[var], dictionary, ranges, mutex_dict,
+ mutex_ranges)
+ if no_add_effect_condition is None: # there is always an add effect
+ continue
+ none_of_those = ranges[var] - 1
+ for val, conds in del_effects_by_variable[var].items():
+ for cond in conds:
+ # add guard
+ if var in cond and cond[var] != val:
+ continue # condition inconsistent with deleted atom
+ cond[var] = val
+ # add condition that no add effect triggers
+ for no_add_cond in no_add_effect_condition:
+ new_cond = dict(cond)
+ # This is a rather expensive step. We try every no_add_cond
+ # with every condition of the delete effect and discard the
+ # overall combination if it is unsatisfiable. Since
+ # no_add_effect_condition is precomputed it can contain many
+ # no_add_conds in which a certain literal occurs. So if cond
+ # plus the literal is already unsatisfiable, we still try
+ # all these combinations. A possible optimization would be
+ # to re-compute no_add_effect_condition for every delete
+ # effect and to unfold the product(*condition) in
+ # negate_and_translate_condition to allow an early break.
+ for cvar, cval in no_add_cond.items():
+ if cvar in new_cond and new_cond[cvar] != cval:
+ # the del effect condition plus the deleted atom
+ # imply that some add effect on the variable
+ # triggers
+ break
+ new_cond[cvar] = cval
+ else:
+ effects_by_variable[var][none_of_those].append(new_cond)
+
+ return build_sas_operator(operator.name, condition, effects_by_variable,
+ operator.cost, ranges, implied_facts)
+
+
+def build_sas_operator(name, condition, effects_by_variable, cost, ranges,
+ implied_facts):
+ if options.add_implied_preconditions:
+ implied_precondition = set()
+ for fact in condition.items():
+ implied_precondition.update(implied_facts[fact])
+ prevail_and_pre = dict(condition)
+ pre_post = []
+ for var, effects_on_var in effects_by_variable.items():
+ orig_pre = condition.get(var, -1)
+ added_effect = False
+ for post, eff_conditions in effects_on_var.items():
+ pre = orig_pre
+ # if the effect does not change the variable value, we ignore it
+ if pre == post:
+ continue
+ eff_condition_lists = [sorted(eff_cond.items())
+ for eff_cond in eff_conditions]
+ if ranges[var] == 2:
+ # Apply simplifications for binary variables.
+ if prune_stupid_effect_conditions(var, post,
+ eff_condition_lists,
+ effects_on_var):
+ global simplified_effect_condition_counter
+ simplified_effect_condition_counter += 1
+ if (options.add_implied_preconditions and pre == -1 and
+ (var, 1 - post) in implied_precondition):
+ global added_implied_precondition_counter
+ added_implied_precondition_counter += 1
+ pre = 1 - post
+ for eff_condition in eff_condition_lists:
+ # we do not need to represent a precondition as effect condition
+ # and we do not want to keep an effect whose condition contradicts
+ # a pre- or prevail condition
+ filtered_eff_condition = []
+ eff_condition_contradicts_precondition = False
+ for variable, value in eff_condition:
+ if variable in prevail_and_pre:
+ if prevail_and_pre[variable] != value:
+ eff_condition_contradicts_precondition = True
+ break
+ else:
+ filtered_eff_condition.append((variable, value))
+ if eff_condition_contradicts_precondition:
+ continue
+ pre_post.append((var, pre, post, filtered_eff_condition))
+ added_effect = True
+ if added_effect:
+ # the condition on var is not a prevail condition but a
+ # precondition, so we remove it from the prevail condition
+ condition.pop(var, -1)
+ if not pre_post: # operator is noop
+ return None
+ prevail = list(condition.items())
+ return sas_tasks.SASOperator(name, prevail, pre_post, cost)
+
+
+def prune_stupid_effect_conditions(var, val, conditions, effects_on_var):
+ ## (IF <conditions> THEN <var> := <val>) is a conditional effect.
+ ## <var> is guaranteed to be a binary variable.
+ ## <conditions> is in DNF representation (list of lists).
+ ##
+ ## We simplify by applying two rules:
+ ## 1. Conditions of the form "var = dualval" where var is the
+ ## effect variable and dualval != val can be omitted.
+ ## (If var != dualval, then var == val because it is binary,
+ ## which means that in such situations the effect is a no-op.)
+ ## The condition can only be omitted if there is no effect
+ ## producing dualval (see issue736).
+ ## 2. If conditions contains any empty list, it is equivalent
+ ## to True and we can remove all other disjuncts.
+ ##
+ ## returns True when anything was changed
+ if conditions == [[]]:
+ return False # Quick exit for common case.
+ assert val in [0, 1]
+ dual_val = 1 - val
+ dual_fact = (var, dual_val)
+ if dual_val in effects_on_var:
+ return False
+ simplified = False
+ for condition in conditions:
+ # Apply rule 1.
+ while dual_fact in condition:
+ # print "*** Removing dual condition"
+ simplified = True
+ condition.remove(dual_fact)
+ # Apply rule 2.
+ if not condition:
+ conditions[:] = [[]]
+ simplified = True
+ break
+ return simplified
+
+
def translate_strips_axiom(axiom, dictionary, ranges, mutex_dict, mutex_ranges):
    """Translate one STRIPS axiom into a list of SASAxioms.

    The condition is converted to DNF; one SASAxiom is emitted per
    disjunct. Returns [] if the condition violates a mutex (the axiom
    can never trigger).
    """
    dnf = translate_strips_conditions(axiom.condition, dictionary,
                                      ranges, mutex_dict, mutex_ranges)
    if dnf is None:
        return []
    if axiom.effect.negated:
        # A negative effect sets the variable to its "none of those"
        # value, which is the last value of the variable's range.
        [(var, _)] = dictionary[axiom.effect.positive()]
        head = (var, ranges[var] - 1)
    else:
        [head] = dictionary[axiom.effect]
    return [sas_tasks.SASAxiom(condition.items(), head)
            for condition in dnf]
+
+
def translate_strips_operators(actions, strips_to_sas, ranges, mutex_dict,
                               mutex_ranges, implied_facts):
    """Translate all actions; returns the concatenated SAS operators."""
    return [sas_op
            for action in actions
            for sas_op in translate_strips_operator(
                action, strips_to_sas, ranges,
                mutex_dict, mutex_ranges, implied_facts)]
+
+
def translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
                            mutex_ranges):
    """Translate all axioms; returns the concatenated SAS axioms."""
    return [sas_axiom
            for axiom in axioms
            for sas_axiom in translate_strips_axiom(
                axiom, strips_to_sas, ranges, mutex_dict, mutex_ranges)]
+
+
def dump_task(init, goals, actions, axioms, axiom_layer_dict):
    """Write a human-readable dump of the task to the file "output.dump".

    init: iterable of initial-state atoms (printed via str()).
    goals: iterable of goal atoms.
    actions / axioms: objects with a dump() method that prints to stdout.
    axiom_layer_dict: maps atoms to their integer axiom layer.

    Output is produced by temporarily redirecting sys.stdout into the
    dump file, because action.dump()/axiom.dump() print to stdout.
    """
    old_stdout = sys.stdout
    with open("output.dump", "w") as dump_file:
        sys.stdout = dump_file
        # try/finally guarantees stdout is restored even if a dump() call
        # raises; the previous version leaked the redirect on error.
        try:
            print("Initial state")
            for atom in init:
                print(atom)
            print()
            print("Goals")
            for goal in goals:
                print(goal)
            for action in actions:
                print()
                print("Action")
                action.dump()
            for axiom in axioms:
                print()
                print("Axiom")
                axiom.dump()
            print()
            print("Axiom layers")
            for atom, layer in axiom_layer_dict.items():
                print("%s: layer %d" % (atom, layer))
        finally:
            sys.stdout = old_stdout
+
+
def translate_task(strips_to_sas, ranges, translation_key,
                   mutex_dict, mutex_ranges, mutex_key,
                   init, goals,
                   actions, axioms, metric, implied_facts):
    """Assemble the final SASTask from the grounded task data.

    Processes axioms, encodes the initial state and goal into the
    finite-domain representation, translates operators and axioms, and
    attaches mutex and axiom-layer information. May return a trivial
    solvable/unsolvable dummy task instead (empty goal / mutex-violating
    goal).
    """
    with timers.timing("Processing axioms", block=True):
        axioms, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goals,
                                                             options.layer_strategy)

    if options.dump_task:
        # Remove init facts that don't occur in strips_to_sas: they're constant.
        nonconstant_init = filter(strips_to_sas.get, init)
        dump_task(nonconstant_init, goals, actions, axioms, axiom_layer_dict)

    init_values = [rang - 1 for rang in ranges]
    # Closed World Assumption: Initialize to "range - 1" == Nothing.
    for fact in init:
        pairs = strips_to_sas.get(fact, [])  # empty for static init facts
        for var, val in pairs:
            curr_val = init_values[var]
            # A variable may only be assigned once (beyond its default),
            # unless the same value is assigned again.
            if curr_val != ranges[var] - 1 and curr_val != val:
                assert False, "Inconsistent init facts! [fact = %s]" % fact
            init_values[var] = val
    init = sas_tasks.SASInit(init_values)

    goal_dict_list = translate_strips_conditions(goals, strips_to_sas, ranges,
                                                 mutex_dict, mutex_ranges)
    if goal_dict_list is None:
        # "None" is a signal that the goal is unreachable because it
        # violates a mutex.
        return unsolvable_sas_task("Goal violates a mutex")

    assert len(goal_dict_list) == 1, "Negative goal not supported"
    ## we could substitute the negative goal literal in
    ## normalize.substitute_complicated_goal, using an axiom. We currently
    ## don't do this, because we don't run into this assertion, if the
    ## negative goal is part of finite domain variable with only two
    ## values, which is most of the time the case, and hence refrain from
    ## introducing axioms (that are not supported by all heuristics)
    goal_pairs = list(goal_dict_list[0].items())
    if not goal_pairs:
        return solvable_sas_task("Empty goal")
    goal = sas_tasks.SASGoal(goal_pairs)

    operators = translate_strips_operators(actions, strips_to_sas, ranges,
                                           mutex_dict, mutex_ranges,
                                           implied_facts)
    axioms = translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
                                     mutex_ranges)

    # Non-derived variables get axiom layer -1; derived variables get the
    # layer computed by the axiom processing above.
    axiom_layers = [-1] * len(ranges)
    for atom, layer in axiom_layer_dict.items():
        assert layer >= 0
        [(var, val)] = strips_to_sas[atom]
        axiom_layers[var] = layer
    variables = sas_tasks.SASVariables(ranges, axiom_layers, translation_key)
    mutexes = [sas_tasks.SASMutexGroup(group) for group in mutex_key]
    return sas_tasks.SASTask(variables, mutexes, init, goal,
                             operators, axioms, metric)
+
+
def trivial_task(solvable):
    """Return a canonical one-variable SAS task.

    The task has a single binary variable initialized to 0 and no
    operators or axioms. With solvable=True the goal equals the initial
    value (trivially solvable); otherwise the goal requires the other
    value, which is unreachable.
    """
    variables = sas_tasks.SASVariables(
        [2], [-1], [["Atom dummy(val1)", "Atom dummy(val2)"]])
    # No mutexes: the only candidate (dummy(val1) vs. dummy(val2)) is
    # trivial -- it involves a single finite-domain variable -- and the
    # preprocessor would filter it out anyway.
    goal_value = 0 if solvable else 1
    return sas_tasks.SASTask(
        variables,
        [],                                   # mutexes
        sas_tasks.SASInit([0]),
        sas_tasks.SASGoal([(0, goal_value)]),
        [],                                   # operators
        [],                                   # axioms
        True)                                 # metric
+
def solvable_sas_task(msg):
    """Report `msg` and return a trivially solvable dummy task."""
    print(f"{msg}! Generating solvable task...")
    return trivial_task(solvable=True)
+
def unsolvable_sas_task(msg):
    """Report `msg` and return a trivially unsolvable dummy task."""
    print(f"{msg}! Generating unsolvable task...")
    return trivial_task(solvable=False)
+
def pddl_to_sas(task):
    """Translate a normalized PDDL task into a SASTask.

    Pipeline: ground the task, compute fact groups (the finite-domain
    variables), build the STRIPS-to-SAS dictionaries, translate the task,
    then optionally simplify and reorder/filter variables.
    """
    with timers.timing("Instantiating", block=True):
        (relaxed_reachable, atoms, actions, axioms,
         reachable_action_params) = instantiate.explore(task)

    if not relaxed_reachable:
        # If even the delete relaxation has no solution, the original
        # task is unsolvable.
        return unsolvable_sas_task("No relaxed solution")

    # HACK! Goals should be treated differently.
    if isinstance(task.goal, pddl.Conjunction):
        goal_list = task.goal.parts
    else:
        goal_list = [task.goal]
    for item in goal_list:
        assert isinstance(item, pddl.Literal)

    with timers.timing("Computing fact groups", block=True):
        groups, mutex_groups, translation_key = fact_groups.compute_groups(
            task, atoms, reachable_action_params)

    with timers.timing("Building STRIPS to SAS dictionary"):
        ranges, strips_to_sas = strips_to_sas_dictionary(
            groups, assert_partial=options.use_partial_encoding)

    with timers.timing("Building dictionary for full mutex groups"):
        mutex_ranges, mutex_dict = strips_to_sas_dictionary(
            mutex_groups, assert_partial=False)

    if options.add_implied_preconditions:
        with timers.timing("Building implied facts dictionary..."):
            implied_facts = build_implied_facts(strips_to_sas, groups,
                                                mutex_groups)
    else:
        implied_facts = {}

    with timers.timing("Building mutex information", block=True):
        if options.use_partial_encoding:
            mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
        else:
            # With our current representation, emitting complete mutex
            # information for the full encoding can incur an
            # unacceptable (quadratic) blowup in the task representation
            # size. See issue771 for details.
            print("using full encoding: between-variable mutex information skipped.")
            mutex_key = []

    with timers.timing("Translating task", block=True):
        sas_task = translate_task(
            strips_to_sas, ranges, translation_key,
            mutex_dict, mutex_ranges, mutex_key,
            task.init, goal_list, actions, axioms, task.use_min_cost_metric,
            implied_facts)

    # Module-level counters updated as a side effect of the translation above.
    print("%d effect conditions simplified" %
          simplified_effect_condition_counter)
    print("%d implied preconditions added" %
          added_implied_precondition_counter)

    if options.filter_unreachable_facts:
        with timers.timing("Detecting unreachable propositions", block=True):
            try:
                simplify.filter_unreachable_propositions(sas_task)
            except simplify.Impossible:
                return unsolvable_sas_task("Simplified to trivially false goal")
            except simplify.TriviallySolvable:
                return solvable_sas_task("Simplified to empty goal")

    if options.reorder_variables or options.filter_unimportant_vars:
        with timers.timing("Reordering and filtering variables", block=True):
            variable_order.find_and_apply_variable_order(
                sas_task, options.reorder_variables,
                options.filter_unimportant_vars)

    return sas_task
+
+
def build_mutex_key(strips_to_sas, groups):
    """Convert mutex groups of STRIPS facts into lists of FDR (var, val) pairs.

    Facts without an FDR representation are reported and skipped.
    Requires the partial encoding (each fact maps to at most one pair).
    """
    assert options.use_partial_encoding
    group_keys = []
    for group in groups:
        key = []
        for fact in group:
            pairs = strips_to_sas.get(fact)
            if not pairs:
                # Fact was filtered out earlier and has no FDR pair.
                print("not in strips_to_sas, left out:", fact)
            else:
                assert len(pairs) == 1
                key.append(pairs[0])
        group_keys.append(key)
    return group_keys
+
+
def build_implied_facts(strips_to_sas, groups, mutex_groups):
    """Map each FDR pair to a list of FDR pairs implied by it.

    In all states containing pair p, every pair in implied_facts[p] must
    also hold. There are two simple cases where a pair p implies a pair
    q != p in our FDR encodings:
      1. p and q encode the same fact
      2. p encodes a STRIPS proposition X, q encodes a STRIPS literal
         "not Y", and X and Y are mutex.
    Case 1 cannot arise with partial encodings and (for full encodings)
    would not give additional information to exploit in the operator
    translation, so only case 2 is used.

    For "not Y" to have an FDR representation, Y must form a fact group
    of size 1; we call such propositions "lonely". A lonely proposition
    is encoded as value 0 of its (binary) variable, so "not Y" is value 1.
    """
    # Step 1: map each lonely proposition to its variable number.
    lonely_propositions = {}
    for var_no, group in enumerate(groups):
        if len(group) == 1:
            only_prop = group[0]
            assert strips_to_sas[only_prop] == [(var_no, 0)]
            lonely_propositions[only_prop] = var_no

    # Step 2: for each mutex group, every other fact in the group implies
    # "not prop" for each lonely prop it contains.
    implied_facts = defaultdict(list)
    for mutex_group in mutex_groups:
        for prop in mutex_group:
            var_no = lonely_propositions.get(prop)
            if var_no is None:
                continue
            prop_is_false = (var_no, 1)
            for other_prop in mutex_group:
                if other_prop is prop:
                    continue
                for fact in strips_to_sas[other_prop]:
                    implied_facts[fact].append(prop_is_false)

    return implied_facts
+
+
def dump_statistics(sas_task):
    """Print summary statistics about the generated SAS+ task."""
    variables = sas_task.variables
    print("Translator variables: %d" % len(variables.ranges))
    derived_count = len([layer for layer in variables.axiom_layers
                         if layer >= 0])
    print("Translator derived variables: %d" % derived_count)
    print("Translator facts: %d" % sum(variables.ranges))
    print("Translator goal facts: %d" % len(sas_task.goal.pairs))
    print("Translator mutex groups: %d" % len(sas_task.mutexes))
    total_mutex_size = sum(mutex.get_encoding_size()
                           for mutex in sas_task.mutexes)
    print("Translator total mutex groups size: %d" % total_mutex_size)
    print("Translator operators: %d" % len(sas_task.operators))
    print("Translator axioms: %d" % len(sas_task.axioms))
    print("Translator task size: %d" % sas_task.get_encoding_size())
    try:
        peak_memory = tools.get_peak_memory_in_kb()
    except Warning as warning:
        # Peak-memory measurement is unsupported on some platforms;
        # report the warning instead of a number.
        print(warning)
    else:
        print("Translator peak memory: %d KB" % peak_memory)
+
+
def main(args):
    """Parse, normalize and translate the input task, then write the result.

    args: command-line arguments forwarded to options.setup().
    Writes the translated task to options.sas_file.
    """
    options.setup(args)
    timer = timers.Timer()
    with timers.timing("Parsing", True):
        task = pddl_parser.open(
            domain_filename=options.domain, task_filename=options.task)

    with timers.timing("Normalizing task"):
        normalize.normalize(task)

    if options.generate_relaxed_task:
        # Remove delete effects.
        # Iterating in reverse keeps the remaining indices valid while
        # deleting from the list.
        for action in task.actions:
            for index, effect in reversed(list(enumerate(action.effects))):
                if effect.literal.negated:
                    del action.effects[index]

    sas_task = pddl_to_sas(task)
    dump_statistics(sas_task)

    with timers.timing("Writing output"):
        with open(options.sas_file, "w") as output_file:
            sas_task.output(output_file)
    print("Done! %s" % timer)
+
+
def handle_sigxcpu(signum, stackframe):
    """SIGXCPU handler: report that the CPU-time limit was hit and exit."""
    print()
    print("Translator hit the time limit")
    # sys.exit() is not safe to be called from within signal handlers, but
    # os._exit() is.
    os._exit(TRANSLATE_OUT_OF_TIME)
+
+
def run(args):
    """Entry point with resource-limit handling.

    Installs a SIGXCPU handler (where the platform supports it) so the
    translator can report hitting an external CPU-time limit, reserves a
    small block of emergency memory, and converts MemoryError into a
    clean exit with TRANSLATE_OUT_OF_MEMORY.
    """
    try:
        signal.signal(signal.SIGXCPU, handle_sigxcpu)
    except AttributeError:
        # Platforms without SIGXCPU (e.g. Windows) land here.
        print("Warning! SIGXCPU is not available on your platform. "
              "This means that the planner cannot be gracefully terminated "
              "when using a time limit, which, however, is probably "
              "supported on your platform anyway.")
    try:
        # Reserve about 10 MB of emergency memory.
        # https://stackoverflow.com/questions/19469608/
        emergency_memory = b"x" * 10**7
        main(args)
    except MemoryError:
        # Release the reserve so the error reporting below has room to run.
        del emergency_memory
        separator = "=" * 79
        print()
        print("Translator ran out of memory, traceback:")
        print(separator)
        traceback.print_exc(file=sys.stdout)
        print(separator)
        sys.exit(TRANSLATE_OUT_OF_MEMORY)
diff --git a/src/translate/variable_order.py b/src/translate/translator/variable_order.py
similarity index 99%
rename from src/translate/variable_order.py
rename to src/translate/translator/variable_order.py
index f4fa282a48..af3123c0d5 100644
--- a/src/translate/variable_order.py
+++ b/src/translate/translator/variable_order.py
@@ -2,7 +2,7 @@
from itertools import chain
import heapq
-import sccs
+from . import sccs
DEBUG = False