diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..027b8e38b1 --- /dev/null +++ b/.clang-format @@ -0,0 +1,89 @@ +# Tested with clang-format version 18.1.3. +BasedOnStyle: LLVM + +# We explicitly list all settings we thought about, whether LLVM sets them the +# same way or not. + +# =========================== Not Part of LLVM Style =========================== + +# Dedent 'public', 'private', 'protected' keywords. +AccessModifierOffset: -4 +# How to align arguments after '(', '<', and '['. +AlignAfterOpenBracket: AlwaysBreak +# Never align '\' characters. +AlignEscapedNewlines: DontAlign +# Never align inline comments with comments on following/preceeding lines. +AlignTrailingComments: + Kind: Never +# Force enum entries on multiple lines. +AllowShortEnumsOnASingleLine: false +# Force short functions across multiple lines. +AllowShortFunctionsOnASingleLine: None +# Force line breaks after template declarations. +# Less strict alternative for clang-format >= 19: +# BreakTemplateDeclarations: Leave +AlwaysBreakTemplateDeclarations: Yes +# Leave string literals as they are, this means that, e.g., doc strings may stay +# over-length, but setting 'true' here makes over-length doc strings ugly. +BreakStringLiterals: false +# Do not enforce rules about empty lines before, e.g., the 'public' keyword. +EmptyLineBeforeAccessModifier: Leave +# Do not add/fix comments indicating which namespace ends at the closing brace. +FixNamespaceComments: false +# Automatically group include blocks according to the categories defined below. +IncludeBlocks: Regroup +# Regex is not case sensitive by default. +IncludeCategories: + - Regex: '^"[^/]+"$' + Priority: 1 + - Regex: '^"\.\./[^/]+"$' + Priority: 2 + - Regex: '^"\.\./.+"$' + Priority: 3 +# The next regex covers cases such as '"tasks/cost_adapted_task.h"' (without the +# '../' prefix. 
+ - Regex: '^"[^/]+/[^/]+"$' + Priority: 3 + - Regex: '^<.+>$' + Priority: 4 +IndentWidth: 4 +# Do not allow empty lines at start of block. +KeepEmptyLinesAtTheStartOfBlocks: false +# Put initializer list either on one line or one entry per line. +PackConstructorInitializers: NextLine +# Avoid line breaks between return type and function name. +PenaltyReturnTypeOnItsOwnLine: 120 +# This was not consistent in the code base, now we have to force one choice. +SpaceAfterTemplateKeyword: false +# Set C++ standard. +Standard: c++20 + +# ============================= Part of LLVM Style ============================= + +# If parameters/arguments fit on next line, put them on single line. +AllowAllParametersOfDeclarationOnNextLine: true +AllowAllArgumentsOnNextLine: true +# If parameters/arguments don't fit on the same line or the next, bin pack them. +BinPackParameters: true +BinPackArguments: true +# Break lines such that operators ('+', '<', ...) are at the end of the line. +BreakBeforeBinaryOperators: None +# Never add line break after an opening brace. +BreakBeforeBraces: Attach +# Put ternary operator symbols after line break. +BreakBeforeTernaryOperators: true +# Put colon of any initializer list on new line, even if not over-length. +BreakConstructorInitializers: BeforeColon +# Put colon of over-length inheritance declaration on new line. +BreakInheritanceList: BeforeColon +# Maximal number of characters in a line. +ColumnLimit: 80 +# Always indent by 4. +ContinuationIndentWidth: 4 +ConstructorInitializerIndentWidth: 4 +# Do not indent function names if return type fills line. +IndentWrappedFunctionNames: false +# Pointer indicator '*' is next to variable name instead of next to type. +PointerAlignment: Right +# Force space between class name and inheritance colon. 
+SpaceBeforeInheritanceColon: true diff --git a/.gitattributes b/.gitattributes index ea24645898..9706a14c80 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,4 +2,4 @@ /.gitattributes export-ignore /.github export-ignore /.gitignore export-ignore -/.uncrustify.cfg export-ignore +/.clang-format export-ignore diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 88ea3acac1..9adb914098 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -25,23 +25,12 @@ jobs: pip3 install tox sudo apt-get -y install clang-tidy-16 - - name: Install uncrustify + - name: Install clang-format run: | - # Set up uncrustify. - wget https://github.com/uncrustify/uncrustify/archive/uncrustify-0.72.0.tar.gz - tar xzf uncrustify-0.72.0.tar.gz - cd uncrustify-uncrustify-0.72.0 - mkdir build - cd build - cmake ../ - make -j2 - mv uncrustify ../../ - cd ../../ - rm -rf uncrustify-0.72.0.tar.gz uncrustify-uncrustify-0.72.0 + sudo apt-get -y install clang-format - name: Run code style tests run: | - export PATH="$(pwd):$PATH" # Add uncrustify to path. cd misc/ tox -e style,clang-tidy diff --git a/.uncrustify.cfg b/.uncrustify.cfg deleted file mode 100644 index ab316995b7..0000000000 --- a/.uncrustify.cfg +++ /dev/null @@ -1,200 +0,0 @@ -## uncrustify configuration file for the Fast Downward codebase. -## Tested with uncrustify 0.61. -## -## This is based on the original K&R config file of uncrustify 0.56, with -## some small changes and a lot of additional stuff. -## -## A convenient way of editing this file is via the "universalindentgui" -## program. If you do that, please re-add this comment afterwards, which -## universalindentgui strips. -## -## Both uncrustify and universalindentgui are standard Ubuntu packages. -## -## Some settings we might want to tighten later: -## nl_enum_leave_one_liners=true -## nl_func_leave_one_liners=true -## These could both be set to false, but maybe this should warrant -## some discussion first. 
Related to this, the following were changed -## from "force" to "remove" because of one-liners, so they might need -## to be changed, too: -## sp_inside_braces_enum=remove -## sp_inside_braces_struct=remove -## sp_inside_braces=remove - - -indent_align_string=false -indent_braces=false -indent_braces_no_func=false -indent_brace_parent=false -indent_namespace=false -indent_extern=false -indent_class=true -indent_class_colon=true -indent_constr_colon=true -indent_cpp_lambda_body=true -indent_else_if=false -indent_func_call_param=false -indent_func_def_param=false -indent_func_proto_param=false -indent_func_class_param=false -indent_func_ctor_var_param=false -indent_template_param=false -indent_func_param_double=false -indent_relative_single_line_comments=false -indent_col1_comment=false -indent_access_spec_body=false -indent_paren_nl=false -indent_comma_paren=false -indent_bool_paren=false -indent_square_nl=false -indent_preserve_sql=false -indent_align_assign=false -sp_balance_nested_parens=false -align_keep_tabs=false -align_with_tabs=false -align_on_tabstop=false -align_number_right=false -align_func_params=false -align_same_func_call_params=false -align_var_def_colon=false -align_var_def_attribute=false -align_var_def_inline=false -align_right_cmt_mix=false -align_on_operator=false -align_mix_var_proto=false -align_single_line_func=false -align_single_line_brace=false -align_nl_cont=false -align_left_shift=true -nl_cpp_lambda_leave_one_liners=true -nl_collapse_empty_body=false -nl_assign_leave_one_liners=true -nl_class_leave_one_liners=false -nl_enum_leave_one_liners=true -nl_getset_leave_one_liners=false -nl_func_leave_one_liners=true -nl_if_leave_one_liners=false -nl_multi_line_cond=false -nl_multi_line_define=false -nl_before_case=false -nl_after_case=true -nl_after_return=false -nl_after_semicolon=true -nl_after_brace_open=true -nl_after_brace_open_cmt=false -nl_after_vbrace_open=true -nl_after_brace_close=true -nl_define_macro=false -nl_squeeze_ifdef=false 
-nl_ds_struct_enum_cmt=false -nl_ds_struct_enum_close_brace=false -nl_create_if_one_liner=false -nl_create_for_one_liner=false -nl_create_while_one_liner=false -ls_for_split_full=false -ls_func_split_full=false -nl_after_multiline_comment=false -eat_blanks_after_open_brace=true -eat_blanks_before_close_brace=true -mod_pawn_semicolon=false -mod_full_paren_if_bool=false -mod_remove_extra_semicolon=true -mod_sort_import=false -mod_sort_using=false -mod_sort_include=false -mod_move_case_break=false -mod_remove_empty_return=false -cmt_indent_multi=false -cmt_c_group=false -cmt_c_nl_start=false -cmt_c_nl_end=false -cmt_cpp_group=false -cmt_cpp_nl_start=false -cmt_cpp_nl_end=false -cmt_cpp_to_c=false -cmt_star_cont=false -cmt_multi_check_last=false -cmt_insert_before_preproc=false -pp_indent_at_level=false -pp_region_indent_code=false -pp_if_indent_code=false -pp_define_at_level=false -input_tab_size=8 -output_tab_size=8 -indent_columns=4 -indent_var_def_blk=0 -indent_label=2 -nl_end_of_file_min=1 -newlines=lf -indent_with_tabs=0 -sp_arith=force -sp_assign=force -sp_enum_assign=force -sp_bool=force -sp_compare=force -sp_inside_paren=remove -sp_paren_paren=remove -sp_before_ptr_star=force -sp_between_ptr_star=remove -sp_angle_shift=remove -sp_permit_cpp11_shift=true -sp_after_ptr_star=remove -sp_before_byref=force -sp_after_byref=remove -sp_before_sparen=force -sp_inside_sparen=remove -sp_after_sparen=force -sp_before_semi=remove -sp_after_comma=force -sp_after_cast=remove -sp_sizeof_paren=remove -sp_inside_braces_enum=remove -sp_inside_braces_struct=remove -sp_inside_braces=remove -sp_func_proto_paren=remove -sp_func_def_paren=remove -sp_inside_fparen=remove -sp_fparen_brace=force -sp_func_call_paren=remove -sp_return_paren=remove -sp_else_brace=force -sp_brace_else=force -sp_template_angle=remove -nl_start_of_file=remove -nl_end_of_file=force -nl_fcall_brace=remove -nl_enum_brace=remove -nl_struct_brace=remove -nl_union_brace=remove -nl_if_brace=remove 
-nl_brace_else=remove -nl_else_brace=remove -nl_else_if=remove -nl_brace_finally=remove -nl_finally_brace=remove -nl_try_brace=remove -nl_getset_brace=remove -nl_for_brace=remove -nl_catch_brace=remove -nl_brace_catch=remove -nl_while_brace=remove -nl_do_brace=remove -nl_brace_while=remove -nl_switch_brace=remove -nl_namespace_brace=remove -nl_template_class=force -nl_class_brace=remove -nl_func_scope_name=remove -nl_func_paren=remove -nl_func_decl_end=remove -nl_fdef_brace=remove -nl_return_expr=remove -pos_assign=trail -pos_comma=trail -pos_class_comma=trail -pos_class_colon=lead -mod_full_brace_do=add -mod_paren_on_return=remove - -sp_before_semi_for_empty = remove -sp_after_semi_for_empty = remove diff --git a/README.md b/README.md index d469b94da0..087022f025 100644 --- a/README.md +++ b/README.md @@ -61,16 +61,16 @@ active, and in case of ties, by the earliest year the person started contributing, and finally by last name. - 2003-2025 Malte Helmert +- 2008-2016, 2018-2025 Gabriele Roeger - 2009, 2025 Christian Muise -- 2008-2016, 2018-2024 Gabriele Roeger -- 2010-2024 Jendrik Seipp +- 2010-2025 Jendrik Seipp +- 2012-2025 Florian Pommerening +- 2021-2025 Clemens Büchner +- 2022-2025 Remo Christen +- 2022-2025 Simon Dold - 2010-2011, 2013-2024 Silvan Sievers -- 2012-2024 Florian Pommerening - 2013, 2015-2024 Salomé Eriksson - 2018-2024 Patrick Ferber -- 2021-2024 Clemens Büchner -- 2022-2024 Remo Christen -- 2022-2024 Simon Dold - 2023-2024 Claudia S. 
Grundke - 2024 Martín Pozo - 2024 Tanja Schindler diff --git a/misc/style/run-all-style-checks.py b/misc/style/run-all-style-checks.py index b6ef8e4339..f9b8dc3117 100755 --- a/misc/style/run-all-style-checks.py +++ b/misc/style/run-all-style-checks.py @@ -51,7 +51,7 @@ def check_cc_files(): def check_cplusplus_style(): - return subprocess.call(["./run-uncrustify.py"], cwd=DIR) == 0 + return subprocess.call(["./run-clang-format.py"], cwd=DIR) == 0 def main(): diff --git a/misc/style/run-uncrustify.py b/misc/style/run-clang-format.py similarity index 66% rename from misc/style/run-uncrustify.py rename to misc/style/run-clang-format.py index 7b53d79760..19f108ddf9 100755 --- a/misc/style/run-uncrustify.py +++ b/misc/style/run-clang-format.py @@ -1,7 +1,7 @@ #! /usr/bin/env python3 """ -Run uncrustify on all C++ files in the repository. +Run clang-format on all C++ files in the repository. """ import argparse @@ -20,7 +20,7 @@ def parse_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "-m", "--modify", action="store_true", - help="modify the files that need to be uncrustified") + help="modify the files that need to be clang-formatted") parser.add_argument( "-f", "--force", action="store_true", help="modify files even if there are uncommited changes") @@ -42,21 +42,24 @@ def main(): if not args.force and args.modify and search_files_are_dirty(): sys.exit(f"Error: {SEARCH_DIR} has uncommited changes.") src_files = utils.get_src_files(SEARCH_DIR, (".h", ".cc")) - print(f"Checking {len(src_files)} files with uncrustify.") - config_file = os.path.join(REPO, ".uncrustify.cfg") - executable = "uncrustify" - cmd = [executable, "-q", "-c", config_file] + src_files - if args.modify: - cmd.append("--no-backup") - else: - cmd.append("--check") + print(f"Checking {len(src_files)} files with clang-format.") + config_file = os.path.join(REPO, ".clang-format") + executable = "clang-format" + exe_error_str = f"Error: {executable} not found. 
Is it on the PATH?" + flag = "-i" if args.modify else "--dry-run" + cmd = [ + executable, flag, f"--style=file:{config_file}" + ] + src_files try: # Hide clean files printed on stdout. returncode = subprocess.call(cmd, stdout=subprocess.PIPE) - except FileNotFoundError: - sys.exit(f"Error: {executable} not found. Is it on the PATH?") + except FileNotFoundError as not_found: + src_error_str = f"ERROR: Did not find file: '{not_found.filename}'." + error_str = exe_error_str if not_found == executable else src_error_str + sys.exit(error_str) if not args.modify and returncode != 0: - print('Run "tox -e fix-style" in the misc/ directory to fix the C++ style.') + print('Run "tox -e fix-style" in the misc/ directory to fix the C++ ' + + 'style.') return returncode diff --git a/misc/tests/test-parameters.py b/misc/tests/test-parameters.py index 803516bf3a..9b4e28ef13 100644 --- a/misc/tests/test-parameters.py +++ b/misc/tests/test-parameters.py @@ -1,5 +1,6 @@ #! /usr/bin/env python3 + HELP = """\ Check that parameters for the command line features match the parameters of the C++ constructors. 
Use txt2tags to compare the parameters @@ -107,25 +108,26 @@ def extract_feature_parameter_list(feature_name): def extract_feature_name_and_cpp_class(cc_file, cc_files, cwd, num): source_without_comments = subprocess.check_output( - ["gcc", "-fpreprocessed", "-dD", "-E", cc_file]).decode("utf-8") + ["gcc", "-fpreprocessed", "-dD", "-E", + cc_file]).decode("utf-8").replace('\n', ' ').replace('\r', '') name_pattern = r'TypedFeature\("([^"]*)"\)' class_pattern = r'TypedFeature<(.*?)> {' feature_names = [] class_names = [] feature_error_msgs = [] class_error_msgs = [] - for line in source_without_comments.splitlines(): - if re.search(name_pattern, line): - feature_name = re.search(name_pattern, line).group(1) - feature_error_msg = "feature_name: " + feature_name + "\n" - feature_names.append(feature_name) - feature_error_msgs.append(feature_error_msg) - if re.search(class_pattern, line): - feature_class = re.search(class_pattern, line).group(1) - class_name = feature_class.split()[-1].split("::")[-1] - class_error_msg = "class_name: " + class_name + "\n" - class_names.append(class_name) - class_error_msgs.append(class_error_msg) + + for feature_name in re.findall(name_pattern, source_without_comments): + feature_error_msg = "feature_name: " + feature_name + "\n" + feature_names.append(feature_name) + feature_error_msgs.append(feature_error_msg) + + for class_name in re.findall(class_pattern, source_without_comments): + class_name = class_name.split(' ')[-1].split("::")[-1] + class_error_msg = "class_name: " + class_name + "\n" + class_names.append(class_name) + class_error_msgs.append(class_error_msg) + return (feature_names[num], class_names[num], feature_error_msgs[num] + class_error_msgs[num]) diff --git a/misc/tox.ini b/misc/tox.ini index 06901946df..a541b215c1 100644 --- a/misc/tox.ini +++ b/misc/tox.ini @@ -88,4 +88,4 @@ commands = changedir = {toxinidir}/style/ deps = commands = - python run-uncrustify.py --modify + python run-clang-format.py --modify diff --git 
a/src/search/abstract_task.cc b/src/search/abstract_task.cc index 8563b11103..db0c676be1 100644 --- a/src/search/abstract_task.cc +++ b/src/search/abstract_task.cc @@ -15,11 +15,11 @@ ostream &operator<<(ostream &os, const FactPair &fact_pair) { return os; } -static class AbstractTaskCategoryPlugin : public plugins::TypedCategoryPlugin { +static class AbstractTaskCategoryPlugin + : public plugins::TypedCategoryPlugin { public: AbstractTaskCategoryPlugin() : TypedCategoryPlugin("AbstractTask") { // TODO: Replace empty string by synopsis for the wiki page. document_synopsis(""); } -} -_category_plugin; +} _category_plugin; diff --git a/src/search/abstract_task.h b/src/search/abstract_task.h index be0eb10375..fedef8413d 100644 --- a/src/search/abstract_task.h +++ b/src/search/abstract_task.h @@ -15,8 +15,7 @@ struct FactPair { int var; int value; - FactPair(int var, int value) - : var(var), value(value) { + FactPair(int var, int value) : var(var), value(value) { } bool operator<(const FactPair &other) const { @@ -58,12 +57,14 @@ class AbstractTask : public subscriber::SubscriberService { virtual int get_variable_axiom_layer(int var) const = 0; virtual int get_variable_default_axiom_value(int var) const = 0; virtual std::string get_fact_name(const FactPair &fact) const = 0; - virtual bool are_facts_mutex(const FactPair &fact1, const FactPair &fact2) const = 0; + virtual bool are_facts_mutex( + const FactPair &fact1, const FactPair &fact2) const = 0; virtual int get_operator_cost(int index, bool is_axiom) const = 0; virtual std::string get_operator_name(int index, bool is_axiom) const = 0; virtual int get_num_operators() const = 0; - virtual int get_num_operator_preconditions(int index, bool is_axiom) const = 0; + virtual int get_num_operator_preconditions( + int index, bool is_axiom) const = 0; virtual FactPair get_operator_precondition( int op_index, int fact_index, bool is_axiom) const = 0; virtual int get_num_operator_effects(int op_index, bool is_axiom) const = 0; 
@@ -75,9 +76,10 @@ class AbstractTask : public subscriber::SubscriberService { int op_index, int eff_index, bool is_axiom) const = 0; /* - Convert an operator index from this task, C (child), into an operator index - from an ancestor task A (ancestor). Task A has to be an ancestor of C in - the sense that C is the result of a sequence of task transformations on A. + Convert an operator index from this task, C (child), into an operator + index from an ancestor task A (ancestor). Task A has to be an ancestor of + C in the sense that C is the result of a sequence of task transformations + on A. */ virtual int convert_operator_index( int index, const AbstractTask *ancestor_task) const = 0; @@ -99,8 +101,7 @@ class AbstractTask : public subscriber::SubscriberService { the parameter. */ virtual void convert_ancestor_state_values( - std::vector &values, - const AbstractTask *ancestor_task) const = 0; + std::vector &values, const AbstractTask *ancestor_task) const = 0; }; #endif diff --git a/src/search/algorithms/dynamic_bitset.h b/src/search/algorithms/dynamic_bitset.h index 68fffab158..ab1ed82cc6 100644 --- a/src/search/algorithms/dynamic_bitset.h +++ b/src/search/algorithms/dynamic_bitset.h @@ -13,8 +13,7 @@ namespace dynamic_bitset { template class DynamicBitset { static_assert( - !std::numeric_limits::is_signed, - "Block type must be unsigned"); + !std::numeric_limits::is_signed, "Block type must be unsigned"); std::vector blocks; const std::size_t num_bits; @@ -56,8 +55,7 @@ class DynamicBitset { public: explicit DynamicBitset(std::size_t num_bits) - : blocks(compute_num_blocks(num_bits), zeros), - num_bits(num_bits) { + : blocks(compute_num_blocks(num_bits), zeros), num_bits(num_bits) { } std::size_t size() const { diff --git a/src/search/algorithms/equivalence_relation.cc b/src/search/algorithms/equivalence_relation.cc index 76760936d0..297f38b0ba 100644 --- a/src/search/algorithms/equivalence_relation.cc +++ b/src/search/algorithms/equivalence_relation.cc @@ 
-30,10 +30,10 @@ BlockListIter EquivalenceRelation::add_empty_block() { Add x to (B \cap X) The elements remaining in B are (B \setminus X). - We associate the new block (B \cap X) with the block B to easily access it once - we know block B. Block (B \cap X) is only created on demand, so it is never empty. - We remember all blocks where at least one element was removed and remove those - that become empty at the end of the loop. + We associate the new block (B \cap X) with the block B to easily access it + once we know block B. Block (B \cap X) is only created on demand, so it is + never empty. We remember all blocks where at least one element was removed and + remove those that become empty at the end of the loop. */ void EquivalenceRelation::refine(const vector &block) { /* @@ -44,7 +44,8 @@ void EquivalenceRelation::refine(const vector &block) { vector modified_blocks; for (int x : block) { - typename ElementPositionMap::iterator it_pos = element_positions.find(x); + typename ElementPositionMap::iterator it_pos = + element_positions.find(x); if (it_pos == element_positions.end()) { ABORT("Element from given block not contained in equivalence " "relation."); diff --git a/src/search/algorithms/equivalence_relation.h b/src/search/algorithms/equivalence_relation.h index ec603c317c..451f268b80 100644 --- a/src/search/algorithms/equivalence_relation.h +++ b/src/search/algorithms/equivalence_relation.h @@ -15,15 +15,14 @@ using ElementListConstIter = std::list::const_iterator; using BlockListIter = std::list::iterator; using BlockListConstIter = std::list::const_iterator; - class Block { std::list elements; /* During the refinement step of EquivalenceRelation, every existing block B is split along every new block X into the intersection and difference of B and X. The way the algorithm is set up, the difference remains in the - block that previously represented B. To store the intersection, a new block - is created and stored in B for easier access. 
+ block that previously represented B. To store the intersection, a new + block is created and stored in B for easier access. */ friend class EquivalenceRelation; BlockListIter it_intersection_block; @@ -73,15 +72,19 @@ class EquivalenceRelation { public: explicit EquivalenceRelation(const std::vector &elements); - BlockListConstIter begin() const {return blocks.begin();} - BlockListConstIter end() const {return blocks.end();} + BlockListConstIter begin() const { + return blocks.begin(); + } + BlockListConstIter end() const { + return blocks.end(); + } /* - Refining a relation with a block X is equivalent to splitting every block B - into two blocks (B \cap X) and (B \setminus X). After refining, two items A - and B are in the same block if and only if they were in the same block - before and they are in one block in the other relation. The amortized - runtime is linear in the number of elements specified in other. + Refining a relation with a block X is equivalent to splitting every block + B into two blocks (B \cap X) and (B \setminus X). After refining, two + items A and B are in the same block if and only if they were in the same + block before and they are in one block in the other relation. The + amortized runtime is linear in the number of elements specified in other. */ void refine(const std::vector &block); }; diff --git a/src/search/algorithms/int_hash_set.h b/src/search/algorithms/int_hash_set.h index 259ce68f7d..be61fa1a01 100644 --- a/src/search/algorithms/int_hash_set.h +++ b/src/search/algorithms/int_hash_set.h @@ -79,7 +79,8 @@ template class IntHashSet { // Max distance from the ideal bucket to the actual bucket for each key. 
static const int MAX_DISTANCE = 32; - static const unsigned int MAX_BUCKETS = std::numeric_limits::max(); + static const unsigned int MAX_BUCKETS = + std::numeric_limits::max(); struct Bucket { KeyType key; @@ -87,14 +88,10 @@ class IntHashSet { static const KeyType empty_bucket_key = -1; - Bucket() - : key(empty_bucket_key), - hash(0) { + Bucket() : key(empty_bucket_key), hash(0) { } - Bucket(KeyType key, HashType hash) - : key(key), - hash(hash) { + Bucket(KeyType key, HashType hash) : key(key), hash(hash) { } bool full() const { @@ -135,10 +132,10 @@ class IntHashSet { assert((num_buckets & (num_buckets - 1)) == 0); if (num_buckets > MAX_BUCKETS / 2) { std::cerr << "IntHashSet surpassed maximum capacity. This means" - " you either use IntHashSet for high-memory" - " applications for which it was not designed, or there" - " is an unexpectedly high number of hash collisions" - " that should be investigated. Aborting." + " you either use IntHashSet for high-memory" + " applications for which it was not designed, or there" + " is an unexpectedly high number of hash collisions" + " that should be investigated. Aborting." << std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } @@ -184,7 +181,8 @@ class IntHashSet { for (int i = 0; i < MAX_DISTANCE; ++i) { int index = get_bucket(ideal_index + i); const Bucket &bucket = buckets[index]; - if (bucket.full() && bucket.hash == hash && equal(bucket.key, key)) { + if (bucket.full() && bucket.hash == hash && + equal(bucket.key, key)) { return bucket.key; } } @@ -245,7 +243,8 @@ class IntHashSet { candidate_index = get_bucket(candidate_index); HashType candidate_hash = buckets[candidate_index].hash; int candidate_ideal_index = get_bucket(candidate_hash); - if (get_distance(candidate_ideal_index, free_index) < MAX_DISTANCE) { + if (get_distance(candidate_ideal_index, free_index) < + MAX_DISTANCE) { // Candidate can be swapped. 
std::swap(buckets[candidate_index], buckets[free_index]); free_index = candidate_index; @@ -313,9 +312,8 @@ class IntHashSet { assert(!buckets.empty()); int num_buckets = capacity(); assert(num_buckets != 0); - log << "Int hash set load factor: " << num_entries << "/" - << num_buckets << " = " - << static_cast(num_entries) / num_buckets + log << "Int hash set load factor: " << num_entries << "/" << num_buckets + << " = " << static_cast(num_entries) / num_buckets << std::endl; log << "Int hash set resizes: " << num_resizes << std::endl; } diff --git a/src/search/algorithms/int_packer.cc b/src/search/algorithms/int_packer.cc index 6888156122..809c03c467 100644 --- a/src/search/algorithms/int_packer.cc +++ b/src/search/algorithms/int_packer.cc @@ -42,16 +42,13 @@ class IntPacker::VariableInfo { Bin clear_mask; public: VariableInfo(int range_, int bin_index_, int shift_) - : range(range_), - bin_index(bin_index_), - shift(shift_) { + : range(range_), bin_index(bin_index_), shift(shift_) { int bit_size = get_bit_size_for_range(range); read_mask = get_bit_mask(shift, shift + bit_size); clear_mask = ~read_mask; } - VariableInfo() - : bin_index(-1), shift(0), read_mask(0), clear_mask(0) { + VariableInfo() : bin_index(-1), shift(0), read_mask(0), clear_mask(0) { // Default constructor needed for resize() in pack_bins. } @@ -69,9 +66,7 @@ class IntPacker::VariableInfo { } }; - -IntPacker::IntPacker(const vector &ranges) - : num_bins(0) { +IntPacker::IntPacker(const vector &ranges) : num_bins(0) { pack_bins(ranges); } @@ -109,8 +104,8 @@ void IntPacker::pack_bins(const vector &ranges) { packed_vars += pack_one_bin(ranges, bits_to_vars); } -int IntPacker::pack_one_bin(const vector &ranges, - vector> &bits_to_vars) { +int IntPacker::pack_one_bin( + const vector &ranges, vector> &bits_to_vars) { // Returns the number of variables added to the bin. We pack each // bin with a greedy strategy, always adding the largest variable // that still fits. 
diff --git a/src/search/algorithms/int_packer.h b/src/search/algorithms/int_packer.h index 0f6a742ea7..9d8ea73a96 100644 --- a/src/search/algorithms/int_packer.h +++ b/src/search/algorithms/int_packer.h @@ -24,8 +24,9 @@ class IntPacker { std::vector var_infos; int num_bins; - int pack_one_bin(const std::vector &ranges, - std::vector> &bits_to_vars); + int pack_one_bin( + const std::vector &ranges, + std::vector> &bits_to_vars); void pack_bins(const std::vector &ranges); public: typedef unsigned int Bin; @@ -42,7 +43,9 @@ class IntPacker { int get(const Bin *buffer, int var) const; void set(Bin *buffer, int var, int value) const; - int get_num_bins() const {return num_bins;} + int get_num_bins() const { + return num_bins; + } }; } diff --git a/src/search/algorithms/max_cliques.cc b/src/search/algorithms/max_cliques.cc index 0fd8d4aa61..dd03d4bea3 100644 --- a/src/search/algorithms/max_cliques.cc +++ b/src/search/algorithms/max_cliques.cc @@ -22,8 +22,8 @@ class MaxCliqueComputer { assert(utils::is_sorted_unique(subg)); assert(utils::is_sorted_unique(cand)); - //utils::g_log << "subg: " << subg << endl; - //utils::g_log << "cand: " << cand << endl; + // utils::g_log << "subg: " << subg << endl; + // utils::g_log << "cand: " << cand << endl; size_t max = 0; // We will take the first vertex if there is no better one. int vertex = subg[0]; @@ -32,14 +32,15 @@ class MaxCliqueComputer { vector intersection; intersection.reserve(subg.size()); // for vertex u in subg get u's adjacent vertices: graph[subg[i]]; - set_intersection(cand.begin(), cand.end(), - graph[subg[i]].begin(), graph[subg[i]].end(), - back_inserter(intersection)); + set_intersection( + cand.begin(), cand.end(), graph[subg[i]].begin(), + graph[subg[i]].end(), back_inserter(intersection)); if (intersection.size() > max) { max = intersection.size(); vertex = subg[i]; - //utils::g_log << "success: there is a maximizing vertex." << endl; + // utils::g_log << "success: there is a maximizing vertex." 
<< + // endl; } } return vertex; @@ -49,50 +50,50 @@ class MaxCliqueComputer { // utils::g_log << "subg: " << subg << endl; // utils::g_log << "cand: " << cand << endl; if (subg.empty()) { - //utils::g_log << "clique" << endl; + // utils::g_log << "clique" << endl; max_cliques.push_back(current_max_clique); } else { int u = get_maximizing_vertex(subg, cand); vector ext_u; ext_u.reserve(cand.size()); - set_difference(cand.begin(), cand.end(), - graph[u].begin(), graph[u].end(), - back_inserter(ext_u)); + set_difference( + cand.begin(), cand.end(), graph[u].begin(), graph[u].end(), + back_inserter(ext_u)); while (!ext_u.empty()) { int q = ext_u.back(); ext_u.pop_back(); - //utils::g_log << q << ","; + // utils::g_log << q << ","; current_max_clique.push_back(q); // subg_q = subg n gamma(q) vector subg_q; subg_q.reserve(subg.size()); - set_intersection(subg.begin(), subg.end(), - graph[q].begin(), graph[q].end(), - back_inserter(subg_q)); + set_intersection( + subg.begin(), subg.end(), graph[q].begin(), graph[q].end(), + back_inserter(subg_q)); // cand_q = cand n gamma(q) vector cand_q; cand_q.reserve(cand.size()); - set_intersection(cand.begin(), cand.end(), - graph[q].begin(), graph[q].end(), - back_inserter(cand_q)); + set_intersection( + cand.begin(), cand.end(), graph[q].begin(), graph[q].end(), + back_inserter(cand_q)); expand(subg_q, cand_q); // remove q from cand --> cand = cand - q cand.erase(lower_bound(cand.begin(), cand.end(), q)); - //utils::g_log << "back" << endl; + // utils::g_log << "back" << endl; current_max_clique.pop_back(); } } } public: - MaxCliqueComputer(const vector> &graph_, - vector> &max_cliques_) + MaxCliqueComputer( + const vector> &graph_, vector> &max_cliques_) : graph(graph_), max_cliques(max_cliques_) { } @@ -108,10 +109,8 @@ class MaxCliqueComputer { } }; - void compute_max_cliques( - const vector> &graph, - vector> &max_cliques) { + const vector> &graph, vector> &max_cliques) { MaxCliqueComputer clique_computer(graph, max_cliques); 
clique_computer.compute(); } diff --git a/src/search/algorithms/named_vector.h b/src/search/algorithms/named_vector.h index 66814dde21..9dd9a28c86 100644 --- a/src/search/algorithms/named_vector.h +++ b/src/search/algorithms/named_vector.h @@ -19,9 +19,9 @@ class NamedVector { std::vector elements; std::vector names; public: - template - void emplace_back(_Args && ... __args) { - elements.emplace_back(std::forward<_Args>(__args) ...); + template + void emplace_back(_Args &&...__args) { + elements.emplace_back(std::forward<_Args>(__args)...); } void push_back(const T &element) { diff --git a/src/search/algorithms/priority_queues.h b/src/search/algorithms/priority_queues.h index af5cd87a4a..774d54273f 100644 --- a/src/search/algorithms/priority_queues.h +++ b/src/search/algorithms/priority_queues.h @@ -33,8 +33,10 @@ class AbstractQueue { public: typedef std::pair Entry; - AbstractQueue() {} - virtual ~AbstractQueue() {} + AbstractQueue() { + } + virtual ~AbstractQueue() { + } virtual void push(int key, const Value &value) = 0; virtual Entry pop() = 0; @@ -66,7 +68,6 @@ class AbstractQueue { virtual void add_virtual_pushes(int num_extra_pushes) = 0; }; - template class HeapQueue : public AbstractQueue { typedef typename AbstractQueue::Entry Entry; @@ -131,7 +132,6 @@ class HeapQueue : public AbstractQueue { } }; - template class BucketQueue : public AbstractQueue { static const int MIN_BUCKETS_BEFORE_SWITCH = 100; @@ -240,7 +240,6 @@ class BucketQueue : public AbstractQueue { } }; - template class AdaptiveQueue { AbstractQueue *wrapped_queue; diff --git a/src/search/algorithms/sccs.cc b/src/search/algorithms/sccs.cc index 3bd647fb3c..a9ccdc66a7 100644 --- a/src/search/algorithms/sccs.cc +++ b/src/search/algorithms/sccs.cc @@ -6,14 +6,9 @@ using namespace std; namespace sccs { static void dfs( - const vector> &graph, - int vertex, - vector &dfs_numbers, - vector &dfs_minima, - vector &stack_indices, - vector &stack, - int ¤t_dfs_number, - vector> &sccs) { + const 
vector> &graph, int vertex, vector &dfs_numbers, + vector &dfs_minima, vector &stack_indices, vector &stack, + int ¤t_dfs_number, vector> &sccs) { int vertex_dfs_number = current_dfs_number++; dfs_numbers[vertex] = dfs_minima[vertex] = vertex_dfs_number; stack_indices[vertex] = stack.size(); @@ -24,9 +19,11 @@ static void dfs( int succ = successors[i]; int succ_dfs_number = dfs_numbers[succ]; if (succ_dfs_number == -1) { - dfs(graph, succ, dfs_numbers, dfs_minima, stack_indices, stack, current_dfs_number, sccs); + dfs(graph, succ, dfs_numbers, dfs_minima, stack_indices, stack, + current_dfs_number, sccs); dfs_minima[vertex] = min(dfs_minima[vertex], dfs_minima[succ]); - } else if (succ_dfs_number < vertex_dfs_number && stack_indices[succ] != -1) { + } else if ( + succ_dfs_number < vertex_dfs_number && stack_indices[succ] != -1) { dfs_minima[vertex] = min(dfs_minima[vertex], succ_dfs_number); } } @@ -43,8 +40,7 @@ static void dfs( } } -vector> compute_maximal_sccs( - const vector> &graph) { +vector> compute_maximal_sccs(const vector> &graph) { int node_count = graph.size(); vector dfs_numbers(node_count, -1); vector dfs_minima(node_count, -1); @@ -56,7 +52,8 @@ vector> compute_maximal_sccs( vector> sccs; for (int i = 0; i < node_count; i++) { if (dfs_numbers[i] == -1) { - dfs(graph, i, dfs_numbers, dfs_minima, stack_indices, stack, current_dfs_number, sccs); + dfs(graph, i, dfs_numbers, dfs_minima, stack_indices, stack, + current_dfs_number, sccs); } } diff --git a/src/search/algorithms/segmented_vector.h b/src/search/algorithms/segmented_vector.h index 0eb4da6084..b8e934860b 100644 --- a/src/search/algorithms/segmented_vector.h +++ b/src/search/algorithms/segmented_vector.h @@ -51,12 +51,13 @@ namespace segmented_vector { template> class SegmentedVector { - using EntryAllocator = typename std::allocator_traits::template rebind_alloc; + using EntryAllocator = + typename std::allocator_traits::template rebind_alloc; static const size_t SEGMENT_BYTES = 8192; - static 
const size_t SEGMENT_ELEMENTS = - (SEGMENT_BYTES / sizeof(Entry)) >= 1 ? - (SEGMENT_BYTES / sizeof(Entry)) : 1; + static const size_t SEGMENT_ELEMENTS = (SEGMENT_BYTES / sizeof(Entry)) >= 1 + ? (SEGMENT_BYTES / sizeof(Entry)) + : 1; EntryAllocator entry_allocator; @@ -72,28 +73,29 @@ class SegmentedVector { } void add_segment() { - Entry *new_segment = std::allocator_traits::allocate(entry_allocator, SEGMENT_ELEMENTS); + Entry *new_segment = std::allocator_traits::allocate( + entry_allocator, SEGMENT_ELEMENTS); segments.push_back(new_segment); } SegmentedVector(const SegmentedVector &) = delete; SegmentedVector &operator=(const SegmentedVector &) = delete; public: - SegmentedVector() - : the_size(0) { + SegmentedVector() : the_size(0) { } SegmentedVector(const EntryAllocator &allocator_) - : entry_allocator(allocator_), - the_size(0) { + : entry_allocator(allocator_), the_size(0) { } ~SegmentedVector() { for (size_t i = 0; i < the_size; ++i) { - std::allocator_traits::destroy(entry_allocator, &operator[](i)); + std::allocator_traits::destroy( + entry_allocator, &operator[](i)); } for (size_t segment = 0; segment < segments.size(); ++segment) { - std::allocator_traits::deallocate(entry_allocator, segments[segment], SEGMENT_ELEMENTS); + std::allocator_traits::deallocate( + entry_allocator, segments[segment], SEGMENT_ELEMENTS); } } @@ -123,12 +125,14 @@ class SegmentedVector { // Must add a new segment. 
add_segment(); } - std::allocator_traits::construct(entry_allocator, segments[segment] + offset, entry); + std::allocator_traits::construct( + entry_allocator, segments[segment] + offset, entry); ++the_size; } void pop_back() { - std::allocator_traits::destroy(entry_allocator, &operator[](the_size - 1)); + std::allocator_traits::destroy( + entry_allocator, &operator[](the_size - 1)); --the_size; /* If the removed element was the last in its segment, the segment @@ -147,10 +151,10 @@ class SegmentedVector { } }; - template> class SegmentedArrayVector { - using ElementAllocator = typename std::allocator_traits::template rebind_alloc; + using ElementAllocator = typename std::allocator_traits< + Allocator>::template rebind_alloc; static const size_t SEGMENT_BYTES = 8192; const size_t elements_per_array; @@ -171,29 +175,34 @@ class SegmentedArrayVector { } void add_segment() { - Element *new_segment = std::allocator_traits::allocate(element_allocator, elements_per_segment); + Element *new_segment = + std::allocator_traits::allocate( + element_allocator, elements_per_segment); segments.push_back(new_segment); } SegmentedArrayVector(const SegmentedArrayVector &) = delete; - SegmentedArrayVector &operator=(const SegmentedArrayVector &) = delete; + SegmentedArrayVector &operator=(const SegmentedArrayVector &) = + delete; public: SegmentedArrayVector(size_t elements_per_array_) - : elements_per_array((assert(elements_per_array_ > 0), - elements_per_array_)), - arrays_per_segment( - std::max(SEGMENT_BYTES / (elements_per_array * sizeof(Element)), size_t (1))), + : elements_per_array( + (assert(elements_per_array_ > 0), elements_per_array_)), + arrays_per_segment(std::max( + SEGMENT_BYTES / (elements_per_array * sizeof(Element)), + size_t(1))), elements_per_segment(elements_per_array * arrays_per_segment), the_size(0) { } - - SegmentedArrayVector(size_t elements_per_array_, const ElementAllocator &allocator_) + SegmentedArrayVector( + size_t elements_per_array_, const 
ElementAllocator &allocator_) : element_allocator(allocator_), - elements_per_array((assert(elements_per_array_ > 0), - elements_per_array_)), - arrays_per_segment( - std::max(SEGMENT_BYTES / (elements_per_array * sizeof(Element)), size_t (1))), + elements_per_array( + (assert(elements_per_array_ > 0), elements_per_array_)), + arrays_per_segment(std::max( + SEGMENT_BYTES / (elements_per_array * sizeof(Element)), + size_t(1))), elements_per_segment(elements_per_array * arrays_per_segment), the_size(0) { } @@ -201,11 +210,13 @@ class SegmentedArrayVector { ~SegmentedArrayVector() { for (size_t i = 0; i < the_size; ++i) { for (size_t offset = 0; offset < elements_per_array; ++offset) { - std::allocator_traits::destroy(element_allocator, operator[](i) + offset); + std::allocator_traits::destroy( + element_allocator, operator[](i) + offset); } } for (size_t i = 0; i < segments.size(); ++i) { - std::allocator_traits::deallocate(element_allocator, segments[i], elements_per_segment); + std::allocator_traits::deallocate( + element_allocator, segments[i], elements_per_segment); } } @@ -237,13 +248,15 @@ class SegmentedArrayVector { } Element *dest = segments[segment] + offset; for (size_t i = 0; i < elements_per_array; ++i) - std::allocator_traits::construct(element_allocator, dest++, *entry++); + std::allocator_traits::construct( + element_allocator, dest++, *entry++); ++the_size; } void pop_back() { for (size_t offset = 0; offset < elements_per_array; ++offset) { - std::allocator_traits::destroy(element_allocator, operator[](the_size - 1) + offset); + std::allocator_traits::destroy( + element_allocator, operator[](the_size - 1) + offset); } --the_size; /* diff --git a/src/search/algorithms/subscriber.h b/src/search/algorithms/subscriber.h index 25ad41364d..80fa9cef47 100644 --- a/src/search/algorithms/subscriber.h +++ b/src/search/algorithms/subscriber.h @@ -34,7 +34,6 @@ } */ - namespace subscriber { template class SubscriberService; @@ -55,7 +54,8 @@ class Subscriber { 
We have to copy the services because unsubscribing erases the current service during the iteration. */ - std::unordered_set *> services_copy(services); + std::unordered_set *> services_copy( + services); for (const SubscriberService *service : services_copy) { service->unsubscribe(this); } diff --git a/src/search/axioms.cc b/src/search/axioms.cc index eac3cb1923..3b80db194f 100644 --- a/src/search/axioms.cc +++ b/src/search/axioms.cc @@ -28,8 +28,11 @@ AxiomEvaluator::AxiomEvaluator(const TaskProxy &task_proxy) { int num_conditions = cond_effect.get_conditions().size(); // We don't allow axioms that set the variable to its default value. - assert(effect.value != variables[effect.var].get_default_axiom_value()); - AxiomLiteral *eff_literal = &axiom_literals[effect.var][effect.value]; + assert( + effect.value != + variables[effect.var].get_default_axiom_value()); + AxiomLiteral *eff_literal = + &axiom_literals[effect.var][effect.value]; rules.emplace_back( num_conditions, effect.var, effect.value, eff_literal); } @@ -61,7 +64,8 @@ AxiomEvaluator::AxiomEvaluator(const TaskProxy &task_proxy) { if (layer != last_layer) { int var_id = var.get_id(); int nbf_value = var.get_default_axiom_value(); - AxiomLiteral *nbf_literal = &axiom_literals[var_id][nbf_value]; + AxiomLiteral *nbf_literal = + &axiom_literals[var_id][nbf_value]; nbf_info_by_layer[layer].emplace_back(var_id, nbf_literal); } } @@ -139,7 +143,8 @@ void AxiomEvaluator::evaluate(vector &state) { to save some time (see issue420, msg3058). */ if (layer_no != nbf_info_by_layer.size() - 1) { - const vector &nbf_info = nbf_info_by_layer[layer_no]; + const vector &nbf_info = + nbf_info_by_layer[layer_no]; for (size_t i = 0; i < nbf_info.size(); ++i) { int var_no = nbf_info[i].var_no; // Verify that variable is derived. 
diff --git a/src/search/axioms.h b/src/search/axioms.h index 351bfc506b..288c5ff364 100644 --- a/src/search/axioms.h +++ b/src/search/axioms.h @@ -18,16 +18,21 @@ class AxiomEvaluator { int effect_var; int effect_val; AxiomLiteral *effect_literal; - AxiomRule(int cond_count, int eff_var, int eff_val, AxiomLiteral *eff_literal) - : condition_count(cond_count), unsatisfied_conditions(cond_count), - effect_var(eff_var), effect_val(eff_val), effect_literal(eff_literal) { + AxiomRule( + int cond_count, int eff_var, int eff_val, AxiomLiteral *eff_literal) + : condition_count(cond_count), + unsatisfied_conditions(cond_count), + effect_var(eff_var), + effect_val(eff_val), + effect_literal(eff_literal) { } }; struct NegationByFailureInfo { int var_no; AxiomLiteral *literal; NegationByFailureInfo(int var, AxiomLiteral *lit) - : var_no(var), literal(lit) {} + : var_no(var), literal(lit) { + } }; bool task_has_axioms; diff --git a/src/search/cartesian_abstractions/abstract_search.cc b/src/search/cartesian_abstractions/abstract_search.cc index 91895fe1f3..9d4d135af3 100644 --- a/src/search/cartesian_abstractions/abstract_search.cc +++ b/src/search/cartesian_abstractions/abstract_search.cc @@ -9,10 +9,8 @@ using namespace std; namespace cartesian_abstractions { -AbstractSearch::AbstractSearch( - const vector &operator_costs) - : operator_costs(operator_costs), - search_info(1) { +AbstractSearch::AbstractSearch(const vector &operator_costs) + : operator_costs(operator_costs), search_info(1) { } void AbstractSearch::reset(int num_states) { @@ -23,11 +21,13 @@ void AbstractSearch::reset(int num_states) { } } -unique_ptr AbstractSearch::extract_solution(int init_id, int goal_id) const { +unique_ptr AbstractSearch::extract_solution( + int init_id, int goal_id) const { unique_ptr solution = make_unique(); int current_id = goal_id; while (current_id != init_id) { - const Transition &prev = search_info[current_id].get_incoming_transition(); + const Transition &prev = + 
search_info[current_id].get_incoming_transition(); solution->emplace_front(prev.op_id, current_id); assert(prev.target_id != current_id); current_id = prev.target_id; @@ -65,15 +65,15 @@ void AbstractSearch::update_goal_distances(const Solution &solution) { } for (auto &info : search_info) { if (info.get_g_value() < INF) { - int new_h = max(info.get_h_value(), solution_cost - info.get_g_value()); + int new_h = + max(info.get_h_value(), solution_cost - info.get_g_value()); info.increase_h_value_to(new_h); } } } unique_ptr AbstractSearch::find_solution( - const vector &transitions, - int init_id, + const vector &transitions, int init_id, const Goals &goal_ids) { reset(transitions.size()); search_info[init_id].decrease_g_value_to(0); @@ -127,7 +127,8 @@ int AbstractSearch::astar_search( assert(f >= 0); assert(f != INF); open_queue.push(f, succ_id); - search_info[succ_id].set_incoming_transition(Transition(op_id, state_id)); + search_info[succ_id].set_incoming_transition( + Transition(op_id, state_id)); } } } @@ -151,10 +152,8 @@ void AbstractSearch::copy_h_value_to_children(int v, int v1, int v2) { set_h_value(v2, h); } - vector compute_distances( - const vector &transitions, - const vector &costs, + const vector &transitions, const vector &costs, const unordered_set &start_ids) { vector distances(transitions.size(), INF); priority_queues::AdaptiveQueue open_queue; diff --git a/src/search/cartesian_abstractions/abstract_search.h b/src/search/cartesian_abstractions/abstract_search.h index 0504008a96..77e129e063 100644 --- a/src/search/cartesian_abstractions/abstract_search.h +++ b/src/search/cartesian_abstractions/abstract_search.h @@ -21,10 +21,8 @@ class AbstractSearch { int g; int h; Transition incoming_transition; -public: - AbstractSearchInfo() - : h(0), - incoming_transition(UNDEFINED, UNDEFINED) { + public: + AbstractSearchInfo() : h(0), incoming_transition(UNDEFINED, UNDEFINED) { reset(); } @@ -56,8 +54,9 @@ class AbstractSearch { } const Transition 
&get_incoming_transition() const { - assert(incoming_transition.op_id != UNDEFINED && - incoming_transition.target_id != UNDEFINED); + assert( + incoming_transition.op_id != UNDEFINED && + incoming_transition.target_id != UNDEFINED); return incoming_transition; } }; @@ -73,23 +72,20 @@ class AbstractSearch { std::unique_ptr extract_solution(int init_id, int goal_id) const; void update_goal_distances(const Solution &solution); int astar_search( - const std::vector &transitions, - const Goals &goals); + const std::vector &transitions, const Goals &goals); public: explicit AbstractSearch(const std::vector &operator_costs); std::unique_ptr find_solution( - const std::vector &transitions, - int init_id, + const std::vector &transitions, int init_id, const Goals &goal_ids); int get_h_value(int state_id) const; void copy_h_value_to_children(int v, int v1, int v2); }; std::vector compute_distances( - const std::vector &transitions, - const std::vector &costs, + const std::vector &transitions, const std::vector &costs, const std::unordered_set &start_ids); } diff --git a/src/search/cartesian_abstractions/abstract_state.cc b/src/search/cartesian_abstractions/abstract_state.cc index 01f866c66e..83f1a9ad4e 100644 --- a/src/search/cartesian_abstractions/abstract_state.cc +++ b/src/search/cartesian_abstractions/abstract_state.cc @@ -12,9 +12,7 @@ using namespace std; namespace cartesian_abstractions { AbstractState::AbstractState( int state_id, NodeID node_id, CartesianSet &&cartesian_set) - : state_id(state_id), - node_id(node_id), - cartesian_set(move(cartesian_set)) { + : state_id(state_id), node_id(node_id), cartesian_set(move(cartesian_set)) { } int AbstractState::count(int var) const { @@ -47,7 +45,8 @@ pair AbstractState::split_domain( // In v2 var can only have the wanted values. 
v2_cartesian_set.add(var, value); } - assert(v1_cartesian_set.count(var) == cartesian_set.count(var) - num_wanted); + assert( + v1_cartesian_set.count(var) == cartesian_set.count(var) - num_wanted); assert(v2_cartesian_set.count(var) == num_wanted); return make_pair(v1_cartesian_set, v2_cartesian_set); } @@ -65,7 +64,8 @@ CartesianSet AbstractState::regress(const OperatorProxy &op) const { return regression; } -bool AbstractState::domain_subsets_intersect(const AbstractState &other, int var) const { +bool AbstractState::domain_subsets_intersect( + const AbstractState &other, int var) const { return cartesian_set.intersects(other.cartesian_set, var); } diff --git a/src/search/cartesian_abstractions/abstract_state.h b/src/search/cartesian_abstractions/abstract_state.h index 96e6dfd544..392b13eca2 100644 --- a/src/search/cartesian_abstractions/abstract_state.h +++ b/src/search/cartesian_abstractions/abstract_state.h @@ -58,7 +58,8 @@ class AbstractState { NodeID get_node_id() const; - friend std::ostream &operator<<(std::ostream &os, const AbstractState &state) { + friend std::ostream &operator<<( + std::ostream &os, const AbstractState &state) { return os << "#" << state.get_id() << state.cartesian_set; } diff --git a/src/search/cartesian_abstractions/abstraction.cc b/src/search/cartesian_abstractions/abstraction.cc index a036c63a5f..35892c56f9 100644 --- a/src/search/cartesian_abstractions/abstraction.cc +++ b/src/search/cartesian_abstractions/abstraction.cc @@ -18,8 +18,10 @@ using namespace std; namespace cartesian_abstractions { -Abstraction::Abstraction(const shared_ptr &task, utils::LogProxy &log) - : transition_system(make_unique(TaskProxy(*task).get_operators())), +Abstraction::Abstraction( + const shared_ptr &task, utils::LogProxy &log) + : transition_system( + make_unique(TaskProxy(*task).get_operators())), concrete_initial_state(TaskProxy(*task).get_initial_state()), goal_facts(task_properties::get_fact_pairs(TaskProxy(*task).get_goals())), 
refinement_hierarchy(make_unique(task)), @@ -62,7 +64,8 @@ void Abstraction::mark_all_states_as_goals() { } } -void Abstraction::initialize_trivial_abstraction(const vector &domain_sizes) { +void Abstraction::initialize_trivial_abstraction( + const vector &domain_sizes) { unique_ptr init_state = AbstractState::get_trivial_abstract_state(domain_sizes); init_id = init_state->get_id(); diff --git a/src/search/cartesian_abstractions/abstraction.h b/src/search/cartesian_abstractions/abstraction.h index 779cdfe507..df330e1776 100644 --- a/src/search/cartesian_abstractions/abstraction.h +++ b/src/search/cartesian_abstractions/abstraction.h @@ -46,7 +46,8 @@ class Abstraction { void initialize_trivial_abstraction(const std::vector &domain_sizes); public: - Abstraction(const std::shared_ptr &task, utils::LogProxy &log); + Abstraction( + const std::shared_ptr &task, utils::LogProxy &log); ~Abstraction(); Abstraction(const Abstraction &) = delete; diff --git a/src/search/cartesian_abstractions/additive_cartesian_heuristic.cc b/src/search/cartesian_abstractions/additive_cartesian_heuristic.cc index c25c8f0d1a..c7637383c1 100644 --- a/src/search/cartesian_abstractions/additive_cartesian_heuristic.cc +++ b/src/search/cartesian_abstractions/additive_cartesian_heuristic.cc @@ -18,14 +18,13 @@ using namespace std; namespace cartesian_abstractions { static vector generate_heuristic_functions( const vector> &subtask_generators, - int max_states, int max_transitions, double max_time, - PickSplit pick, bool use_general_costs, int random_seed, + int max_states, int max_transitions, double max_time, PickSplit pick, + bool use_general_costs, int random_seed, const shared_ptr &transform, utils::LogProxy &log) { if (log.is_at_least_normal()) { log << "Initializing additive Cartesian heuristic..." 
<< endl; } - shared_ptr rng = - utils::get_rng(random_seed); + shared_ptr rng = utils::get_rng(random_seed); CostSaturation cost_saturation( subtask_generators, max_states, max_transitions, max_time, pick, use_general_costs, *rng, log); @@ -33,16 +32,15 @@ static vector generate_heuristic_functions( } AdditiveCartesianHeuristic::AdditiveCartesianHeuristic( - const vector> &subtasks, - int max_states, int max_transitions, double max_time, - PickSplit pick, bool use_general_costs, int random_seed, + const vector> &subtasks, int max_states, + int max_transitions, double max_time, PickSplit pick, + bool use_general_costs, int random_seed, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) : Heuristic(transform, cache_estimates, description, verbosity), heuristic_functions(generate_heuristic_functions( - subtasks, max_states, max_transitions, - max_time, pick, use_general_costs, - random_seed, transform, log)) { + subtasks, max_states, max_transitions, max_time, pick, + use_general_costs, random_seed, transform, log)) { } int AdditiveCartesianHeuristic::compute_heuristic(const State &ancestor_state) { @@ -73,9 +71,7 @@ class AdditiveCartesianHeuristicFeature "https://ai.dmi.unibas.ch/papers/seipp-helmert-icaps2013.pdf", "Proceedings of the 23rd International Conference on Automated " "Planning and Scheduling (ICAPS 2013)", - "347-351", - "AAAI Press", - "2013") + + "347-351", "AAAI Press", "2013") + "and the paper showing how to make the abstractions additive:" + utils::format_conference_reference( {"Jendrik Seipp", "Malte Helmert"}, @@ -83,9 +79,7 @@ class AdditiveCartesianHeuristicFeature "https://ai.dmi.unibas.ch/papers/seipp-helmert-icaps2014.pdf", "Proceedings of the 24th International Conference on " "Automated Planning and Scheduling (ICAPS 2014)", - "289-297", - "AAAI Press", - "2014") + + "289-297", "AAAI Press", "2014") + "For more details on Cartesian CEGAR and saturated cost partitioning, " "see the 
journal paper" + utils::format_journal_reference( @@ -93,38 +87,28 @@ class AdditiveCartesianHeuristicFeature "Counterexample-Guided Cartesian Abstraction Refinement for " "Classical Planning", "https://ai.dmi.unibas.ch/papers/seipp-helmert-jair2018.pdf", - "Journal of Artificial Intelligence Research", - "62", - "535-577", + "Journal of Artificial Intelligence Research", "62", "535-577", "2018")); add_list_option>( - "subtasks", - "subtask generators", - "[landmarks(),goals()]"); + "subtasks", "subtask generators", "[landmarks(),goals()]"); add_option( "max_states", - "maximum sum of abstract states over all abstractions", - "infinity", + "maximum sum of abstract states over all abstractions", "infinity", plugins::Bounds("1", "infinity")); add_option( "max_transitions", "maximum sum of real transitions (excluding self-loops) over " " all abstractions", - "1M", - plugins::Bounds("0", "infinity")); + "1M", plugins::Bounds("0", "infinity")); add_option( - "max_time", - "maximum time in seconds for building abstractions", - "infinity", - plugins::Bounds("0.0", "infinity")); + "max_time", "maximum time in seconds for building abstractions", + "infinity", plugins::Bounds("0.0", "infinity")); add_option( - "pick", - "how to choose on which variable to split the flaw state", + "pick", "how to choose on which variable to split the flaw state", "max_refined"); add_option( - "use_general_costs", - "allow negative costs in cost partitioning", + "use_general_costs", "allow negative costs in cost partitioning", "true"); utils::add_rng_options_to_feature(*this); add_heuristic_options_to_feature(*this, "cegar"); @@ -139,14 +123,12 @@ class AdditiveCartesianHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list>("subtasks"), - 
opts.get("max_states"), - opts.get("max_transitions"), - opts.get("max_time"), - opts.get("pick"), + opts.get("max_states"), opts.get("max_transitions"), + opts.get("max_time"), opts.get("pick"), opts.get("use_general_costs"), utils::get_rng_arguments_from_options(opts), get_heuristic_arguments_from_options(opts)); @@ -155,30 +137,25 @@ class AdditiveCartesianHeuristicFeature static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"random", - "select a random variable (among all eligible variables)"}, - {"min_unwanted", - "select an eligible variable which has the least unwanted values " - "(number of values of v that land in the abstract state whose " - "h-value will probably be raised) in the flaw state"}, - {"max_unwanted", - "select an eligible variable which has the most unwanted values " - "(number of values of v that land in the abstract state whose " - "h-value will probably be raised) in the flaw state"}, - {"min_refined", - "select an eligible variable which is the least refined " - "(-1 * (remaining_values(v) / original_domain_size(v))) " - "in the flaw state"}, - {"max_refined", - "select an eligible variable which is the most refined " - "(-1 * (remaining_values(v) / original_domain_size(v))) " - "in the flaw state"}, - {"min_hadd", - "select an eligible variable with minimal h^add(s_0) value " - "over all facts that need to be removed from the flaw state"}, - {"max_hadd", - "select an eligible variable with maximal h^add(s_0) value " - "over all facts that need to be removed from the flaw state"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"random", "select a random variable (among all eligible variables)"}, + {"min_unwanted", + "select an eligible variable which has the least unwanted values " + "(number of values of v that land in the abstract state whose " + "h-value will probably be raised) in the flaw state"}, + {"max_unwanted", + "select an eligible variable which has the most unwanted values " + 
"(number of values of v that land in the abstract state whose " + "h-value will probably be raised) in the flaw state"}, + {"min_refined", "select an eligible variable which is the least refined " + "(-1 * (remaining_values(v) / original_domain_size(v))) " + "in the flaw state"}, + {"max_refined", "select an eligible variable which is the most refined " + "(-1 * (remaining_values(v) / original_domain_size(v))) " + "in the flaw state"}, + {"min_hadd", "select an eligible variable with minimal h^add(s_0) value " + "over all facts that need to be removed from the flaw state"}, + {"max_hadd", + "select an eligible variable with maximal h^add(s_0) value " + "over all facts that need to be removed from the flaw state"}}); } diff --git a/src/search/cartesian_abstractions/additive_cartesian_heuristic.h b/src/search/cartesian_abstractions/additive_cartesian_heuristic.h index ed3e66b625..1e9e0909d5 100644 --- a/src/search/cartesian_abstractions/additive_cartesian_heuristic.h +++ b/src/search/cartesian_abstractions/additive_cartesian_heuristic.h @@ -23,11 +23,10 @@ class AdditiveCartesianHeuristic : public Heuristic { public: explicit AdditiveCartesianHeuristic( const std::vector> &subtasks, - int max_states, int max_transitions, double max_time, - PickSplit pick, bool use_general_costs, int random_seed, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + int max_states, int max_transitions, double max_time, PickSplit pick, + bool use_general_costs, int random_seed, + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/cartesian_abstractions/cartesian_heuristic_function.cc b/src/search/cartesian_abstractions/cartesian_heuristic_function.cc index 86477183ec..770c5fe47a 100644 --- a/src/search/cartesian_abstractions/cartesian_heuristic_function.cc +++ 
b/src/search/cartesian_abstractions/cartesian_heuristic_function.cc @@ -8,10 +8,8 @@ using namespace std; namespace cartesian_abstractions { CartesianHeuristicFunction::CartesianHeuristicFunction( - unique_ptr &&hierarchy, - vector &&h_values) - : refinement_hierarchy(move(hierarchy)), - h_values(move(h_values)) { + unique_ptr &&hierarchy, vector &&h_values) + : refinement_hierarchy(move(hierarchy)), h_values(move(h_values)) { } int CartesianHeuristicFunction::get_value(const State &state) const { diff --git a/src/search/cartesian_abstractions/cegar.cc b/src/search/cartesian_abstractions/cegar.cc index 3e9ab0dc87..65ebff8272 100644 --- a/src/search/cartesian_abstractions/cegar.cc +++ b/src/search/cartesian_abstractions/cegar.cc @@ -1,7 +1,7 @@ #include "cegar.h" -#include "abstraction.h" #include "abstract_state.h" +#include "abstraction.h" #include "cartesian_set.h" #include "transition_system.h" #include "utils.h" @@ -20,7 +20,8 @@ using namespace std; namespace cartesian_abstractions { -// Create the Cartesian set that corresponds to the given preconditions or goals. +// Create the Cartesian set that corresponds to the given preconditions or +// goals. 
static CartesianSet get_cartesian_set( const vector &domain_sizes, const ConditionsProxy &conditions) { CartesianSet cartesian_set(domain_sizes); @@ -39,8 +40,7 @@ struct Flaw { CartesianSet desired_cartesian_set; Flaw( - State &&concrete_state, - const AbstractState ¤t_abstract_state, + State &&concrete_state, const AbstractState ¤t_abstract_state, CartesianSet &&desired_cartesian_set) : concrete_state(move(concrete_state)), current_abstract_state(current_abstract_state), @@ -79,13 +79,9 @@ struct Flaw { }; CEGAR::CEGAR( - const shared_ptr &task, - int max_states, - int max_non_looping_transitions, - double max_time, - PickSplit pick, - utils::RandomNumberGenerator &rng, - utils::LogProxy &log) + const shared_ptr &task, int max_states, + int max_non_looping_transitions, double max_time, PickSplit pick, + utils::RandomNumberGenerator &rng, utils::LogProxy &log) : task_proxy(*task), domain_sizes(get_domain_sizes(task_proxy)), max_states(max_states), @@ -99,14 +95,15 @@ CEGAR::CEGAR( if (log.is_at_least_normal()) { log << "Start building abstraction." << endl; log << "Maximum number of states: " << max_states << endl; - log << "Maximum number of transitions: " - << max_non_looping_transitions << endl; + log << "Maximum number of transitions: " << max_non_looping_transitions + << endl; } refinement_loop(rng); if (log.is_at_least_normal()) { log << "Done building abstraction." 
<< endl; - log << "Time for building abstraction: " << timer.get_elapsed_time() << endl; + log << "Time for building abstraction: " << timer.get_elapsed_time() + << endl; print_statistics(); } } @@ -124,8 +121,8 @@ void CEGAR::separate_facts_unreachable_before_goal() { assert(abstraction->get_num_states() == 1); assert(task_proxy.get_goals().size() == 1); FactProxy goal = task_proxy.get_goals()[0]; - utils::HashSet reachable_facts = get_relaxed_possible_before( - task_proxy, goal); + utils::HashSet reachable_facts = + get_relaxed_possible_before(task_proxy, goal); for (VariableProxy var : task_proxy.get_variables()) { if (!may_keep_refining()) break; @@ -137,7 +134,8 @@ void CEGAR::separate_facts_unreachable_before_goal() { unreachable_values.push_back(value); } if (!unreachable_values.empty()) - abstraction->refine(abstraction->get_initial_state(), var_id, unreachable_values); + abstraction->refine( + abstraction->get_initial_state(), var_id, unreachable_values); } abstraction->mark_all_states_as_goals(); } @@ -148,7 +146,9 @@ bool CEGAR::may_keep_refining() const { log << "Reached maximum number of states." << endl; } return false; - } else if (abstraction->get_transition_system().get_num_non_loops() >= max_non_looping_transitions) { + } else if ( + abstraction->get_transition_system().get_num_non_loops() >= + max_non_looping_transitions) { if (log.is_at_least_normal()) { log << "Reached maximum number of transitions." << endl; } @@ -211,18 +211,22 @@ void CEGAR::refinement_loop(utils::RandomNumberGenerator &rng) { const AbstractState &abstract_state = flaw->current_abstract_state; int state_id = abstract_state.get_id(); vector splits = flaw->get_possible_splits(); - const Split &split = split_selector.pick_split(abstract_state, splits, rng); - auto new_state_ids = abstraction->refine(abstract_state, split.var_id, split.values); - // Since h-values only increase we can assign the h-value to the children. 
+ const Split &split = + split_selector.pick_split(abstract_state, splits, rng); + auto new_state_ids = + abstraction->refine(abstract_state, split.var_id, split.values); + // Since h-values only increase we can assign the h-value to the + // children. abstract_search.copy_h_value_to_children( state_id, new_state_ids.first, new_state_ids.second); refine_timer.stop(); if (log.is_at_least_verbose() && abstraction->get_num_states() % 1000 == 0) { - log << abstraction->get_num_states() << "/" << max_states << " states, " - << abstraction->get_transition_system().get_num_non_loops() << "/" - << max_non_looping_transitions << " transitions" << endl; + log << abstraction->get_num_states() << "/" << max_states + << " states, " + << abstraction->get_transition_system().get_num_non_loops() + << "/" << max_non_looping_transitions << " transitions" << endl; } } if (log.is_at_least_normal()) { @@ -247,18 +251,19 @@ unique_ptr CEGAR::find_flaw(const Solution &solution) { if (!utils::extra_memory_padding_is_reserved()) break; OperatorProxy op = task_proxy.get_operators()[step.op_id]; - const AbstractState *next_abstract_state = &abstraction->get_state(step.target_id); + const AbstractState *next_abstract_state = + &abstraction->get_state(step.target_id); if (task_properties::is_applicable(op, concrete_state)) { if (log.is_at_least_debug()) log << " Move to " << *next_abstract_state << " with " << op.get_name() << endl; - State next_concrete_state = concrete_state.get_unregistered_successor(op); + State next_concrete_state = + concrete_state.get_unregistered_successor(op); if (!next_abstract_state->includes(next_concrete_state)) { if (log.is_at_least_debug()) log << " Paths deviate." 
<< endl; return make_unique( - move(concrete_state), - *abstract_state, + move(concrete_state), *abstract_state, next_abstract_state->regress(op)); } abstract_state = next_abstract_state; @@ -267,8 +272,7 @@ unique_ptr CEGAR::find_flaw(const Solution &solution) { if (log.is_at_least_debug()) log << " Operator not applicable: " << op.get_name() << endl; return make_unique( - move(concrete_state), - *abstract_state, + move(concrete_state), *abstract_state, get_cartesian_set(domain_sizes, op.get_preconditions())); } } @@ -280,8 +284,7 @@ unique_ptr CEGAR::find_flaw(const Solution &solution) { if (log.is_at_least_debug()) log << " Goal test failed." << endl; return make_unique( - move(concrete_state), - *abstract_state, + move(concrete_state), *abstract_state, get_cartesian_set(domain_sizes, task_proxy.get_goals())); } } @@ -290,7 +293,8 @@ void CEGAR::print_statistics() { if (log.is_at_least_normal()) { abstraction->print_statistics(); int init_id = abstraction->get_initial_state().get_id(); - log << "Initial h value: " << abstract_search.get_h_value(init_id) << endl; + log << "Initial h value: " << abstract_search.get_h_value(init_id) + << endl; log << endl; } } diff --git a/src/search/cartesian_abstractions/cegar.h b/src/search/cartesian_abstractions/cegar.h index b01f89324e..ac599fa955 100644 --- a/src/search/cartesian_abstractions/cegar.h +++ b/src/search/cartesian_abstractions/cegar.h @@ -65,13 +65,9 @@ class CEGAR { public: CEGAR( - const std::shared_ptr &task, - int max_states, - int max_non_looping_transitions, - double max_time, - PickSplit pick, - utils::RandomNumberGenerator &rng, - utils::LogProxy &log); + const std::shared_ptr &task, int max_states, + int max_non_looping_transitions, double max_time, PickSplit pick, + utils::RandomNumberGenerator &rng, utils::LogProxy &log); ~CEGAR(); CEGAR(const CEGAR &) = delete; diff --git a/src/search/cartesian_abstractions/cost_saturation.cc b/src/search/cartesian_abstractions/cost_saturation.cc index 
ef0081674e..924ba6857d 100644 --- a/src/search/cartesian_abstractions/cost_saturation.cc +++ b/src/search/cartesian_abstractions/cost_saturation.cc @@ -33,12 +33,11 @@ namespace cartesian_abstractions { static const int memory_padding_in_mb = 75; static vector compute_saturated_costs( - const TransitionSystem &transition_system, - const vector &g_values, - const vector &h_values, - bool use_general_costs) { + const TransitionSystem &transition_system, const vector &g_values, + const vector &h_values, bool use_general_costs) { const int min_cost = use_general_costs ? -INF : 0; - vector saturated_costs(transition_system.get_num_operators(), min_cost); + vector saturated_costs( + transition_system.get_num_operators(), min_cost); assert(g_values.size() == h_values.size()); int num_states = h_values.size(); for (int state_id = 0; state_id < num_states; ++state_id) { @@ -56,7 +55,7 @@ static vector compute_saturated_costs( if (g == INF || h == INF) continue; - for (const Transition &transition: + for (const Transition &transition : transition_system.get_outgoing_transitions()[state_id]) { int op_id = transition.op_id; int succ_id = transition.target_id; @@ -80,16 +79,11 @@ static vector compute_saturated_costs( return saturated_costs; } - CostSaturation::CostSaturation( const vector> &subtask_generators, - int max_states, - int max_non_looping_transitions, - double max_time, - PickSplit pick_split, - bool use_general_costs, - utils::RandomNumberGenerator &rng, - utils::LogProxy &log) + int max_states, int max_non_looping_transitions, double max_time, + PickSplit pick_split, bool use_general_costs, + utils::RandomNumberGenerator &rng, utils::LogProxy &log) : subtask_generators(subtask_generators), max_states(max_states), max_non_looping_transitions(max_non_looping_transitions), @@ -119,17 +113,17 @@ vector CostSaturation::generate_heuristic_functions( State initial_state = TaskProxy(*task).get_initial_state(); - function should_abort = - [&] () { - return num_states >= 
max_states || - num_non_looping_transitions >= max_non_looping_transitions || - timer.is_expired() || - !utils::extra_memory_padding_is_reserved() || - state_is_dead_end(initial_state); - }; + function should_abort = [&]() { + return num_states >= max_states || + num_non_looping_transitions >= max_non_looping_transitions || + timer.is_expired() || + !utils::extra_memory_padding_is_reserved() || + state_is_dead_end(initial_state); + }; utils::reserve_extra_memory_padding(memory_padding_in_mb); - for (const shared_ptr &subtask_generator : subtask_generators) { + for (const shared_ptr &subtask_generator : + subtask_generators) { SharedTasks subtasks = subtask_generator->get_subtasks(task, log); build_abstractions(subtasks, timer, should_abort); if (should_abort()) @@ -189,47 +183,39 @@ bool CostSaturation::state_is_dead_end(const State &state) const { void CostSaturation::build_abstractions( const vector> &subtasks, - const utils::CountdownTimer &timer, - const function &should_abort) { + const utils::CountdownTimer &timer, const function &should_abort) { int rem_subtasks = subtasks.size(); for (shared_ptr subtask : subtasks) { subtask = get_remaining_costs_task(subtask); assert(num_states < max_states); CEGAR cegar( - subtask, - max(1, (max_states - num_states) / rem_subtasks), + subtask, max(1, (max_states - num_states) / rem_subtasks), max(1, (max_non_looping_transitions - num_non_looping_transitions) / - rem_subtasks), - timer.get_remaining_time() / rem_subtasks, - pick_split, - rng, - log); + rem_subtasks), + timer.get_remaining_time() / rem_subtasks, pick_split, rng, log); unique_ptr abstraction = cegar.extract_abstraction(); ++num_abstractions; num_states += abstraction->get_num_states(); - num_non_looping_transitions += abstraction->get_transition_system().get_num_non_loops(); + num_non_looping_transitions += + abstraction->get_transition_system().get_num_non_loops(); assert(num_states <= max_states); - vector costs = 
task_properties::get_operator_costs(TaskProxy(*subtask)); + vector costs = + task_properties::get_operator_costs(TaskProxy(*subtask)); vector init_distances = compute_distances( abstraction->get_transition_system().get_outgoing_transitions(), - costs, - {abstraction->get_initial_state().get_id()}); + costs, {abstraction->get_initial_state().get_id()}); vector goal_distances = compute_distances( abstraction->get_transition_system().get_incoming_transitions(), - costs, - abstraction->get_goals()); + costs, abstraction->get_goals()); vector saturated_costs = compute_saturated_costs( - abstraction->get_transition_system(), - init_distances, - goal_distances, - use_general_costs); + abstraction->get_transition_system(), init_distances, + goal_distances, use_general_costs); heuristic_functions.emplace_back( - abstraction->extract_refinement_hierarchy(), - move(goal_distances)); + abstraction->extract_refinement_hierarchy(), move(goal_distances)); reduce_remaining_costs(saturated_costs); diff --git a/src/search/cartesian_abstractions/cost_saturation.h b/src/search/cartesian_abstractions/cost_saturation.h index f64ef3537f..29cc8354b4 100644 --- a/src/search/cartesian_abstractions/cost_saturation.h +++ b/src/search/cartesian_abstractions/cost_saturation.h @@ -54,14 +54,11 @@ class CostSaturation { public: CostSaturation( - const std::vector> &subtask_generators, - int max_states, - int max_non_looping_transitions, - double max_time, - PickSplit pick_split, - bool use_general_costs, - utils::RandomNumberGenerator &rng, - utils::LogProxy &log); + const std::vector> + &subtask_generators, + int max_states, int max_non_looping_transitions, double max_time, + PickSplit pick_split, bool use_general_costs, + utils::RandomNumberGenerator &rng, utils::LogProxy &log); std::vector generate_heuristic_functions( const std::shared_ptr &task); diff --git a/src/search/cartesian_abstractions/refinement_hierarchy.cc b/src/search/cartesian_abstractions/refinement_hierarchy.cc index 
7431b118ef..b22c3b6881 100644 --- a/src/search/cartesian_abstractions/refinement_hierarchy.cc +++ b/src/search/cartesian_abstractions/refinement_hierarchy.cc @@ -36,15 +36,12 @@ void Node::split(int var, int value, NodeID left_child, NodeID right_child) { assert(is_split()); } - - ostream &operator<<(ostream &os, const Node &node) { return os << ""; } - RefinementHierarchy::RefinementHierarchy(const shared_ptr &task) : task(task) { nodes.emplace_back(0); @@ -66,7 +63,8 @@ NodeID RefinementHierarchy::get_node_id(const State &state) const { } pair RefinementHierarchy::split( - NodeID node_id, int var, const vector &values, int left_state_id, int right_state_id) { + NodeID node_id, int var, const vector &values, int left_state_id, + int right_state_id) { NodeID helper_id = node_id; NodeID right_child_id = add_node(right_state_id); for (int value : values) { diff --git a/src/search/cartesian_abstractions/refinement_hierarchy.h b/src/search/cartesian_abstractions/refinement_hierarchy.h index 45eeafaf06..6b86217b5d 100644 --- a/src/search/cartesian_abstractions/refinement_hierarchy.h +++ b/src/search/cartesian_abstractions/refinement_hierarchy.h @@ -50,7 +50,6 @@ class RefinementHierarchy { int get_abstract_state_id(const State &state) const; }; - class Node { /* While right_child is always the node of a (possibly split) @@ -66,7 +65,8 @@ class Node { int var; int value; - // When splitting the corresponding state, we change this value to UNDEFINED. + // When splitting the corresponding state, we change this value to + // UNDEFINED. 
int state_id; bool information_is_valid() const; diff --git a/src/search/cartesian_abstractions/split_selector.cc b/src/search/cartesian_abstractions/split_selector.cc index 03effa1276..493ff7e63b 100644 --- a/src/search/cartesian_abstractions/split_selector.cc +++ b/src/search/cartesian_abstractions/split_selector.cc @@ -4,7 +4,6 @@ #include "utils.h" #include "../heuristics/additive_heuristic.h" - #include "../utils/logging.h" #include "../utils/rng.h" @@ -16,17 +15,12 @@ using namespace std; namespace cartesian_abstractions { SplitSelector::SplitSelector( - const shared_ptr &task, - PickSplit pick) - : task(task), - task_proxy(*task), - pick(pick) { + const shared_ptr &task, PickSplit pick) + : task(task), task_proxy(*task), pick(pick) { if (pick == PickSplit::MIN_HADD || pick == PickSplit::MAX_HADD) { - additive_heuristic = - make_unique( - tasks::AxiomHandlingType::APPROXIMATE_NEGATIVE, task, - false, "h^add within CEGAR abstractions", - utils::Verbosity::SILENT); + additive_heuristic = make_unique( + tasks::AxiomHandlingType::APPROXIMATE_NEGATIVE, task, false, + "h^add within CEGAR abstractions", utils::Verbosity::SILENT); additive_heuristic->compute_heuristic_for_cegar( task_proxy.get_initial_state()); } @@ -43,7 +37,8 @@ int SplitSelector::get_num_unwanted_values( return num_unwanted_values; } -double SplitSelector::get_refinedness(const AbstractState &state, int var_id) const { +double SplitSelector::get_refinedness( + const AbstractState &state, int var_id) const { double all_values = task_proxy.get_variables()[var_id].get_domain_size(); assert(all_values >= 2); double remaining_values = state.count(var_id); @@ -60,7 +55,8 @@ int SplitSelector::get_hadd_value(int var_id, int value) const { return hadd; } -int SplitSelector::get_min_hadd_value(int var_id, const vector &values) const { +int SplitSelector::get_min_hadd_value( + int var_id, const vector &values) const { int min_hadd = numeric_limits::max(); for (int value : values) { const int hadd = 
get_hadd_value(var_id, value); @@ -71,7 +67,8 @@ int SplitSelector::get_min_hadd_value(int var_id, const vector &values) con return min_hadd; } -int SplitSelector::get_max_hadd_value(int var_id, const vector &values) const { +int SplitSelector::get_max_hadd_value( + int var_id, const vector &values) const { int max_hadd = -1; for (int value : values) { const int hadd = get_hadd_value(var_id, value); @@ -82,7 +79,8 @@ int SplitSelector::get_max_hadd_value(int var_id, const vector &values) con return max_hadd; } -double SplitSelector::rate_split(const AbstractState &state, const Split &split) const { +double SplitSelector::rate_split( + const AbstractState &state, const Split &split) const { int var_id = split.var_id; const vector &values = split.values; double rating; @@ -112,9 +110,9 @@ double SplitSelector::rate_split(const AbstractState &state, const Split &split) return rating; } -const Split &SplitSelector::pick_split(const AbstractState &state, - const vector &splits, - utils::RandomNumberGenerator &rng) const { +const Split &SplitSelector::pick_split( + const AbstractState &state, const vector &splits, + utils::RandomNumberGenerator &rng) const { assert(!splits.empty()); if (splits.size() == 1) { diff --git a/src/search/cartesian_abstractions/split_selector.h b/src/search/cartesian_abstractions/split_selector.h index f1a631bd40..6c3db71785 100644 --- a/src/search/cartesian_abstractions/split_selector.h +++ b/src/search/cartesian_abstractions/split_selector.h @@ -31,7 +31,6 @@ enum class PickSplit { MAX_HADD }; - struct Split { const int var_id; const std::vector values; @@ -41,7 +40,6 @@ struct Split { } }; - /* Select split in case there are multiple possible splits. 
*/ @@ -52,7 +50,8 @@ class SplitSelector { const PickSplit pick; - int get_num_unwanted_values(const AbstractState &state, const Split &split) const; + int get_num_unwanted_values( + const AbstractState &state, const Split &split) const; double get_refinedness(const AbstractState &state, int var_id) const; int get_hadd_value(int var_id, int value) const; int get_min_hadd_value(int var_id, const std::vector &values) const; @@ -65,8 +64,7 @@ class SplitSelector { ~SplitSelector(); const Split &pick_split( - const AbstractState &state, - const std::vector &splits, + const AbstractState &state, const std::vector &splits, utils::RandomNumberGenerator &rng) const; }; } diff --git a/src/search/cartesian_abstractions/subtask_generators.cc b/src/search/cartesian_abstractions/subtask_generators.cc index 0229992891..9527a4684a 100644 --- a/src/search/cartesian_abstractions/subtask_generators.cc +++ b/src/search/cartesian_abstractions/subtask_generators.cc @@ -35,9 +35,8 @@ class SortFactsByIncreasingHaddValues { explicit SortFactsByIncreasingHaddValues( const shared_ptr &task) : hadd(make_unique( - tasks::AxiomHandlingType::APPROXIMATE_NEGATIVE, task, - false, "h^add within CEGAR abstractions", - utils::Verbosity::SILENT)) { + tasks::AxiomHandlingType::APPROXIMATE_NEGATIVE, task, false, + "h^add within CEGAR abstractions", utils::Verbosity::SILENT)) { TaskProxy task_proxy(*task); hadd->compute_heuristic_for_cegar(task_proxy.get_initial_state()); } @@ -47,20 +46,21 @@ class SortFactsByIncreasingHaddValues { } }; - static void remove_initial_state_facts( const TaskProxy &task_proxy, Facts &facts) { State initial_state = task_proxy.get_initial_state(); - facts.erase(remove_if(facts.begin(), facts.end(), [&](FactPair fact) { - return initial_state[fact.var].get_value() == fact.value; - }), facts.end()); + facts.erase( + remove_if( + facts.begin(), facts.end(), + [&](FactPair fact) { + return initial_state[fact.var].get_value() == fact.value; + }), + facts.end()); } static void 
order_facts( - const shared_ptr &task, - FactOrder fact_order, - vector &facts, - utils::RandomNumberGenerator &rng, + const shared_ptr &task, FactOrder fact_order, + vector &facts, utils::RandomNumberGenerator &rng, utils::LogProxy &log) { if (log.is_at_least_verbose()) { log << "Sort " << facts.size() << " facts" << endl; @@ -85,20 +85,15 @@ static void order_facts( } static Facts filter_and_order_facts( - const shared_ptr &task, - FactOrder fact_order, - Facts &facts, - utils::RandomNumberGenerator &rng, - utils::LogProxy &log) { + const shared_ptr &task, FactOrder fact_order, Facts &facts, + utils::RandomNumberGenerator &rng, utils::LogProxy &log) { TaskProxy task_proxy(*task); remove_initial_state_facts(task_proxy, facts); order_facts(task, fact_order, facts, rng, log); return facts; } - -TaskDuplicator::TaskDuplicator(int copies) - : num_copies(copies) { +TaskDuplicator::TaskDuplicator(int copies) : num_copies(copies) { } SharedTasks TaskDuplicator::get_subtasks( @@ -112,8 +107,7 @@ SharedTasks TaskDuplicator::get_subtasks( } GoalDecomposition::GoalDecomposition(FactOrder order, int random_seed) - : fact_order(order), - rng(utils::get_rng(random_seed)) { + : fact_order(order), rng(utils::get_rng(random_seed)) { } SharedTasks GoalDecomposition::get_subtasks( @@ -124,13 +118,12 @@ SharedTasks GoalDecomposition::get_subtasks( filter_and_order_facts(task, fact_order, goal_facts, *rng, log); for (const FactPair &goal : goal_facts) { shared_ptr subtask = - make_shared(task, Facts {goal}); + make_shared(task, Facts{goal}); subtasks.push_back(subtask); } return subtasks; } - LandmarkDecomposition::LandmarkDecomposition( FactOrder order, int random_seed, bool combine_facts) : fact_order(order), @@ -163,7 +156,7 @@ SharedTasks LandmarkDecomposition::get_subtasks( filter_and_order_facts(task, fact_order, landmark_facts, *rng, log); for (const FactPair &landmark : landmark_facts) { shared_ptr subtask = - make_shared(task, Facts {landmark}); + make_shared(task, 
Facts{landmark}); if (combine_facts) { subtask = build_domain_abstracted_task( subtask, atom_to_landmark_map[landmark]); @@ -175,16 +168,15 @@ SharedTasks LandmarkDecomposition::get_subtasks( static void add_fact_order_option(plugins::Feature &feature) { feature.add_option( - "order", - "ordering of goal or landmark facts", - "hadd_down"); + "order", "ordering of goal or landmark facts", "hadd_down"); utils::add_rng_options_to_feature(feature); } static tuple get_fact_order_arguments_from_options( const plugins::Options &opts) { - return tuple_cat(make_tuple(opts.get("order")), - utils::get_rng_arguments_from_options(opts)); + return tuple_cat( + make_tuple(opts.get("order")), + utils::get_rng_arguments_from_options(opts)); } class TaskDuplicatorFeature @@ -192,14 +184,12 @@ class TaskDuplicatorFeature public: TaskDuplicatorFeature() : TypedFeature("original") { add_option( - "copies", - "number of task copies", - "1", + "copies", "number of task copies", "1", plugins::Bounds("1", "infinity")); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("copies")); } @@ -214,8 +204,8 @@ class GoalDecompositionFeature add_fact_order_option(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( get_fact_order_arguments_from_options(opts)); } @@ -223,20 +213,18 @@ class GoalDecompositionFeature static plugins::FeaturePlugin _plugin_goals; - class LandmarkDecompositionFeature : public plugins::TypedFeature { public: LandmarkDecompositionFeature() : TypedFeature("landmarks") { add_fact_order_option(*this); add_option( - "combine_facts", - "combine landmark facts with domain abstraction", + "combine_facts", "combine landmark facts 
with domain abstraction", "true"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( get_fact_order_arguments_from_options(opts), opts.get("combine_facts")); @@ -245,19 +233,17 @@ class LandmarkDecompositionFeature static plugins::FeaturePlugin _plugin_landmarks; - -static class SubtaskGeneratorCategoryPlugin : public plugins::TypedCategoryPlugin { +static class SubtaskGeneratorCategoryPlugin + : public plugins::TypedCategoryPlugin { public: SubtaskGeneratorCategoryPlugin() : TypedCategoryPlugin("SubtaskGenerator") { document_synopsis("Subtask generator (used by the CEGAR heuristic)."); } -} -_category_plugin; +} _category_plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"original", "according to their (internal) variable index"}, - {"random", "according to a random permutation"}, - {"hadd_up", "according to their h^add value, lowest first"}, - {"hadd_down", "according to their h^add value, highest first "} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"original", "according to their (internal) variable index"}, + {"random", "according to a random permutation"}, + {"hadd_up", "according to their h^add value, lowest first"}, + {"hadd_down", "according to their h^add value, highest first "}}); } diff --git a/src/search/cartesian_abstractions/subtask_generators.h b/src/search/cartesian_abstractions/subtask_generators.h index cf55f54dec..dc7c4fcb50 100644 --- a/src/search/cartesian_abstractions/subtask_generators.h +++ b/src/search/cartesian_abstractions/subtask_generators.h @@ -31,7 +31,6 @@ enum class FactOrder { HADD_DOWN }; - /* Create focused subtasks. */ @@ -43,7 +42,6 @@ class SubtaskGenerator { virtual ~SubtaskGenerator() = default; }; - /* Return copies of the original task. 
*/ @@ -58,7 +56,6 @@ class TaskDuplicator : public SubtaskGenerator { utils::LogProxy &log) const override; }; - /* Use ModifiedGoalsTask to return a subtask for each goal fact. */ @@ -74,7 +71,6 @@ class GoalDecomposition : public SubtaskGenerator { utils::LogProxy &log) const override; }; - /* Nest ModifiedGoalsTask and DomainAbstractedTask to return subtasks focussing on a single landmark fact. @@ -91,9 +87,8 @@ class LandmarkDecomposition : public SubtaskGenerator { const landmarks::LandmarkNode *node) const; public: - explicit LandmarkDecomposition(FactOrder order, - int random_seed, - bool combine_facts); + explicit LandmarkDecomposition( + FactOrder order, int random_seed, bool combine_facts); virtual SharedTasks get_subtasks( const std::shared_ptr &task, diff --git a/src/search/cartesian_abstractions/transition.h b/src/search/cartesian_abstractions/transition.h index 2823be763e..5a10e3a206 100644 --- a/src/search/cartesian_abstractions/transition.h +++ b/src/search/cartesian_abstractions/transition.h @@ -8,9 +8,7 @@ struct Transition { int op_id; int target_id; - Transition(int op_id, int target_id) - : op_id(op_id), - target_id(target_id) { + Transition(int op_id, int target_id) : op_id(op_id), target_id(target_id) { } bool operator==(const Transition &other) const { diff --git a/src/search/cartesian_abstractions/transition_system.cc b/src/search/cartesian_abstractions/transition_system.cc index 7b6770cf89..57b5f9e9b5 100644 --- a/src/search/cartesian_abstractions/transition_system.cc +++ b/src/search/cartesian_abstractions/transition_system.cc @@ -20,15 +20,15 @@ static vector> get_preconditions_by_operator( vector> preconditions_by_operator; preconditions_by_operator.reserve(ops.size()); for (OperatorProxy op : ops) { - vector preconditions = task_properties::get_fact_pairs(op.get_preconditions()); + vector preconditions = + task_properties::get_fact_pairs(op.get_preconditions()); sort(preconditions.begin(), preconditions.end()); 
preconditions_by_operator.push_back(move(preconditions)); } return preconditions_by_operator; } -static vector get_postconditions( - const OperatorProxy &op) { +static vector get_postconditions(const OperatorProxy &op) { // Use map to obtain sorted postconditions. map var_to_post; for (FactProxy fact : op.get_preconditions()) { @@ -72,12 +72,11 @@ static void remove_transitions_with_given_target( Transitions &transitions, int state_id) { auto new_end = remove_if( transitions.begin(), transitions.end(), - [state_id](const Transition &t) {return t.target_id == state_id;}); + [state_id](const Transition &t) { return t.target_id == state_id; }); assert(new_end != transitions.end()); transitions.erase(new_end, transitions.end()); } - TransitionSystem::TransitionSystem(const OperatorsProxy &ops) : preconditions_by_operator(get_preconditions_by_operator(ops)), postconditions_by_operator(get_postconditions_by_operator(ops)), @@ -220,7 +219,8 @@ void TransitionSystem::rewire_outgoing_transitions( } void TransitionSystem::rewire_loops( - const Loops &old_loops, const AbstractState &v1, const AbstractState &v2, int var) { + const Loops &old_loops, const AbstractState &v1, const AbstractState &v2, + int var) { /* State v has been split into v1 and v2. Now for all self-loops v->v we need to add one or two of the transitions v1->v1, v1->v2, v2->v1 and v2->v2. */ @@ -274,8 +274,8 @@ void TransitionSystem::rewire_loops( } void TransitionSystem::rewire( - const AbstractStates &states, int v_id, - const AbstractState &v1, const AbstractState &v2, int var) { + const AbstractStates &states, int v_id, const AbstractState &v1, + const AbstractState &v2, int var) { // Retrieve old transitions and make space for new transitions. 
Transitions old_incoming = move(incoming[v_id]); Transitions old_outgoing = move(outgoing[v_id]); @@ -285,8 +285,12 @@ void TransitionSystem::rewire( int v2_id = v2.get_id(); utils::unused_variable(v1_id); utils::unused_variable(v2_id); - assert(incoming[v1_id].empty() && outgoing[v1_id].empty() && loops[v1_id].empty()); - assert(incoming[v2_id].empty() && outgoing[v2_id].empty() && loops[v2_id].empty()); + assert( + incoming[v1_id].empty() && outgoing[v1_id].empty() && + loops[v1_id].empty()); + assert( + incoming[v2_id].empty() && outgoing[v2_id].empty() && + loops[v2_id].empty()); // Remove old transitions and add new transitions. rewire_incoming_transitions(old_incoming, states, v1, v2, var); @@ -339,7 +343,8 @@ void TransitionSystem::print_statistics(utils::LogProxy &log) const { assert(get_num_loops() == total_loops); assert(get_num_non_loops() == total_outgoing_transitions); log << "Looping transitions: " << total_loops << endl; - log << "Non-looping transitions: " << total_outgoing_transitions << endl; + log << "Non-looping transitions: " << total_outgoing_transitions + << endl; } } } diff --git a/src/search/cartesian_abstractions/transition_system.h b/src/search/cartesian_abstractions/transition_system.h index b7b1e71063..7443fdeba1 100644 --- a/src/search/cartesian_abstractions/transition_system.h +++ b/src/search/cartesian_abstractions/transition_system.h @@ -48,16 +48,16 @@ class TransitionSystem { const Transitions &old_outgoing, const AbstractStates &states, const AbstractState &v1, const AbstractState &v2, int var); void rewire_loops( - const Loops &old_loops, - const AbstractState &v1, const AbstractState &v2, int var); + const Loops &old_loops, const AbstractState &v1, + const AbstractState &v2, int var); public: explicit TransitionSystem(const OperatorsProxy &ops); // Update transition system after v has been split for var into v1 and v2. 
void rewire( - const AbstractStates &states, int v_id, - const AbstractState &v1, const AbstractState &v2, int var); + const AbstractStates &states, int v_id, const AbstractState &v1, + const AbstractState &v2, int var); const std::vector &get_incoming_transitions() const; const std::vector &get_outgoing_transitions() const; diff --git a/src/search/cartesian_abstractions/utils_landmarks.cc b/src/search/cartesian_abstractions/utils_landmarks.cc index 1d9c0fb3b3..a81cbf6307 100644 --- a/src/search/cartesian_abstractions/utils_landmarks.cc +++ b/src/search/cartesian_abstractions/utils_landmarks.cc @@ -1,9 +1,9 @@ #include "utils_landmarks.h" -#include "../plugins/plugin.h" #include "../landmarks/landmark.h" #include "../landmarks/landmark_factory_hm.h" #include "../landmarks/landmark_graph.h" +#include "../plugins/plugin.h" #include "../utils/logging.h" #include @@ -15,7 +15,8 @@ using namespace landmarks; namespace cartesian_abstractions { static FactPair get_atom(const Landmark &landmark) { - // We assume that the given Landmarks are from an h^m landmark graph with m=1. + // We assume that the given Landmarks are from an h^m landmark graph with + // m=1. assert(landmark.type == ATOMIC); assert(landmark.atoms.size() == 1); return landmark.atoms[0]; @@ -43,8 +44,8 @@ utils::HashMap get_atom_to_landmark_map( const shared_ptr &graph) { // All landmarks are atomic, i.e., each has exactly one atom. 
assert(all_of(graph->begin(), graph->end(), [](auto &node) { - return node->get_landmark().atoms.size() == 1; - })); + return node->get_landmark().atoms.size() == 1; + })); utils::HashMap atom_to_landmark_map; for (const auto &node : *graph) { const FactPair &atom = node->get_landmark().atoms[0]; diff --git a/src/search/cartesian_abstractions/utils_landmarks.h b/src/search/cartesian_abstractions/utils_landmarks.h index dd9e808eeb..2eb585d325 100644 --- a/src/search/cartesian_abstractions/utils_landmarks.h +++ b/src/search/cartesian_abstractions/utils_landmarks.h @@ -23,7 +23,8 @@ extern std::shared_ptr get_landmark_graph( extern std::vector get_atom_landmarks( const landmarks::LandmarkGraph &graph); -extern utils::HashMap get_atom_to_landmark_map( +extern utils::HashMap +get_atom_to_landmark_map( const std::shared_ptr &graph); /* @@ -32,8 +33,7 @@ extern utils::HashMap get_atom_to_landmark_ atoms that have to be made true before the given node can be true for the first time. */ -extern VarToValues get_prev_landmarks( - const landmarks::LandmarkNode *node); +extern VarToValues get_prev_landmarks(const landmarks::LandmarkNode *node); } #endif diff --git a/src/search/command_line.cc b/src/search/command_line.cc index cada85ea46..53da3ce61c 100644 --- a/src/search/command_line.cc +++ b/src/search/command_line.cc @@ -24,7 +24,6 @@ static void input_error(const string &msg) { utils::exit_with(utils::ExitCode::SEARCH_INPUT_ERROR); } - static int parse_int_arg(const string &name, const string &value) { try { return stoi(value); @@ -35,7 +34,8 @@ static int parse_int_arg(const string &name, const string &value) { } } -static vector replace_old_style_predefinitions(const vector &args) { +static vector replace_old_style_predefinitions( + const vector &args) { vector new_args; int num_predefinitions = 0; bool has_search_argument = false; @@ -45,9 +45,11 @@ static vector replace_old_style_predefinitions(const vector &arg string arg = args[i]; bool is_last = (i == args.size() - 
1); - if (arg == "--evaluator" || arg == "--heuristic" || arg == "--landmarks") { + if (arg == "--evaluator" || arg == "--heuristic" || + arg == "--landmarks") { if (has_search_argument) - input_error("predefinitions are forbidden after the '--search' argument"); + input_error( + "predefinitions are forbidden after the '--search' argument"); if (is_last) input_error("missing argument after " + arg); ++i; @@ -57,7 +59,8 @@ static vector replace_old_style_predefinitions(const vector &arg string key = predefinition[0]; string definition = predefinition[1]; if (!utils::is_alpha_numeric(key)) - input_error("predefinition key has to be alphanumeric: '" + key + "'"); + input_error( + "predefinition key has to be alphanumeric: '" + key + "'"); new_search_argument << "let(" << key << "," << definition << ","; num_predefinitions++; } else if (arg == "--search") { @@ -81,7 +84,8 @@ static vector replace_old_style_predefinitions(const vector &arg return new_args; } -static shared_ptr parse_cmd_line_aux(const vector &args) { +static shared_ptr parse_cmd_line_aux( + const vector &args) { string plan_filename = "sas_plan"; int num_previously_generated_plans = 0; bool is_part_of_anytime_portfolio = false; @@ -106,7 +110,8 @@ static shared_ptr parse_cmd_line_aux(const vector &args plugins::Any constructed = decorated->construct(); search_algorithm = plugins::any_cast(constructed); } catch (const plugins::BadAnyCast &) { - input_error("Could not interpret the argument of --search as a search algorithm."); + input_error( + "Could not interpret the argument of --search as a search algorithm."); } catch (const utils::ContextError &e) { input_error(e.get_message()); } @@ -122,12 +127,15 @@ static shared_ptr parse_cmd_line_aux(const vector &args plugin_names.push_back(help_arg); } } - plugins::Registry registry = plugins::RawRegistry::instance()->construct_registry(); + plugins::Registry registry = + plugins::RawRegistry::instance()->construct_registry(); unique_ptr doc_printer; if 
(txt2tags) - doc_printer = make_unique(cout, registry); + doc_printer = + make_unique(cout, registry); else - doc_printer = make_unique(cout, registry); + doc_printer = + make_unique(cout, registry); if (plugin_names.empty()) { doc_printer->print_all(); } else { @@ -144,12 +152,14 @@ static shared_ptr parse_cmd_line_aux(const vector &args plan_filename = args[i]; } else if (arg == "--internal-previous-portfolio-plans") { if (is_last) - input_error("missing argument after --internal-previous-portfolio-plans"); + input_error( + "missing argument after --internal-previous-portfolio-plans"); ++i; is_part_of_anytime_portfolio = true; num_previously_generated_plans = parse_int_arg(arg, args[i]); if (num_previously_generated_plans < 0) - input_error("argument for --internal-previous-portfolio-plans must be positive"); + input_error( + "argument for --internal-previous-portfolio-plans must be positive"); } else { input_error("unknown option " + arg); } @@ -158,8 +168,10 @@ static shared_ptr parse_cmd_line_aux(const vector &args if (search_algorithm) { PlanManager &plan_manager = search_algorithm->get_plan_manager(); plan_manager.set_plan_filename(plan_filename); - plan_manager.set_num_previously_generated_plans(num_previously_generated_plans); - plan_manager.set_is_part_of_anytime_portfolio(is_part_of_anytime_portfolio); + plan_manager.set_num_previously_generated_plans( + num_previously_generated_plans); + plan_manager.set_is_part_of_anytime_portfolio( + is_part_of_anytime_portfolio); } return search_algorithm; } @@ -190,8 +202,8 @@ string get_revision_info() { } string get_usage(const string &progname) { - return "usage: \n" + - progname + " [OPTIONS] --search SEARCH < OUTPUT\n\n" + return "usage: \n" + progname + + " [OPTIONS] --search SEARCH < OUTPUT\n\n" "* SEARCH (SearchAlgorithm): configuration of the search algorithm\n" "* OUTPUT (filename): translator output\n\n" "Options:\n" diff --git a/src/search/evaluation_context.cc b/src/search/evaluation_context.cc index 
0863ac15c1..9103ea2e05 100644 --- a/src/search/evaluation_context.cc +++ b/src/search/evaluation_context.cc @@ -10,8 +10,7 @@ using namespace std; EvaluationContext::EvaluationContext( const EvaluatorCache &cache, const State &state, int g_value, - bool is_preferred, SearchStatistics *statistics, - bool calculate_preferred) + bool is_preferred, SearchStatistics *statistics, bool calculate_preferred) : cache(cache), state(state), g_value(g_value), @@ -20,34 +19,34 @@ EvaluationContext::EvaluationContext( calculate_preferred(calculate_preferred) { } - EvaluationContext::EvaluationContext( - const EvaluationContext &other, int g_value, - bool is_preferred, SearchStatistics *statistics, bool calculate_preferred) - : EvaluationContext(other.cache, other.state, g_value, is_preferred, - statistics, calculate_preferred) { + const EvaluationContext &other, int g_value, bool is_preferred, + SearchStatistics *statistics, bool calculate_preferred) + : EvaluationContext( + other.cache, other.state, g_value, is_preferred, statistics, + calculate_preferred) { } EvaluationContext::EvaluationContext( const State &state, int g_value, bool is_preferred, SearchStatistics *statistics, bool calculate_preferred) - : EvaluationContext(EvaluatorCache(), state, g_value, is_preferred, - statistics, calculate_preferred) { + : EvaluationContext( + EvaluatorCache(), state, g_value, is_preferred, statistics, + calculate_preferred) { } EvaluationContext::EvaluationContext( - const State &state, - SearchStatistics *statistics, bool calculate_preferred) - : EvaluationContext(EvaluatorCache(), state, INVALID, false, - statistics, calculate_preferred) { + const State &state, SearchStatistics *statistics, bool calculate_preferred) + : EvaluationContext( + EvaluatorCache(), state, INVALID, false, statistics, + calculate_preferred) { } const EvaluationResult &EvaluationContext::get_result(Evaluator *evaluator) { EvaluationResult &result = cache[evaluator]; if (result.is_uninitialized()) { result = 
evaluator->compute_result(*this); - if (statistics && - evaluator->is_used_for_counting_evaluations() && + if (statistics && evaluator->is_used_for_counting_evaluations() && result.get_count_evaluation()) { statistics->inc_evaluations(); } @@ -87,12 +86,11 @@ int EvaluationContext::get_evaluator_value_or_infinity(Evaluator *eval) { return get_result(eval).get_evaluator_value(); } -const vector & -EvaluationContext::get_preferred_operators(Evaluator *eval) { +const vector &EvaluationContext::get_preferred_operators( + Evaluator *eval) { return get_result(eval).get_preferred_operators(); } - bool EvaluationContext::get_calculate_preferred() const { return calculate_preferred; } diff --git a/src/search/evaluation_context.h b/src/search/evaluation_context.h index ad42d22ce7..acb8777cca 100644 --- a/src/search/evaluation_context.h +++ b/src/search/evaluation_context.h @@ -62,9 +62,8 @@ class EvaluationContext { TODO: Can we reuse caches? Can we move them instead of copying them? */ EvaluationContext( - const EvaluationContext &other, - int g_value, bool is_preferred, SearchStatistics *statistics, - bool calculate_preferred = false); + const EvaluationContext &other, int g_value, bool is_preferred, + SearchStatistics *statistics, bool calculate_preferred = false); /* Create new heuristic cache for caching heuristic values. Used for example by eager search. @@ -85,8 +84,8 @@ class EvaluationContext { contexts that don't need this information. 
*/ EvaluationContext( - const State &state, - SearchStatistics *statistics = nullptr, bool calculate_preferred = false); + const State &state, SearchStatistics *statistics = nullptr, + bool calculate_preferred = false); const EvaluationResult &get_result(Evaluator *eval); const EvaluatorCache &get_cache() const; diff --git a/src/search/evaluator.cc b/src/search/evaluator.cc index 6fb87c64ce..abcb518ba3 100644 --- a/src/search/evaluator.cc +++ b/src/search/evaluator.cc @@ -8,7 +8,6 @@ using namespace std; - Evaluator::Evaluator( bool use_for_reporting_minima, bool use_for_boosting, bool use_for_counting_evaluations, const string &description, @@ -37,8 +36,7 @@ void Evaluator::report_value_for_initial_state( } } -void Evaluator::report_new_minimum_value( - const EvaluationResult &result) const { +void Evaluator::report_new_minimum_value(const EvaluationResult &result) const { if (log.is_at_least_normal()) { assert(use_for_reporting_minima); log << "New best heuristic value for " << description << ": " @@ -77,8 +75,7 @@ int Evaluator::get_cached_estimate(const State &) const { void add_evaluator_options_to_feature( plugins::Feature &feature, const string &description) { feature.add_option( - "description", - "description used to identify evaluator in logs", + "description", "description used to identify evaluator in logs", "\"" + description + "\""); utils::add_log_options_to_feature(feature); } @@ -87,11 +84,11 @@ tuple get_evaluator_arguments_from_options( const plugins::Options &opts) { return tuple_cat( make_tuple(opts.get("description")), - utils::get_log_arguments_from_options(opts) - ); + utils::get_log_arguments_from_options(opts)); } -static class EvaluatorCategoryPlugin : public plugins::TypedCategoryPlugin { +static class EvaluatorCategoryPlugin + : public plugins::TypedCategoryPlugin { public: EvaluatorCategoryPlugin() : TypedCategoryPlugin("Evaluator") { document_synopsis( @@ -110,5 +107,4 @@ static class EvaluatorCategoryPlugin : public 
plugins::TypedCategoryPlugin &evals) = 0; - virtual void notify_initial_state(const State & /*initial_state*/) { } virtual void notify_state_transition( - const State & /*parent_state*/, - OperatorID /*op_id*/, + const State & /*parent_state*/, OperatorID /*op_id*/, const State & /*state*/) { } diff --git a/src/search/evaluator_cache.cc b/src/search/evaluator_cache.cc index 63ce569d00..34d87a64bc 100644 --- a/src/search/evaluator_cache.cc +++ b/src/search/evaluator_cache.cc @@ -2,7 +2,6 @@ using namespace std; - EvaluationResult &EvaluatorCache::operator[](Evaluator *eval) { return eval_results[eval]; } diff --git a/src/search/evaluators/combining_evaluator.cc b/src/search/evaluators/combining_evaluator.cc index 4047f2b51f..1f6d01b63c 100644 --- a/src/search/evaluators/combining_evaluator.cc +++ b/src/search/evaluators/combining_evaluator.cc @@ -10,8 +10,8 @@ using namespace std; namespace combining_evaluator { CombiningEvaluator::CombiningEvaluator( - const vector> &evals, - const string &description, utils::Verbosity verbosity) + const vector> &evals, const string &description, + utils::Verbosity verbosity) : Evaluator(false, false, false, description, verbosity), subevaluators(evals) { utils::verify_list_not_empty(evals, "evals"); @@ -34,7 +34,8 @@ EvaluationResult CombiningEvaluator::compute_result( // Collect component values. Return infinity if any is infinite. 
for (const shared_ptr &subevaluator : subevaluators) { - int value = eval_context.get_evaluator_value_or_infinity(subevaluator.get()); + int value = + eval_context.get_evaluator_value_or_infinity(subevaluator.get()); if (value == EvaluationResult::INFTY) { result.set_evaluator_value(value); return result; @@ -61,11 +62,9 @@ void add_combining_evaluator_options_to_feature( } tuple>, const string, utils::Verbosity> -get_combining_evaluator_arguments_from_options( - const plugins::Options &opts) { +get_combining_evaluator_arguments_from_options(const plugins::Options &opts) { return tuple_cat( make_tuple(opts.get_list>("evals")), - get_evaluator_arguments_from_options(opts) - ); + get_evaluator_arguments_from_options(opts)); } } diff --git a/src/search/evaluators/combining_evaluator.h b/src/search/evaluators/combining_evaluator.h index d6fd893ffe..81d81c0b0d 100644 --- a/src/search/evaluators/combining_evaluator.h +++ b/src/search/evaluators/combining_evaluator.h @@ -46,10 +46,10 @@ class CombiningEvaluator : public Evaluator { extern void add_combining_evaluator_options_to_feature( plugins::Feature &feature, const std::string &description); -extern std::tuple>, - const std::string, utils::Verbosity> -get_combining_evaluator_arguments_from_options( - const plugins::Options &opts); +extern std::tuple< + std::vector>, const std::string, + utils::Verbosity> +get_combining_evaluator_arguments_from_options(const plugins::Options &opts); } #endif diff --git a/src/search/evaluators/const_evaluator.cc b/src/search/evaluators/const_evaluator.cc index 60f3a6badc..e3f10e3c2d 100644 --- a/src/search/evaluators/const_evaluator.cc +++ b/src/search/evaluators/const_evaluator.cc @@ -7,8 +7,7 @@ using namespace std; namespace const_evaluator { ConstEvaluator::ConstEvaluator( int value, const string &description, utils::Verbosity verbosity) - : Evaluator(false, false, false, description, verbosity), - value(value) { + : Evaluator(false, false, false, description, verbosity), 
value(value) { } EvaluationResult ConstEvaluator::compute_result(EvaluationContext &) { @@ -26,19 +25,15 @@ class ConstEvaluatorFeature document_synopsis("Returns a constant value."); add_option( - "value", - "the constant value", - "1", + "value", "the constant value", "1", plugins::Bounds("0", "infinity")); add_evaluator_options_to_feature(*this, "const"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get("value"), - get_evaluator_arguments_from_options(opts) - ); + opts.get("value"), get_evaluator_arguments_from_options(opts)); } }; diff --git a/src/search/evaluators/const_evaluator.h b/src/search/evaluators/const_evaluator.h index 19523c021c..bb90ae737d 100644 --- a/src/search/evaluators/const_evaluator.h +++ b/src/search/evaluators/const_evaluator.h @@ -17,11 +17,10 @@ class ConstEvaluator : public Evaluator { public: ConstEvaluator( - int value, - const std::string &description, - utils::Verbosity verbosity); + int value, const std::string &description, utils::Verbosity verbosity); virtual void get_path_dependent_evaluators( - std::set &) override {} + std::set &) override { + } }; } diff --git a/src/search/evaluators/g_evaluator.cc b/src/search/evaluators/g_evaluator.cc index 303ada74cc..f3a04b723d 100644 --- a/src/search/evaluators/g_evaluator.cc +++ b/src/search/evaluators/g_evaluator.cc @@ -2,25 +2,23 @@ #include "../evaluation_context.h" #include "../evaluation_result.h" + #include "../plugins/plugin.h" using namespace std; namespace g_evaluator { -GEvaluator::GEvaluator(const string &description, - utils::Verbosity verbosity) +GEvaluator::GEvaluator(const string &description, utils::Verbosity verbosity) : Evaluator(false, false, false, description, verbosity) { } - EvaluationResult GEvaluator::compute_result(EvaluationContext &eval_context) { EvaluationResult result; 
result.set_evaluator_value(eval_context.get_g_value()); return result; } -class GEvaluatorFeature - : public plugins::TypedFeature { +class GEvaluatorFeature : public plugins::TypedFeature { public: GEvaluatorFeature() : TypedFeature("g") { document_subcategory("evaluators_basic"); @@ -30,11 +28,10 @@ class GEvaluatorFeature add_evaluator_options_to_feature(*this, "g"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - get_evaluator_arguments_from_options(opts) - ); + get_evaluator_arguments_from_options(opts)); } }; diff --git a/src/search/evaluators/g_evaluator.h b/src/search/evaluators/g_evaluator.h index 70919d26a2..7aace03131 100644 --- a/src/search/evaluators/g_evaluator.h +++ b/src/search/evaluators/g_evaluator.h @@ -6,13 +6,14 @@ namespace g_evaluator { class GEvaluator : public Evaluator { public: - GEvaluator( - const std::string &description, utils::Verbosity verbosity); + GEvaluator(const std::string &description, utils::Verbosity verbosity); virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; - virtual void get_path_dependent_evaluators(std::set &) override {} + virtual void get_path_dependent_evaluators( + std::set &) override { + } }; } diff --git a/src/search/evaluators/max_evaluator.cc b/src/search/evaluators/max_evaluator.cc index 4e3bc27975..b86da86095 100644 --- a/src/search/evaluators/max_evaluator.cc +++ b/src/search/evaluators/max_evaluator.cc @@ -8,8 +8,8 @@ using namespace std; namespace max_evaluator { MaxEvaluator::MaxEvaluator( - const vector> &evals, - const string &description, utils::Verbosity verbosity) + const vector> &evals, const string &description, + utils::Verbosity verbosity) : CombiningEvaluator(evals, description, verbosity) { } @@ -28,14 +28,13 @@ class MaxEvaluatorFeature MaxEvaluatorFeature() : TypedFeature("max") { 
document_subcategory("evaluators_basic"); document_title("Max evaluator"); - document_synopsis( - "Calculates the maximum of the sub-evaluators."); + document_synopsis("Calculates the maximum of the sub-evaluators."); combining_evaluator::add_combining_evaluator_options_to_feature( *this, "max"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( combining_evaluator::get_combining_evaluator_arguments_from_options( opts)); diff --git a/src/search/evaluators/pref_evaluator.cc b/src/search/evaluators/pref_evaluator.cc index c4275edbf8..32b33621ff 100644 --- a/src/search/evaluators/pref_evaluator.cc +++ b/src/search/evaluators/pref_evaluator.cc @@ -2,6 +2,7 @@ #include "../evaluation_context.h" #include "../evaluation_result.h" + #include "../plugins/plugin.h" using namespace std; @@ -33,11 +34,10 @@ class PrefEvaluatorFeature add_evaluator_options_to_feature(*this, "pref"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - get_evaluator_arguments_from_options(opts) - ); + get_evaluator_arguments_from_options(opts)); } }; diff --git a/src/search/evaluators/pref_evaluator.h b/src/search/evaluators/pref_evaluator.h index 68a9dbce66..29b4023a31 100644 --- a/src/search/evaluators/pref_evaluator.h +++ b/src/search/evaluators/pref_evaluator.h @@ -9,12 +9,13 @@ namespace pref_evaluator { class PrefEvaluator : public Evaluator { public: - PrefEvaluator( - const std::string &description, utils::Verbosity verbosity); + PrefEvaluator(const std::string &description, utils::Verbosity verbosity); virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; - virtual void get_path_dependent_evaluators(std::set &) override 
{} + virtual void get_path_dependent_evaluators( + std::set &) override { + } }; } diff --git a/src/search/evaluators/subcategory.cc b/src/search/evaluators/subcategory.cc index 2a9ad66625..365bdea554 100644 --- a/src/search/evaluators/subcategory.cc +++ b/src/search/evaluators/subcategory.cc @@ -6,6 +6,5 @@ static class EvaluatorGroupPlugin : public plugins::SubcategoryPlugin { EvaluatorGroupPlugin() : SubcategoryPlugin("evaluators_basic") { document_title("Basic Evaluators"); } -} -_subcategory_plugin; +} _subcategory_plugin; } diff --git a/src/search/evaluators/sum_evaluator.cc b/src/search/evaluators/sum_evaluator.cc index 6fb98d308c..0c6d3f4c0c 100644 --- a/src/search/evaluators/sum_evaluator.cc +++ b/src/search/evaluators/sum_evaluator.cc @@ -8,8 +8,8 @@ using namespace std; namespace sum_evaluator { SumEvaluator::SumEvaluator( - const vector> &evals, - const string &description, utils::Verbosity verbosity) + const vector> &evals, const string &description, + utils::Verbosity verbosity) : CombiningEvaluator(evals, description, verbosity) { } @@ -35,8 +35,8 @@ class SumEvaluatorFeature *this, "sum"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( combining_evaluator::get_combining_evaluator_arguments_from_options( opts)); diff --git a/src/search/evaluators/weighted_evaluator.cc b/src/search/evaluators/weighted_evaluator.cc index 47dde6b0a5..f549c0caed 100644 --- a/src/search/evaluators/weighted_evaluator.cc +++ b/src/search/evaluators/weighted_evaluator.cc @@ -2,6 +2,7 @@ #include "../evaluation_context.h" #include "../evaluation_result.h" + #include "../plugins/plugin.h" #include @@ -11,14 +12,13 @@ using namespace std; namespace weighted_evaluator { WeightedEvaluator::WeightedEvaluator( - const shared_ptr &eval, int weight, - const string &description, utils::Verbosity verbosity) + 
const shared_ptr &eval, int weight, const string &description, + utils::Verbosity verbosity) : Evaluator(false, false, false, description, verbosity), evaluator(eval), weight(weight) { } - bool WeightedEvaluator::dead_ends_are_reliable() const { return evaluator->dead_ends_are_reliable(); } @@ -54,13 +54,11 @@ class WeightedEvaluatorFeature add_evaluator_options_to_feature(*this, "weight"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get>("eval"), - opts.get("weight"), - get_evaluator_arguments_from_options(opts) - ); + opts.get>("eval"), opts.get("weight"), + get_evaluator_arguments_from_options(opts)); } }; diff --git a/src/search/evaluators/weighted_evaluator.h b/src/search/evaluators/weighted_evaluator.h index edfdf769b1..8a8ff45d41 100644 --- a/src/search/evaluators/weighted_evaluator.h +++ b/src/search/evaluators/weighted_evaluator.h @@ -22,7 +22,8 @@ class WeightedEvaluator : public Evaluator { virtual bool dead_ends_are_reliable() const override; virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; - virtual void get_path_dependent_evaluators(std::set &evals) override; + virtual void get_path_dependent_evaluators( + std::set &evals) override; }; } diff --git a/src/search/heuristic.cc b/src/search/heuristic.cc index 251910fec5..5c73b24899 100644 --- a/src/search/heuristic.cc +++ b/src/search/heuristic.cc @@ -14,11 +14,11 @@ using namespace std; Heuristic::Heuristic( - const shared_ptr &transform, - bool cache_estimates, const string &description, - utils::Verbosity verbosity) + const shared_ptr &transform, bool cache_estimates, + const string &description, utils::Verbosity verbosity) : Evaluator(true, true, true, description, verbosity), - heuristic_cache(HEntry(NO_VALUE, true)), //TODO: is true really a good idea here? 
+ heuristic_cache( + HEntry(NO_VALUE, true)), // TODO: is true really a good idea here? cache_evaluator_values(cache_estimates), task(transform), task_proxy(*task) { @@ -28,7 +28,8 @@ Heuristic::~Heuristic() { } void Heuristic::set_preferred(const OperatorProxy &op) { - preferred_operators.insert(op.get_ancestor_operator_id(tasks::g_root_task.get())); + preferred_operators.insert( + op.get_ancestor_operator_id(tasks::g_root_task.get())); } State Heuristic::convert_ancestor_state(const State &ancestor_state) const { @@ -42,7 +43,8 @@ void add_heuristic_options_to_feature( "Optional task transformation for the heuristic." " Currently, adapt_costs() and no_transform() are available.", "no_transform()"); - feature.add_option("cache_estimates", "cache heuristic estimates", "true"); + feature.add_option( + "cache_estimates", "cache heuristic estimates", "true"); add_evaluator_options_to_feature(feature, description); } @@ -51,8 +53,7 @@ get_heuristic_arguments_from_options(const plugins::Options &opts) { return tuple_cat( make_tuple( opts.get>("transform"), - opts.get("cache_estimates") - ), + opts.get("cache_estimates")), get_evaluator_arguments_from_options(opts)); } @@ -97,7 +98,8 @@ EvaluationResult Heuristic::compute_result(EvaluationContext &eval_context) { OperatorsProxy global_operators = global_task_proxy.get_operators(); if (heuristic != EvaluationResult::INFTY) { for (OperatorID op_id : preferred_operators) - assert(task_properties::is_applicable(global_operators[op_id], state)); + assert( + task_properties::is_applicable(global_operators[op_id], state)); } #endif diff --git a/src/search/heuristic.h b/src/search/heuristic.h index 379f31b734..5b46b2a0c3 100644 --- a/src/search/heuristic.h +++ b/src/search/heuristic.h @@ -25,8 +25,7 @@ class Heuristic : public Evaluator { int h : 31; unsigned int dirty : 1; - HEntry(int h, bool dirty) - : h(h), dirty(dirty) { + HEntry(int h, bool dirty) : h(h), dirty(dirty) { } }; static_assert(sizeof(HEntry) == 4, "HEntry has 
unexpected size."); @@ -47,19 +46,23 @@ class Heuristic : public Evaluator { protected: /* Cache for saving h values - Before accessing this cache always make sure that the cache_evaluator_values - flag is set to true - as soon as the cache is accessed it will create - entries for all existing states + Before accessing this cache always make sure that the + cache_evaluator_values flag is set to true - as soon as the cache is + accessed it will create entries for all existing states */ PerStateInformation heuristic_cache; bool cache_evaluator_values; - // Hold a reference to the task implementation and pass it to objects that need it. + // Hold a reference to the task implementation and pass it to objects that + // need it. const std::shared_ptr task; // Use task_proxy to access task information. TaskProxy task_proxy; - enum {DEAD_END = -1, NO_VALUE = -2}; + enum { + DEAD_END = -1, + NO_VALUE = -2 + }; virtual int compute_heuristic(const State &ancestor_state) = 0; @@ -74,9 +77,8 @@ class Heuristic : public Evaluator { public: Heuristic( - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); virtual ~Heuristic() override; virtual void get_path_dependent_evaluators( diff --git a/src/search/heuristics/additive_heuristic.cc b/src/search/heuristics/additive_heuristic.cc index f8cc39fd0e..8f44399d45 100644 --- a/src/search/heuristics/additive_heuristic.cc +++ b/src/search/heuristics/additive_heuristic.cc @@ -1,7 +1,6 @@ #include "additive_heuristic.h" #include "../plugins/plugin.h" - #include "../task_utils/task_properties.h" #include "../utils/logging.h" @@ -14,12 +13,10 @@ namespace additive_heuristic { const int AdditiveHeuristic::MAX_COST_VALUE; AdditiveHeuristic::AdditiveHeuristic( - tasks::AxiomHandlingType axioms, - const shared_ptr &transform, bool cache_estimates, - const 
string &description, utils::Verbosity verbosity) + tasks::AxiomHandlingType axioms, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) : RelaxationHeuristic( - axioms, transform, cache_estimates, description, - verbosity), + axioms, transform, cache_estimates, description, verbosity), did_write_overflow_warning(false) { if (log.is_at_least_normal()) { log << "Initializing additive heuristic..." << endl; @@ -87,8 +84,7 @@ void AdditiveHeuristic::relaxed_exploration() { --unary_op->unsatisfied_preconditions; assert(unary_op->unsatisfied_preconditions >= 0); if (unary_op->unsatisfied_preconditions == 0) - enqueue_if_necessary(unary_op->effect, - unary_op->cost, op_id); + enqueue_if_necessary(unary_op->effect, unary_op->cost, op_id); } } } @@ -155,7 +151,8 @@ class AdditiveHeuristicFeature AdditiveHeuristicFeature() : TypedFeature("add") { document_title("Additive heuristic"); - relaxation_heuristic::add_relaxation_heuristic_options_to_feature(*this, "add"); + relaxation_heuristic::add_relaxation_heuristic_options_to_feature( + *this, "add"); document_language_support("action costs", "supported"); document_language_support("conditional effects", "supported"); @@ -167,11 +164,11 @@ class AdditiveHeuristicFeature document_property("preferred operators", "yes"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - relaxation_heuristic::get_relaxation_heuristic_arguments_from_options(opts) - ); + relaxation_heuristic:: + get_relaxation_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/additive_heuristic.h b/src/search/heuristics/additive_heuristic.h index ad76e40417..a0192dfcd4 100644 --- a/src/search/heuristics/additive_heuristic.h +++ b/src/search/heuristics/additive_heuristic.h @@ -11,8 +11,8 @@ class State; 
namespace additive_heuristic { -using relaxation_heuristic::PropID; using relaxation_heuristic::OpID; +using relaxation_heuristic::PropID; using relaxation_heuristic::NO_OP; @@ -67,9 +67,8 @@ class AdditiveHeuristic : public relaxation_heuristic::RelaxationHeuristic { public: AdditiveHeuristic( tasks::AxiomHandlingType axioms, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); /* TODO: The two methods below are temporarily needed for the CEGAR diff --git a/src/search/heuristics/array_pool.h b/src/search/heuristics/array_pool.h index c1e878fb4f..785acc05de 100644 --- a/src/search/heuristics/array_pool.h +++ b/src/search/heuristics/array_pool.h @@ -24,12 +24,10 @@ using Value = int; class ArrayPoolIndex { friend class ArrayPool; int position; - ArrayPoolIndex(int position) - : position(position) { + ArrayPoolIndex(int position) : position(position) { } public: - ArrayPoolIndex() - : position(INVALID_INDEX) { + ArrayPoolIndex() : position(INVALID_INDEX) { } }; @@ -49,9 +47,7 @@ class ArrayPoolSlice { Iterator first; Iterator last; - ArrayPoolSlice(Iterator first, Iterator last) - : first(first), - last(last) { + ArrayPoolSlice(Iterator first, Iterator last) : first(first), last(last) { } }; @@ -65,10 +61,12 @@ class ArrayPool { } ArrayPoolSlice get_slice(ArrayPoolIndex index, int size) const { - assert(index.position >= 0 && - size >= 0 && - index.position + size <= static_cast(data.size())); - return ArrayPoolSlice(data.begin() + index.position, data.begin() + index.position + size); + assert( + index.position >= 0 && size >= 0 && + index.position + size <= static_cast(data.size())); + return ArrayPoolSlice( + data.begin() + index.position, + data.begin() + index.position + size); } }; } diff --git a/src/search/heuristics/blind_search_heuristic.cc 
b/src/search/heuristics/blind_search_heuristic.cc index 2a215b36fc..e2de3ef5ca 100644 --- a/src/search/heuristics/blind_search_heuristic.cc +++ b/src/search/heuristics/blind_search_heuristic.cc @@ -1,7 +1,6 @@ #include "blind_search_heuristic.h" #include "../plugins/plugin.h" - #include "../task_utils/task_properties.h" #include "../utils/logging.h" @@ -16,8 +15,7 @@ BlindSearchHeuristic::BlindSearchHeuristic( const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) : Heuristic(transform, cache_estimates, description, verbosity), - min_operator_cost( - task_properties::get_min_operator_cost(task_proxy)) { + min_operator_cost(task_properties::get_min_operator_cost(task_proxy)) { if (log.is_at_least_normal()) { log << "Initializing blind search heuristic..." << endl; } @@ -52,11 +50,10 @@ class BlindSearchHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/blind_search_heuristic.h b/src/search/heuristics/blind_search_heuristic.h index 9b88be0047..4b3abd0d05 100644 --- a/src/search/heuristics/blind_search_heuristic.h +++ b/src/search/heuristics/blind_search_heuristic.h @@ -10,9 +10,8 @@ class BlindSearchHeuristic : public Heuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: BlindSearchHeuristic( - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/heuristics/cea_heuristic.cc 
b/src/search/heuristics/cea_heuristic.cc index 12aba20f85..bb7ebdd9b1 100644 --- a/src/search/heuristics/cea_heuristic.cc +++ b/src/search/heuristics/cea_heuristic.cc @@ -3,7 +3,6 @@ #include "domain_transition_graph.h" #include "../plugins/plugin.h" - #include "../task_utils/task_properties.h" #include "../utils/logging.h" @@ -60,9 +59,12 @@ struct LocalTransition { LocalTransition( LocalProblemNode *source_, LocalProblemNode *target_, const ValueTransitionLabel *label_, int action_cost_) - : source(source_), target(target_), - label(label_), action_cost(action_cost_), - target_cost(-1), unreached_conditions(-1) { + : source(source_), + target(target_), + label(label_), + action_cost(action_cost_), + target_cost(-1), + unreached_conditions(-1) { // target_cost and unreached_cost are initialized by // expand_transition. } @@ -71,7 +73,6 @@ struct LocalTransition { } }; - struct LocalProblemNode { // Attributes fixed during initialization. LocalProblem *owner; @@ -110,8 +111,7 @@ struct LocalProblem { vector nodes; vector *context_variables; public: - LocalProblem() - : base_priority(-1) { + LocalProblem() : base_priority(-1) { } ~LocalProblem() { @@ -120,7 +120,7 @@ struct LocalProblem { LocalProblem *ContextEnhancedAdditiveHeuristic::get_local_problem( int var_no, int value) { - LocalProblem * &table_entry = local_problem_index[var_no][value]; + LocalProblem *&table_entry = local_problem_index[var_no][value]; if (!table_entry) { table_entry = build_problem_for_variable(var_no); local_problems.push_back(table_entry); @@ -151,9 +151,9 @@ LocalProblem *ContextEnhancedAdditiveHeuristic::build_problem_for_variable( int target_value = dtg_trans.target->value; LocalProblemNode &target = problem->nodes[target_value]; for (const ValueTransitionLabel &label : dtg_trans.labels) { - OperatorProxy op = label.is_axiom ? - task_proxy.get_axioms()[label.op_id] : - task_proxy.get_operators()[label.op_id]; + OperatorProxy op = + label.is_axiom ? 
task_proxy.get_axioms()[label.op_id] + : task_proxy.get_operators()[label.op_id]; LocalTransition trans(&node, &target, &label, op.get_cost()); node.outgoing_transitions.push_back(trans); } @@ -180,7 +180,8 @@ LocalProblem *ContextEnhancedAdditiveHeuristic::build_problem_for_goal() const { goals.push_back(LocalAssignment(goal_no, goal_value)); } vector no_effects; - ValueTransitionLabel *label = new ValueTransitionLabel(0, true, goals, no_effects); + ValueTransitionLabel *label = + new ValueTransitionLabel(0, true, goals, no_effects); LocalTransition trans(&problem->nodes[0], &problem->nodes[1], label, 0); problem->nodes[0].outgoing_transitions.push_back(trans); return problem; @@ -213,8 +214,8 @@ bool ContextEnhancedAdditiveHeuristic::is_local_problem_set_up( } void ContextEnhancedAdditiveHeuristic::set_up_local_problem( - LocalProblem *problem, int base_priority, - int start_value, const State &state) { + LocalProblem *problem, int base_priority, int start_value, + const State &state) { assert(problem->base_priority == -1); problem->base_priority = base_priority; @@ -294,9 +295,8 @@ void ContextEnhancedAdditiveHeuristic::expand_transition( trans->unreached_conditions = 0; const vector &precond = trans->label->precond; - vector::const_iterator - curr_precond = precond.begin(), - last_precond = precond.end(); + vector::const_iterator curr_precond = precond.begin(), + last_precond = precond.end(); vector::const_iterator context = trans->source->context.begin(); vector::const_iterator parent_vars = @@ -311,8 +311,8 @@ void ContextEnhancedAdditiveHeuristic::expand_transition( if (current_val == precond_value) continue; - LocalProblem *subproblem = get_local_problem( - precond_var_no, current_val); + LocalProblem *subproblem = + get_local_problem(precond_var_no, current_val); if (!is_local_problem_set_up(subproblem)) { set_up_local_problem( @@ -359,14 +359,16 @@ void ContextEnhancedAdditiveHeuristic::mark_helpful_transitions( assert(node->cost >= 0 && node->cost < 
numeric_limits::max()); LocalTransition *first_on_path = node->reached_by; if (first_on_path) { - node->reached_by = nullptr; // Clear to avoid revisiting this node later. + node->reached_by = + nullptr; // Clear to avoid revisiting this node later. if (first_on_path->target_cost == first_on_path->action_cost) { // Transition possibly applicable. const ValueTransitionLabel &label = *first_on_path->label; - OperatorProxy op = label.is_axiom ? - task_proxy.get_axioms()[label.op_id] : - task_proxy.get_operators()[label.op_id]; - if (min_action_cost != 0 || task_properties::is_applicable(op, state)) { + OperatorProxy op = label.is_axiom + ? task_proxy.get_axioms()[label.op_id] + : task_proxy.get_operators()[label.op_id]; + if (min_action_cost != 0 || + task_properties::is_applicable(op, state)) { // If there are no zero-cost actions, the target_cost/ // action_cost test above already guarantees applicability. assert(!op.is_axiom()); @@ -409,18 +411,17 @@ int ContextEnhancedAdditiveHeuristic::compute_heuristic( } ContextEnhancedAdditiveHeuristic::ContextEnhancedAdditiveHeuristic( - tasks::AxiomHandlingType axioms, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity) - : Heuristic(tasks::get_default_value_axioms_task_if_needed( - transform, axioms), - cache_estimates, description, verbosity), + tasks::AxiomHandlingType axioms, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) + : Heuristic( + tasks::get_default_value_axioms_task_if_needed(transform, axioms), + cache_estimates, description, verbosity), min_action_cost(task_properties::get_min_operator_cost(task_proxy)) { if (log.is_at_least_normal()) { log << "Initializing context-enhanced additive heuristic..." 
<< endl; } - DTGFactory factory(task_proxy, true, [](int, int) {return false;}); + DTGFactory factory(task_proxy, true, [](int, int) { return false; }); transition_graphs = factory.build_dtgs(); goal_problem = build_problem_for_goal(); @@ -429,7 +430,8 @@ ContextEnhancedAdditiveHeuristic::ContextEnhancedAdditiveHeuristic( VariablesProxy vars = task_proxy.get_variables(); local_problem_index.resize(vars.size()); for (VariableProxy var : vars) - local_problem_index[var.get_id()].resize(var.get_domain_size(), nullptr); + local_problem_index[var.get_id()].resize( + var.get_domain_size(), nullptr); } ContextEnhancedAdditiveHeuristic::~ContextEnhancedAdditiveHeuristic() { @@ -448,7 +450,8 @@ bool ContextEnhancedAdditiveHeuristic::dead_ends_are_reliable() const { } class ContextEnhancedAdditiveHeuristicFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + Evaluator, ContextEnhancedAdditiveHeuristic> { public: ContextEnhancedAdditiveHeuristicFeature() : TypedFeature("cea") { document_title("Context-enhanced additive heuristic"); @@ -466,12 +469,12 @@ class ContextEnhancedAdditiveHeuristicFeature document_property("preferred operators", "yes"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + ContextEnhancedAdditiveHeuristic>( tasks::get_axioms_arguments_from_options(opts), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/cea_heuristic.h b/src/search/heuristics/cea_heuristic.h index 1774a3f8f8..2f91d532da 100644 --- a/src/search/heuristics/cea_heuristic.h +++ b/src/search/heuristics/cea_heuristic.h @@ -18,7 +18,8 @@ struct LocalProblemNode; struct LocalTransition; class ContextEnhancedAdditiveHeuristic : public Heuristic { - std::vector> 
transition_graphs; + std::vector> + transition_graphs; std::vector local_problems; std::vector> local_problem_index; LocalProblem *goal_problem; @@ -36,8 +37,9 @@ class ContextEnhancedAdditiveHeuristic : public Heuristic { void add_to_heap(LocalProblemNode *node); bool is_local_problem_set_up(const LocalProblem *problem) const; - void set_up_local_problem(LocalProblem *problem, int base_priority, - int start_value, const State &state); + void set_up_local_problem( + LocalProblem *problem, int base_priority, int start_value, + const State &state); void try_to_fire_transition(LocalTransition *trans); void expand_node(LocalProblemNode *node); @@ -53,9 +55,8 @@ class ContextEnhancedAdditiveHeuristic : public Heuristic { public: ContextEnhancedAdditiveHeuristic( tasks::AxiomHandlingType axioms, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); ~ContextEnhancedAdditiveHeuristic(); virtual bool dead_ends_are_reliable() const override; }; diff --git a/src/search/heuristics/cg_cache.cc b/src/search/heuristics/cg_cache.cc index 2479136d34..b0c747bb07 100644 --- a/src/search/heuristics/cg_cache.cc +++ b/src/search/heuristics/cg_cache.cc @@ -17,7 +17,8 @@ using namespace std; namespace cg_heuristic { const int CGCache::NOT_COMPUTED; -CGCache::CGCache(const TaskProxy &task_proxy, int max_cache_size, utils::LogProxy &log) +CGCache::CGCache( + const TaskProxy &task_proxy, int max_cache_size, utils::LogProxy &log) : task_proxy(task_proxy) { if (log.is_at_least_normal()) { log << "Initializing heuristic cache... 
" << flush; @@ -45,21 +46,22 @@ CGCache::CGCache(const TaskProxy &task_proxy, int max_cache_size, utils::LogProx for (size_t i = 0; i < num_affectors; ++i) { int affector = depends_on[var][i]; assert(affector < var); - depends_on[var].insert(depends_on[var].end(), - depends_on[affector].begin(), - depends_on[affector].end()); + depends_on[var].insert( + depends_on[var].end(), depends_on[affector].begin(), + depends_on[affector].end()); } sort(depends_on[var].begin(), depends_on[var].end()); - depends_on[var].erase(unique(depends_on[var].begin(), depends_on[var].end()), - depends_on[var].end()); + depends_on[var].erase( + unique(depends_on[var].begin(), depends_on[var].end()), + depends_on[var].end()); } cache.resize(var_count); helpful_transition_cache.resize(var_count); for (int var = 0; var < var_count; ++var) { - int required_cache_size = compute_required_cache_size( - var, depends_on[var], max_cache_size); + int required_cache_size = + compute_required_cache_size(var, depends_on[var], max_cache_size); if (required_cache_size != -1) { cache[var].resize(required_cache_size, NOT_COMPUTED); helpful_transition_cache[var].resize(required_cache_size, nullptr); @@ -83,8 +85,8 @@ int CGCache::compute_required_cache_size( VariablesProxy variables = task_proxy.get_variables(); int var_domain = variables[var_id].get_domain_size(); - if (!utils::is_product_within_limit(var_domain, var_domain - 1, - max_cache_size)) + if (!utils::is_product_within_limit( + var_domain, var_domain - 1, max_cache_size)) return -1; int required_size = var_domain * (var_domain - 1); @@ -102,8 +104,8 @@ int CGCache::compute_required_cache_size( if (cache[depend_var_id].empty()) return -1; - if (!utils::is_product_within_limit(required_size, depend_var_domain, - max_cache_size)) + if (!utils::is_product_within_limit( + required_size, depend_var_domain, max_cache_size)) return -1; required_size *= depend_var_domain; @@ -112,8 +114,8 @@ int CGCache::compute_required_cache_size( return required_size; 
} -int CGCache::get_index(int var, const State &state, - int from_val, int to_val) const { +int CGCache::get_index( + int var, const State &state, int from_val, int to_val) const { assert(is_cached(var)); assert(from_val != to_val); int index = from_val; diff --git a/src/search/heuristics/cg_cache.h b/src/search/heuristics/cg_cache.h index 87e43da766..b4b2040c69 100644 --- a/src/search/heuristics/cg_cache.h +++ b/src/search/heuristics/cg_cache.h @@ -17,16 +17,19 @@ namespace cg_heuristic { class CGCache { TaskProxy task_proxy; std::vector> cache; - std::vector> helpful_transition_cache; + std::vector> + helpful_transition_cache; std::vector> depends_on; int get_index(int var, const State &state, int from_val, int to_val) const; int compute_required_cache_size( - int var_id, const std::vector &depends_on, int max_cache_size) const; + int var_id, const std::vector &depends_on, + int max_cache_size) const; public: static const int NOT_COMPUTED = -2; - CGCache(const TaskProxy &task_proxy, int max_cache_size, utils::LogProxy &log); + CGCache( + const TaskProxy &task_proxy, int max_cache_size, utils::LogProxy &log); bool is_cached(int var) const { return !cache[var].empty(); @@ -36,8 +39,8 @@ class CGCache { return cache[var][get_index(var, state, from_val, to_val)]; } - void store(int var, const State &state, - int from_val, int to_val, int cost) { + void store( + int var, const State &state, int from_val, int to_val, int cost) { cache[var][get_index(var, state, from_val, to_val)] = cost; } diff --git a/src/search/heuristics/cg_heuristic.cc b/src/search/heuristics/cg_heuristic.cc index 8ec18a3deb..541d55210a 100644 --- a/src/search/heuristics/cg_heuristic.cc +++ b/src/search/heuristics/cg_heuristic.cc @@ -18,12 +18,11 @@ using namespace domain_transition_graph; namespace cg_heuristic { CGHeuristic::CGHeuristic( int max_cache_size, tasks::AxiomHandlingType axioms, - const shared_ptr &transform, - bool cache_estimates, const string &description, - utils::Verbosity 
verbosity) - : Heuristic(tasks::get_default_value_axioms_task_if_needed( - transform, axioms), - cache_estimates, description, verbosity), + const shared_ptr &transform, bool cache_estimates, + const string &description, utils::Verbosity verbosity) + : Heuristic( + tasks::get_default_value_axioms_task_if_needed(transform, axioms), + cache_estimates, description, verbosity), cache_hits(0), cache_misses(0), helpful_transition_extraction_counter(0), @@ -40,8 +39,9 @@ CGHeuristic::CGHeuristic( for (size_t i = 0; i < num_vars; ++i) prio_queues.push_back(make_unique()); - function pruning_condition = - [](int dtg_var, int cond_var) {return dtg_var <= cond_var;}; + function pruning_condition = [](int dtg_var, int cond_var) { + return dtg_var <= cond_var; + }; DTGFactory factory(task_proxy, false, pruning_condition); transition_graphs = factory.build_dtgs(); } @@ -82,10 +82,9 @@ void CGHeuristic::setup_domain_transition_graphs() { ++helpful_transition_extraction_counter; } -int CGHeuristic::get_transition_cost(const State &state, - DomainTransitionGraph *dtg, - int start_val, - int goal_val) { +int CGHeuristic::get_transition_cost( + const State &state, DomainTransitionGraph *dtg, int start_val, + int goal_val) { if (start_val == goal_val) return 0; @@ -118,7 +117,8 @@ int CGHeuristic::get_transition_cost(const State &state, } // Initialize Heap for Dijkstra's algorithm. - priority_queues::AdaptiveQueue &prio_queue = *prio_queues[var_no]; + priority_queues::AdaptiveQueue &prio_queue = + *prio_queues[var_no]; prio_queue.clear(); prio_queue.push(0, start); @@ -150,13 +150,15 @@ int CGHeuristic::get_transition_cost(const State &state, // Scan labels of the transition. for (ValueTransitionLabel &label : transition.labels) { - OperatorProxy op = label.is_axiom ? - task_proxy.get_axioms()[label.op_id] : - task_proxy.get_operators()[label.op_id]; + OperatorProxy op = + label.is_axiom + ? 
task_proxy.get_axioms()[label.op_id] + : task_proxy.get_operators()[label.op_id]; int new_distance = source_distance + op.get_cost(); for (LocalAssignment &assignment : label.precond) { if (new_distance >= *target_distance_ptr) - break; // We already know this isn't an improved path. + break; // We already know this isn't an improved + // path. int local_var = assignment.local_var; int current_val = source->children_state[local_var]; int global_var = dtg->local_to_global_child[local_var]; @@ -195,7 +197,8 @@ int CGHeuristic::get_transition_cost(const State &state, // no helpful transitions recorded yet. start->helpful_transitions[target->value] = &label; } else { - start->helpful_transitions[target->value] = current_helpful_transition; + start->helpful_transitions[target->value] = + current_helpful_transition; } prio_queue.push(new_distance, target); @@ -223,8 +226,8 @@ int CGHeuristic::get_transition_cost(const State &state, return start->distances[goal_val]; } -void CGHeuristic::mark_helpful_transitions(const State &state, - DomainTransitionGraph *dtg, int to) { +void CGHeuristic::mark_helpful_transitions( + const State &state, DomainTransitionGraph *dtg, int to) { int var_no = dtg->var; int from = state[var_no].get_value(); if (from == to) @@ -268,20 +271,21 @@ void CGHeuristic::mark_helpful_transitions(const State &state, cost = start_node->distances[to]; } - OperatorProxy op = helpful->is_axiom ? - task_proxy.get_axioms()[helpful->op_id] : - task_proxy.get_operators()[helpful->op_id]; - if (cost == op.get_cost() && - !op.is_axiom() && + OperatorProxy op = helpful->is_axiom + ? task_proxy.get_axioms()[helpful->op_id] + : task_proxy.get_operators()[helpful->op_id]; + if (cost == op.get_cost() && !op.is_axiom() && task_properties::is_applicable(op, state)) { // Transition immediately applicable, all preconditions true. set_preferred(op); } else { - // Recursively compute helpful transitions for the precondition variables. 
+ // Recursively compute helpful transitions for the precondition + // variables. for (const LocalAssignment &assignment : helpful->precond) { int local_var = assignment.local_var; int global_var = dtg->local_to_global_child[local_var]; - DomainTransitionGraph *precond_dtg = transition_graphs[global_var].get(); + DomainTransitionGraph *precond_dtg = + transition_graphs[global_var].get(); mark_helpful_transitions(state, precond_dtg, assignment.value); } } @@ -296,8 +300,7 @@ class CGHeuristicFeature add_option( "max_cache_size", "maximum number of cached entries per variable (set to 0 to disable cache)", - "1000000", - plugins::Bounds("0", "infinity")); + "1000000", plugins::Bounds("0", "infinity")); tasks::add_axioms_option_to_feature(*this); add_heuristic_options_to_feature(*this, "cg"); @@ -311,13 +314,12 @@ class CGHeuristicFeature document_property("preferred operators", "yes"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("max_cache_size"), tasks::get_axioms_arguments_from_options(opts), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/cg_heuristic.h b/src/search/heuristics/cg_heuristic.h index 7252e1607c..39724c7e77 100644 --- a/src/search/heuristics/cg_heuristic.h +++ b/src/search/heuristics/cg_heuristic.h @@ -19,9 +19,11 @@ namespace cg_heuristic { class CGCache; class CGHeuristic : public Heuristic { - using ValueNodeQueue = priority_queues::AdaptiveQueue; + using ValueNodeQueue = + priority_queues::AdaptiveQueue; std::vector> prio_queues; - std::vector> transition_graphs; + std::vector> + transition_graphs; std::unique_ptr cache; int cache_hits; @@ -33,23 +35,18 @@ class CGHeuristic : public Heuristic { void setup_domain_transition_graphs(); int get_transition_cost( - const State 
&state, - domain_transition_graph::DomainTransitionGraph *dtg, - int start_val, - int goal_val); + const State &state, domain_transition_graph::DomainTransitionGraph *dtg, + int start_val, int goal_val); void mark_helpful_transitions( - const State &state, - domain_transition_graph::DomainTransitionGraph *dtg, + const State &state, domain_transition_graph::DomainTransitionGraph *dtg, int to); protected: virtual int compute_heuristic(const State &ancestor_state) override; public: explicit CGHeuristic( - int max_cache_size, - tasks::AxiomHandlingType axiom_hanlding, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + int max_cache_size, tasks::AxiomHandlingType axiom_hanlding, + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); virtual bool dead_ends_are_reliable() const override; }; } diff --git a/src/search/heuristics/domain_transition_graph.cc b/src/search/heuristics/domain_transition_graph.cc index 3f9263a27c..49c78505a3 100644 --- a/src/search/heuristics/domain_transition_graph.cc +++ b/src/search/heuristics/domain_transition_graph.cc @@ -9,9 +9,9 @@ using namespace std; namespace domain_transition_graph { -DTGFactory::DTGFactory(const TaskProxy &task_proxy, - bool collect_transition_side_effects, - const function &pruning_condition) +DTGFactory::DTGFactory( + const TaskProxy &task_proxy, bool collect_transition_side_effects, + const function &pruning_condition) : task_proxy(task_proxy), collect_transition_side_effects(collect_transition_side_effects), pruning_condition(pruning_condition) { @@ -55,8 +55,8 @@ void DTGFactory::create_transitions(DTGs &dtgs) { process_effect(eff, ax, dtgs); } -void DTGFactory::process_effect(const EffectProxy &eff, const OperatorProxy &op, - DTGs &dtgs) { +void DTGFactory::process_effect( + const EffectProxy &eff, const OperatorProxy &op, DTGs &dtgs) { FactProxy fact = eff.get_fact(); int 
var_id = fact.get_variable().get_id(); DomainTransitionGraph *dtg = dtgs[var_id].get(); @@ -88,23 +88,23 @@ void DTGFactory::process_effect(const EffectProxy &eff, const OperatorProxy &op, } if (origin != -1) { ValueTransition *trans = get_transition(origin, target, dtg); - trans->labels.push_back( - ValueTransitionLabel(op.get_id(), op.is_axiom(), transition_condition, side_effect)); + trans->labels.push_back(ValueTransitionLabel( + op.get_id(), op.is_axiom(), transition_condition, side_effect)); } else { int domain_size = fact.get_variable().get_domain_size(); for (int origin = 0; origin < domain_size; ++origin) { if (origin == target) continue; ValueTransition *trans = get_transition(origin, target, dtg); - trans->labels.push_back( - ValueTransitionLabel(op.get_id(), op.is_axiom(), transition_condition, side_effect)); + trans->labels.push_back(ValueTransitionLabel( + op.get_id(), op.is_axiom(), transition_condition, side_effect)); } } } -void DTGFactory::update_transition_condition(const FactProxy &fact, - DomainTransitionGraph *dtg, - vector &condition) { +void DTGFactory::update_transition_condition( + const FactProxy &fact, DomainTransitionGraph *dtg, + vector &condition) { int fact_var = fact.get_variable().get_id(); if (!pruning_condition(dtg->var, fact_var)) { extend_global_to_local_mapping_if_necessary(dtg, fact_var); @@ -116,23 +116,24 @@ void DTGFactory::update_transition_condition(const FactProxy &fact, void DTGFactory::extend_global_to_local_mapping_if_necessary( DomainTransitionGraph *dtg, int global_var) { if (!global_to_local_var[dtg->var].count(global_var)) { - global_to_local_var[dtg->var][global_var] = dtg->local_to_global_child.size(); + global_to_local_var[dtg->var][global_var] = + dtg->local_to_global_child.size(); dtg->local_to_global_child.push_back(global_var); } } -void DTGFactory::revert_new_local_vars(DomainTransitionGraph *dtg, - unsigned int first_local_var) { +void DTGFactory::revert_new_local_vars( + DomainTransitionGraph *dtg, 
unsigned int first_local_var) { vector &loc_to_glob = dtg->local_to_global_child; for (unsigned int l = first_local_var; l < loc_to_glob.size(); ++l) global_to_local_var[dtg->var].erase(loc_to_glob[l]); if (loc_to_glob.size() > first_local_var) - loc_to_glob.erase(loc_to_glob.begin() + first_local_var, - loc_to_glob.end()); + loc_to_glob.erase( + loc_to_glob.begin() + first_local_var, loc_to_glob.end()); } -ValueTransition *DTGFactory::get_transition(int origin, int target, - DomainTransitionGraph *dtg) { +ValueTransition *DTGFactory::get_transition( + int origin, int target, DomainTransitionGraph *dtg) { utils::HashMap, int> &trans_map = transition_index[dtg->var]; pair arc = make_pair(origin, target); ValueNode &origin_node = dtg->nodes[origin]; @@ -148,13 +149,13 @@ ValueTransition *DTGFactory::get_transition(int origin, int target, void DTGFactory::collect_all_side_effects(DTGs &dtgs) { for (auto &dtg : dtgs) { for (auto &node : dtg->nodes) - for (auto &transition: node.transitions) + for (auto &transition : node.transitions) collect_side_effects(dtg.get(), transition.labels); } } -void DTGFactory::collect_side_effects(DomainTransitionGraph *dtg, - vector &labels) { +void DTGFactory::collect_side_effects( + DomainTransitionGraph *dtg, vector &labels) { const vector &loc_to_glob = dtg->local_to_global_child; const unordered_map &glob_to_loc = global_to_local_var[dtg->var]; @@ -202,8 +203,9 @@ void DTGFactory::collect_side_effects(DomainTransitionGraph *dtg, } sort(triggercond_pairs.begin(), triggercond_pairs.end()); - if (includes(precond_pairs.begin(), precond_pairs.end(), - triggercond_pairs.begin(), triggercond_pairs.end())) { + if (includes( + precond_pairs.begin(), precond_pairs.end(), + triggercond_pairs.begin(), triggercond_pairs.end())) { int local_var = glob_to_loc.at(var_no); side_effects.push_back(LocalAssignment(local_var, post)); } @@ -214,8 +216,8 @@ void DTGFactory::collect_side_effects(DomainTransitionGraph *dtg, void 
DTGFactory::simplify_transitions(DTGs &dtgs) { for (auto &dtg : dtgs) - for (ValueNode & node : dtg->nodes) - for (ValueTransition & transition : node.transitions) + for (ValueNode &node : dtg->nodes) + for (ValueTransition &transition : node.transitions) simplify_labels(transition.labels); } @@ -257,7 +259,8 @@ void DTGFactory::simplify_labels(vector &labels) { for (auto &entry : label_index) { const HashKey &key = entry.first; int label_no = entry.second; - int powerset_size = (1 << key.size()) - 1; // -1: only consider proper subsets + int powerset_size = + (1 << key.size()) - 1; // -1: only consider proper subsets bool match = false; if (powerset_size <= 31) { // HACK! Don't spend too much time here... OperatorProxy op = get_op_for_label(old_labels[label_no]); @@ -268,7 +271,8 @@ void DTGFactory::simplify_labels(vector &labels) { subset.push_back(key[i]); HashMap::iterator found = label_index.find(subset); if (found != label_index.end()) { - const ValueTransitionLabel &f_label = old_labels[found->second]; + const ValueTransitionLabel &f_label = + old_labels[found->second]; OperatorProxy f_op = get_op_for_label(f_label); if (op.get_cost() >= f_op.get_cost()) { /* TODO: Depending on how clever we want to @@ -289,7 +293,6 @@ void DTGFactory::simplify_labels(vector &labels) { } } - DomainTransitionGraph::DomainTransitionGraph(int var_index, int node_count) { var = var_index; nodes.reserve(node_count); diff --git a/src/search/heuristics/domain_transition_graph.h b/src/search/heuristics/domain_transition_graph.h index 14529850c0..4f8d09a125 100644 --- a/src/search/heuristics/domain_transition_graph.h +++ b/src/search/heuristics/domain_transition_graph.h @@ -7,7 +7,6 @@ #include #include - namespace cea_heuristic { class ContextEnhancedAdditiveHeuristic; } @@ -39,28 +38,28 @@ class DTGFactory { void allocate_graphs_and_nodes(DTGs &dtgs); void initialize_index_structures(int num_dtgs); void create_transitions(DTGs &dtgs); - void process_effect(const EffectProxy &eff, 
const OperatorProxy &op, - DTGs &dtgs); - void update_transition_condition(const FactProxy &fact, - DomainTransitionGraph *dtg, - std::vector &condition); + void process_effect( + const EffectProxy &eff, const OperatorProxy &op, DTGs &dtgs); + void update_transition_condition( + const FactProxy &fact, DomainTransitionGraph *dtg, + std::vector &condition); void extend_global_to_local_mapping_if_necessary( DomainTransitionGraph *dtg, int global_var); - void revert_new_local_vars(DomainTransitionGraph *dtg, - unsigned int first_local_var); - ValueTransition *get_transition(int origin, int target, - DomainTransitionGraph *dtg); + void revert_new_local_vars( + DomainTransitionGraph *dtg, unsigned int first_local_var); + ValueTransition *get_transition( + int origin, int target, DomainTransitionGraph *dtg); void simplify_transitions(DTGs &dtgs); void simplify_labels(std::vector &labels); void collect_all_side_effects(DTGs &dtgs); - void collect_side_effects(DomainTransitionGraph *dtg, - std::vector &labels); + void collect_side_effects( + DomainTransitionGraph *dtg, std::vector &labels); OperatorProxy get_op_for_label(const ValueTransitionLabel &label); public: - DTGFactory(const TaskProxy &task_proxy, - bool collect_transition_side_effects, - const std::function &pruning_condition); + DTGFactory( + const TaskProxy &task_proxy, bool collect_transition_side_effects, + const std::function &pruning_condition); DTGs build_dtgs(); }; @@ -69,8 +68,7 @@ struct LocalAssignment { short local_var; short value; - LocalAssignment(int var, int val) - : local_var(var), value(val) { + LocalAssignment(int var, int val) : local_var(var), value(val) { // Check overflow. 
assert(local_var == var); assert(value == val); @@ -83,18 +81,19 @@ struct ValueTransitionLabel { std::vector precond; std::vector effect; - ValueTransitionLabel(int op_id, bool axiom, - const std::vector &precond, - const std::vector &effect) - : op_id(op_id), is_axiom(axiom), precond(precond), effect(effect) {} + ValueTransitionLabel( + int op_id, bool axiom, const std::vector &precond, + const std::vector &effect) + : op_id(op_id), is_axiom(axiom), precond(precond), effect(effect) { + } }; struct ValueTransition { ValueNode *target; std::vector labels; - ValueTransition(ValueNode *targ) - : target(targ) {} + ValueTransition(ValueNode *targ) : target(targ) { + } void simplify(const TaskProxy &task_proxy); }; @@ -111,8 +110,11 @@ struct ValueNode { ValueTransitionLabel *reached_by; ValueNode(DomainTransitionGraph *parent, int val) - : parent_graph(parent), value(val), reached_from(nullptr), - reached_by(nullptr) {} + : parent_graph(parent), + value(val), + reached_from(nullptr), + reached_by(nullptr) { + } }; class DomainTransitionGraph { @@ -129,7 +131,8 @@ class DomainTransitionGraph { // used for mapping variables in conditions to their global index // (only needed for initializing child_state for the start node?) 
- DomainTransitionGraph(const DomainTransitionGraph &other); // copying forbidden + DomainTransitionGraph( + const DomainTransitionGraph &other); // copying forbidden public: DomainTransitionGraph(int var_index, int node_count); }; diff --git a/src/search/heuristics/ff_heuristic.cc b/src/search/heuristics/ff_heuristic.cc index 8d5bf7875a..f3b9cf6d23 100644 --- a/src/search/heuristics/ff_heuristic.cc +++ b/src/search/heuristics/ff_heuristic.cc @@ -1,7 +1,6 @@ #include "ff_heuristic.h" #include "../plugins/plugin.h" - #include "../task_utils/task_properties.h" #include "../utils/logging.h" @@ -12,12 +11,10 @@ using namespace std; namespace ff_heuristic { // construction and destruction FFHeuristic::FFHeuristic( - tasks::AxiomHandlingType axioms, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity) + tasks::AxiomHandlingType axioms, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) : AdditiveHeuristic( - axioms, transform, cache_estimates, description, - verbosity), + axioms, transform, cache_estimates, description, verbosity), relaxed_plan(task_proxy.get_operators().size(), false) { if (log.is_at_least_normal()) { log << "Initializing FF heuristic..." 
<< endl; @@ -34,8 +31,7 @@ void FFHeuristic::mark_preferred_operators_and_relaxed_plan( UnaryOperator *unary_op = get_operator(op_id); bool is_preferred = true; for (PropID precond : get_preconditions(op_id)) { - mark_preferred_operators_and_relaxed_plan( - state, precond); + mark_preferred_operators_and_relaxed_plan(state, precond); if (get_proposition(precond)->reached_by != NO_OP) { is_preferred = false; } @@ -80,7 +76,8 @@ class FFHeuristicFeature FFHeuristicFeature() : TypedFeature("ff") { document_title("FF heuristic"); - relaxation_heuristic::add_relaxation_heuristic_options_to_feature(*this, "ff"); + relaxation_heuristic::add_relaxation_heuristic_options_to_feature( + *this, "ff"); document_language_support("action costs", "supported"); document_language_support("conditional effects", "supported"); @@ -92,11 +89,11 @@ class FFHeuristicFeature document_property("preferred operators", "yes"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - relaxation_heuristic::get_relaxation_heuristic_arguments_from_options(opts) - ); + relaxation_heuristic:: + get_relaxation_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/ff_heuristic.h b/src/search/heuristics/ff_heuristic.h index d0302ed30c..b6f4e9347a 100644 --- a/src/search/heuristics/ff_heuristic.h +++ b/src/search/heuristics/ff_heuristic.h @@ -6,8 +6,8 @@ #include namespace ff_heuristic { -using relaxation_heuristic::PropID; using relaxation_heuristic::OpID; +using relaxation_heuristic::PropID; using relaxation_heuristic::NO_OP; @@ -34,9 +34,8 @@ class FFHeuristic : public additive_heuristic::AdditiveHeuristic { public: FFHeuristic( tasks::AxiomHandlingType axioms, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr 
&transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/heuristics/goal_count_heuristic.cc b/src/search/heuristics/goal_count_heuristic.cc index 93b9900ff3..5ec953149f 100644 --- a/src/search/heuristics/goal_count_heuristic.cc +++ b/src/search/heuristics/goal_count_heuristic.cc @@ -1,7 +1,6 @@ #include "goal_count_heuristic.h" #include "../plugins/plugin.h" - #include "../utils/logging.h" #include @@ -48,11 +47,10 @@ class GoalCountHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/goal_count_heuristic.h b/src/search/heuristics/goal_count_heuristic.h index cba4b55818..8d457af5f2 100644 --- a/src/search/heuristics/goal_count_heuristic.h +++ b/src/search/heuristics/goal_count_heuristic.h @@ -9,9 +9,8 @@ class GoalCountHeuristic : public Heuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: GoalCountHeuristic( - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/heuristics/hm_heuristic.cc b/src/search/heuristics/hm_heuristic.cc index 5325202af7..f98ef6c088 100644 --- a/src/search/heuristics/hm_heuristic.cc +++ b/src/search/heuristics/hm_heuristic.cc @@ -1,7 +1,6 @@ #include "hm_heuristic.h" #include "../plugins/plugin.h" - #include "../task_utils/task_properties.h" #include "../utils/logging.h" @@ -13,9 +12,8 @@ using namespace std; namespace hm_heuristic { HMHeuristic::HMHeuristic( 
- int m, const shared_ptr &transform, - bool cache_estimates, const string &description, - utils::Verbosity verbosity) + int m, const shared_ptr &transform, bool cache_estimates, + const string &description, utils::Verbosity verbosity) : Heuristic(transform, cache_estimates, description, verbosity), m(m), has_cond_effects(task_properties::has_conditional_effects(task_proxy)), @@ -29,12 +27,10 @@ HMHeuristic::HMHeuristic( generate_all_tuples(); } - bool HMHeuristic::dead_ends_are_reliable() const { return !task_properties::has_axioms(task_proxy) && !has_cond_effects; } - int HMHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); if (task_properties::is_goal_state(task_proxy, state)) { @@ -53,7 +49,6 @@ int HMHeuristic::compute_heuristic(const State &ancestor_state) { } } - void HMHeuristic::init_hm_table(const Tuple &t) { for (auto &hm_ent : hm_table) { const Tuple &tuple = hm_ent.first; @@ -62,7 +57,6 @@ void HMHeuristic::init_hm_table(const Tuple &t) { } } - void HMHeuristic::update_hm_table() { do { was_updated = false; @@ -88,7 +82,6 @@ void HMHeuristic::update_hm_table() { } while (was_updated); } - void HMHeuristic::extend_tuple(const Tuple &t, const OperatorProxy &op) { for (auto &hm_ent : hm_table) { const Tuple &tuple = hm_ent.first; @@ -99,7 +92,8 @@ void HMHeuristic::extend_tuple(const Tuple &t, const OperatorProxy &op) { break; } } - if (!contradict && (tuple.size() > t.size()) && (check_tuple_in_tuple(t, tuple) == 0)) { + if (!contradict && (tuple.size() > t.size()) && + (check_tuple_in_tuple(t, tuple) == 0)) { Tuple pre = get_operator_pre(op); Tuple others; @@ -134,7 +128,6 @@ void HMHeuristic::extend_tuple(const Tuple &t, const OperatorProxy &op) { } } - int HMHeuristic::eval(const Tuple &t) const { vector partial; generate_all_partial_tuples(t, partial); @@ -150,7 +143,6 @@ int HMHeuristic::eval(const Tuple &t) const { return max; } - int HMHeuristic::update_hm_entry(const Tuple &t, int 
val) { assert(hm_table.count(t) == 1); if (hm_table[t] > val) { @@ -160,7 +152,6 @@ int HMHeuristic::update_hm_entry(const Tuple &t, int val) { return val; } - int HMHeuristic::check_tuple_in_tuple( const Tuple &tuple, const Tuple &big_tuple) const { for (const FactPair &fact0 : tuple) { @@ -178,15 +169,16 @@ int HMHeuristic::check_tuple_in_tuple( return 0; } - -HMHeuristic::Tuple HMHeuristic::get_operator_pre(const OperatorProxy &op) const { - Tuple preconditions = task_properties::get_fact_pairs(op.get_preconditions()); +HMHeuristic::Tuple HMHeuristic::get_operator_pre( + const OperatorProxy &op) const { + Tuple preconditions = + task_properties::get_fact_pairs(op.get_preconditions()); sort(preconditions.begin(), preconditions.end()); return preconditions; } - -HMHeuristic::Tuple HMHeuristic::get_operator_eff(const OperatorProxy &op) const { +HMHeuristic::Tuple HMHeuristic::get_operator_eff( + const OperatorProxy &op) const { Tuple effects; for (EffectProxy eff : op.get_effects()) { effects.push_back(eff.get_fact().get_pair()); @@ -195,7 +187,6 @@ HMHeuristic::Tuple HMHeuristic::get_operator_eff(const OperatorProxy &op) const return effects; } - bool HMHeuristic::contradict_effect_of( const OperatorProxy &op, int var, int val) const { for (EffectProxy eff : op.get_effects()) { @@ -207,13 +198,11 @@ bool HMHeuristic::contradict_effect_of( return false; } - void HMHeuristic::generate_all_tuples() { Tuple t; generate_all_tuples_aux(0, m, t); } - void HMHeuristic::generate_all_tuples_aux(int var, int sz, const Tuple &base) { int num_variables = task_proxy.get_variables().size(); for (int i = var; i < num_variables; ++i) { @@ -229,16 +218,15 @@ void HMHeuristic::generate_all_tuples_aux(int var, int sz, const Tuple &base) { } } - void HMHeuristic::generate_all_partial_tuples( const Tuple &base_tuple, vector &res) const { Tuple t; generate_all_partial_tuples_aux(base_tuple, t, 0, m, res); } - void HMHeuristic::generate_all_partial_tuples_aux( - const Tuple &base_tuple, 
const Tuple &t, int index, int sz, vector &res) const { + const Tuple &base_tuple, const Tuple &t, int index, int sz, + vector &res) const { if (sz == 1) { for (size_t i = index; i < base_tuple.size(); ++i) { Tuple tuple(t); @@ -250,12 +238,12 @@ void HMHeuristic::generate_all_partial_tuples_aux( Tuple tuple(t); tuple.push_back(base_tuple[i]); res.push_back(tuple); - generate_all_partial_tuples_aux(base_tuple, tuple, i + 1, sz - 1, res); + generate_all_partial_tuples_aux( + base_tuple, tuple, i + 1, sz - 1, res); } } } - void HMHeuristic::dump_table() const { if (log.is_at_least_debug()) { for (auto &hm_ent : hm_table) { @@ -270,7 +258,8 @@ class HMHeuristicFeature HMHeuristicFeature() : TypedFeature("hm") { document_title("h^m heuristic"); - add_option("m", "subset size", "2", plugins::Bounds("1", "infinity")); + add_option( + "m", "subset size", "2", plugins::Bounds("1", "infinity")); add_heuristic_options_to_feature(*this, "hm"); document_language_support("action costs", "supported"); @@ -284,17 +273,14 @@ class HMHeuristicFeature "consistent", "yes for tasks without conditional effects or axioms"); document_property( - "safe", - "yes for tasks without conditional effects or axioms"); + "safe", "yes for tasks without conditional effects or axioms"); document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get("m"), - get_heuristic_arguments_from_options(opts) - ); + opts.get("m"), get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/hm_heuristic.h b/src/search/heuristics/hm_heuristic.h index ad0379b458..9b34b6980f 100644 --- a/src/search/heuristics/hm_heuristic.h +++ b/src/search/heuristics/hm_heuristic.h @@ -50,10 +50,11 @@ class HMHeuristic : public Heuristic { void generate_all_tuples(); void 
generate_all_tuples_aux(int var, int sz, const Tuple &base); - void generate_all_partial_tuples(const Tuple &base_tuple, - std::vector &res) const; - void generate_all_partial_tuples_aux(const Tuple &base_tuple, const Tuple &t, int index, - int sz, std::vector &res) const; + void generate_all_partial_tuples( + const Tuple &base_tuple, std::vector &res) const; + void generate_all_partial_tuples_aux( + const Tuple &base_tuple, const Tuple &t, int index, int sz, + std::vector &res) const; void dump_table() const; diff --git a/src/search/heuristics/lm_cut_heuristic.cc b/src/search/heuristics/lm_cut_heuristic.cc index ad916b305a..15096edbf3 100644 --- a/src/search/heuristics/lm_cut_heuristic.cc +++ b/src/search/heuristics/lm_cut_heuristic.cc @@ -27,8 +27,7 @@ int LandmarkCutHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int total_cost = 0; bool dead_end = landmark_generator->compute_landmarks( - state, - [&total_cost](int cut_cost) {total_cost += cut_cost;}, + state, [&total_cost](int cut_cost) { total_cost += cut_cost; }, nullptr); if (dead_end) @@ -54,11 +53,10 @@ class LandmarkCutHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/lm_cut_heuristic.h b/src/search/heuristics/lm_cut_heuristic.h index fe57ebba22..a4fa5d660d 100644 --- a/src/search/heuristics/lm_cut_heuristic.h +++ b/src/search/heuristics/lm_cut_heuristic.h @@ -18,9 +18,8 @@ class LandmarkCutHeuristic : public Heuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: LandmarkCutHeuristic( - const std::shared_ptr &transform, - bool 
cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/heuristics/lm_cut_landmarks.cc b/src/search/heuristics/lm_cut_landmarks.cc index 590c070e6f..f26618926a 100644 --- a/src/search/heuristics/lm_cut_landmarks.cc +++ b/src/search/heuristics/lm_cut_landmarks.cc @@ -68,8 +68,7 @@ void LandmarkCutLandmarks::build_relaxed_operator(const OperatorProxy &op) { void LandmarkCutLandmarks::add_relaxed_operator( vector &&precondition, - vector &&effects, - int op_id, int base_cost) { + vector &&effects, int op_id, int base_cost) { RelaxedOperator relaxed_op( move(precondition), move(effects), op_id, base_cost); if (relaxed_op.preconditions.empty()) diff --git a/src/search/heuristics/lm_cut_landmarks.h b/src/search/heuristics/lm_cut_landmarks.h index 76b967d164..b23f270c27 100644 --- a/src/search/heuristics/lm_cut_landmarks.h +++ b/src/search/heuristics/lm_cut_landmarks.h @@ -31,11 +31,16 @@ struct RelaxedOperator { int unsatisfied_preconditions; int h_max_supporter_cost; // h_max_cost of h_max_supporter RelaxedProposition *h_max_supporter; - RelaxedOperator(std::vector &&pre, - std::vector &&eff, - int op_id, int base) - : original_op_id(op_id), preconditions(pre), effects(eff), base_cost(base), - cost(-1), unsatisfied_preconditions(-1), h_max_supporter_cost(-1), + RelaxedOperator( + std::vector &&pre, + std::vector &&eff, int op_id, int base) + : original_op_id(op_id), + preconditions(pre), + effects(eff), + base_cost(base), + cost(-1), + unsatisfied_preconditions(-1), + h_max_supporter_cost(-1), h_max_supporter(nullptr) { } @@ -59,17 +64,18 @@ class LandmarkCutLandmarks { priority_queues::AdaptiveQueue priority_queue; void build_relaxed_operator(const OperatorProxy &op); - void add_relaxed_operator(std::vector &&precondition, - std::vector &&effects, - int op_id, int base_cost); + void 
add_relaxed_operator( + std::vector &&precondition, + std::vector &&effects, int op_id, int base_cost); RelaxedProposition *get_proposition(const FactProxy &fact); void setup_exploration_queue(); void setup_exploration_queue_state(const State &state); void first_exploration(const State &state); void first_exploration_incremental(std::vector &cut); - void second_exploration(const State &state, - std::vector &second_exploration_queue, - std::vector &cut); + void second_exploration( + const State &state, + std::vector &second_exploration_queue, + std::vector &cut); void enqueue_if_necessary(RelaxedProposition *prop, int cost) { assert(cost >= 0); @@ -84,8 +90,8 @@ class LandmarkCutLandmarks { void validate_h_max() const; public: using Landmark = std::vector; - using CostCallback = std::function; - using LandmarkCallback = std::function; + using CostCallback = std::function; + using LandmarkCallback = std::function; LandmarkCutLandmarks(const TaskProxy &task_proxy); @@ -102,8 +108,9 @@ class LandmarkCutLandmarks { Returns true iff state is detected as a dead end. 
*/ - bool compute_landmarks(const State &state, const CostCallback &cost_callback, - const LandmarkCallback &landmark_callback); + bool compute_landmarks( + const State &state, const CostCallback &cost_callback, + const LandmarkCallback &landmark_callback); }; inline void RelaxedOperator::update_h_max_supporter() { diff --git a/src/search/heuristics/max_heuristic.cc b/src/search/heuristics/max_heuristic.cc index da5f073ccb..842ba9366d 100644 --- a/src/search/heuristics/max_heuristic.cc +++ b/src/search/heuristics/max_heuristic.cc @@ -23,12 +23,10 @@ namespace max_heuristic { // construction and destruction HSPMaxHeuristic::HSPMaxHeuristic( - tasks::AxiomHandlingType axioms, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity) + tasks::AxiomHandlingType axioms, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) : RelaxationHeuristic( - axioms, transform, cache_estimates, description, - verbosity) { + axioms, transform, cache_estimates, description, verbosity) { if (log.is_at_least_normal()) { log << "Initializing HSP max heuristic..." 
<< endl; } @@ -75,8 +73,8 @@ void HSPMaxHeuristic::relaxed_exploration() { for (OpID op_id : precondition_of_pool.get_slice( prop->precondition_of, prop->num_precondition_occurences)) { UnaryOperator *unary_op = get_operator(op_id); - unary_op->cost = max(unary_op->cost, - unary_op->base_cost + prop_cost); + unary_op->cost = + max(unary_op->cost, unary_op->base_cost + prop_cost); --unary_op->unsatisfied_preconditions; assert(unary_op->unsatisfied_preconditions >= 0); if (unary_op->unsatisfied_preconditions == 0) @@ -109,7 +107,8 @@ class HSPMaxHeuristicFeature HSPMaxHeuristicFeature() : TypedFeature("hmax") { document_title("Max heuristic"); - relaxation_heuristic::add_relaxation_heuristic_options_to_feature(*this, "hmax"); + relaxation_heuristic::add_relaxation_heuristic_options_to_feature( + *this, "hmax"); document_language_support("action costs", "supported"); document_language_support("conditional effects", "supported"); @@ -121,11 +120,11 @@ class HSPMaxHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - relaxation_heuristic::get_relaxation_heuristic_arguments_from_options(opts) - ); + relaxation_heuristic:: + get_relaxation_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/heuristics/max_heuristic.h b/src/search/heuristics/max_heuristic.h index 06364721d2..680aae6fd5 100644 --- a/src/search/heuristics/max_heuristic.h +++ b/src/search/heuristics/max_heuristic.h @@ -8,8 +8,8 @@ #include namespace max_heuristic { -using relaxation_heuristic::PropID; using relaxation_heuristic::OpID; +using relaxation_heuristic::PropID; using relaxation_heuristic::Proposition; using relaxation_heuristic::UnaryOperator; @@ -35,9 +35,8 @@ class HSPMaxHeuristic : public relaxation_heuristic::RelaxationHeuristic { public: 
HSPMaxHeuristic( tasks::AxiomHandlingType axioms, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/heuristics/relaxation_heuristic.cc b/src/search/heuristics/relaxation_heuristic.cc index f98b3c6fbc..0e245f3f53 100644 --- a/src/search/heuristics/relaxation_heuristic.cc +++ b/src/search/heuristics/relaxation_heuristic.cc @@ -23,7 +23,6 @@ Proposition::Proposition() num_precondition_occurences(-1) { } - UnaryOperator::UnaryOperator( int num_preconditions, array_pool::ArrayPoolIndex preconditions, PropID effect, int operator_no, int base_cost) @@ -40,23 +39,22 @@ void add_relaxation_heuristic_options_to_feature( add_heuristic_options_to_feature(feature, description); } -tuple, bool, string, - utils::Verbosity> +tuple< + tasks::AxiomHandlingType, shared_ptr, bool, string, + utils::Verbosity> get_relaxation_heuristic_arguments_from_options(const plugins::Options &opts) { return tuple_cat( tasks::get_axioms_arguments_from_options(opts), get_heuristic_arguments_from_options(opts)); } - // construction and destruction RelaxationHeuristic::RelaxationHeuristic( - tasks::AxiomHandlingType axioms, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity) - : Heuristic(tasks::get_default_value_axioms_task_if_needed( - transform, axioms), - cache_estimates, description, verbosity) { + tasks::AxiomHandlingType axioms, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) + : Heuristic( + tasks::get_default_value_axioms_task_if_needed(transform, axioms), + cache_estimates, description, verbosity) { // Build propositions. 
propositions.resize(task_properties::get_num_facts(task_proxy)); @@ -80,8 +78,7 @@ RelaxationHeuristic::RelaxationHeuristic( } // Build unary operators for operators and axioms. - unary_operators.reserve( - task_properties::get_num_total_effects(task_proxy)); + unary_operators.reserve(task_properties::get_num_total_effects(task_proxy)); for (OperatorProxy op : task_proxy.get_operators()) build_unary_operators(op); for (OperatorProxy axiom : task_proxy.get_axioms()) @@ -108,7 +105,8 @@ RelaxationHeuristic::RelaxationHeuristic( const auto &precondition_of_vec = precondition_of_vectors[prop_id]; propositions[prop_id].precondition_of = precondition_of_pool.append(precondition_of_vec); - propositions[prop_id].num_precondition_occurences = precondition_of_vec.size(); + propositions[prop_id].num_precondition_occurences = + precondition_of_vec.size(); } } @@ -160,9 +158,11 @@ void RelaxationHeuristic::build_unary_operators(const OperatorProxy &op) { array_pool::ArrayPoolIndex precond_index = preconditions_pool.append(preconditions_copy); unary_operators.emplace_back( - preconditions_copy.size(), precond_index, effect_prop, - op_no, base_cost); - precondition_props.erase(precondition_props.end() - eff_conds.size(), precondition_props.end()); + preconditions_copy.size(), precond_index, effect_prop, op_no, + base_cost); + precondition_props.erase( + precondition_props.end() - eff_conds.size(), + precondition_props.end()); } } @@ -194,7 +194,8 @@ void RelaxationHeuristic::simplify() { const int MAX_PRECONDITIONS_TO_TEST = 5; if (log.is_at_least_normal()) { - log << "Simplifying " << unary_operators.size() << " unary operators..." << flush; + log << "Simplifying " << unary_operators.size() << " unary operators..." 
+ << flush; } /* @@ -229,8 +230,8 @@ void RelaxationHeuristic::simplify() { Key key(get_preconditions_vector(op_no), op.effect); Value value(op.base_cost, op_no); - auto inserted = unary_operator_index.insert( - make_pair(move(key), value)); + auto inserted = + unary_operator_index.insert(make_pair(move(key), value)); if (!inserted.second) { // We already had an element with this key; check its cost. Map::iterator iter = inserted.first; @@ -251,74 +252,73 @@ void RelaxationHeuristic::simplify() { operator in the map. */ auto is_dominated = [&](const UnaryOperator &op) { - /* - Check all possible subsets X of pre(op) to see if there is a - dominating operator with preconditions X represented in the - map. - */ + /* + Check all possible subsets X of pre(op) to see if there is a + dominating operator with preconditions X represented in the + map. + */ - OpID op_id = get_op_id(op); - int cost = op.base_cost; + OpID op_id = get_op_id(op); + int cost = op.base_cost; - const vector precondition = get_preconditions_vector(op_id); + const vector precondition = get_preconditions_vector(op_id); - /* - We handle the case X = pre(op) specially for efficiency and - to ensure that an operator is not considered to be dominated - by itself. - - From the discussion above that operators with the same - precondition and effect are actually totally ordered, it is - enough to test here whether looking up the key of op in the - map results in an entry including op itself. - */ - if (unary_operator_index[make_pair(precondition, op.effect)].second != op_id) - return true; + /* + We handle the case X = pre(op) specially for efficiency and + to ensure that an operator is not considered to be dominated + by itself. + + From the discussion above that operators with the same + precondition and effect are actually totally ordered, it is + enough to test here whether looking up the key of op in the + map results in an entry including op itself. 
+ */ + if (unary_operator_index[make_pair(precondition, op.effect)].second != + op_id) + return true; + /* + We now handle all cases where X is a strict subset of pre(op). + Our map lookup ensures conditions 1. and 2., and because X is + a strict subset, we also have 4a (which means we don't need 4b). + So it only remains to check 3 for all hits. + */ + if (op.num_preconditions > MAX_PRECONDITIONS_TO_TEST) { /* - We now handle all cases where X is a strict subset of pre(op). - Our map lookup ensures conditions 1. and 2., and because X is - a strict subset, we also have 4a (which means we don't need 4b). - So it only remains to check 3 for all hits. + The runtime of the following code grows exponentially + with the number of preconditions. */ - if (op.num_preconditions > MAX_PRECONDITIONS_TO_TEST) { - /* - The runtime of the following code grows exponentially - with the number of preconditions. - */ - return false; - } + return false; + } - vector &dominating_precondition = dominating_key.first; - dominating_key.second = op.effect; - - // We subtract "- 1" to generate all *strict* subsets of precondition. - int powerset_size = (1 << precondition.size()) - 1; - for (int mask = 0; mask < powerset_size; ++mask) { - dominating_precondition.clear(); - for (size_t i = 0; i < precondition.size(); ++i) - if (mask & (1 << i)) - dominating_precondition.push_back(precondition[i]); - Map::iterator found = unary_operator_index.find(dominating_key); - if (found != unary_operator_index.end()) { - Value dominator_value = found->second; - int dominator_cost = dominator_value.first; - if (dominator_cost <= cost) - return true; - } + vector &dominating_precondition = dominating_key.first; + dominating_key.second = op.effect; + + // We subtract "- 1" to generate all *strict* subsets of precondition. 
+ int powerset_size = (1 << precondition.size()) - 1; + for (int mask = 0; mask < powerset_size; ++mask) { + dominating_precondition.clear(); + for (size_t i = 0; i < precondition.size(); ++i) + if (mask & (1 << i)) + dominating_precondition.push_back(precondition[i]); + Map::iterator found = unary_operator_index.find(dominating_key); + if (found != unary_operator_index.end()) { + Value dominator_value = found->second; + int dominator_cost = dominator_value.first; + if (dominator_cost <= cost) + return true; } - return false; - }; + } + return false; + }; unary_operators.erase( - remove_if( - unary_operators.begin(), - unary_operators.end(), - is_dominated), + remove_if(unary_operators.begin(), unary_operators.end(), is_dominated), unary_operators.end()); if (log.is_at_least_normal()) { - log << " done! [" << unary_operators.size() << " unary operators]" << endl; + log << " done! [" << unary_operators.size() << " unary operators]" + << endl; } } } diff --git a/src/search/heuristics/relaxation_heuristic.h b/src/search/heuristics/relaxation_heuristic.h index 218f461090..6e2523e1c8 100644 --- a/src/search/heuristics/relaxation_heuristic.h +++ b/src/search/heuristics/relaxation_heuristic.h @@ -39,10 +39,9 @@ struct Proposition { static_assert(sizeof(Proposition) == 16, "Proposition has wrong size"); struct UnaryOperator { - UnaryOperator(int num_preconditions, - array_pool::ArrayPoolIndex preconditions, - PropID effect, - int operator_no, int base_cost); + UnaryOperator( + int num_preconditions, array_pool::ArrayPoolIndex preconditions, + PropID effect, int operator_no, int base_cost); int cost; // Used for h^max cost or h^add cost; // includes operator cost (base_cost) int unsatisfied_preconditions; @@ -71,7 +70,8 @@ class RelaxationHeuristic : public Heuristic { array_pool::ArrayPoolSlice get_preconditions(OpID op_id) const { const UnaryOperator &op = unary_operators[op_id]; - return preconditions_pool.get_slice(op.preconditions, op.num_preconditions); + return 
preconditions_pool.get_slice( + op.preconditions, op.num_preconditions); } // HACK! @@ -113,17 +113,17 @@ class RelaxationHeuristic : public Heuristic { public: RelaxationHeuristic( tasks::AxiomHandlingType axioms, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); virtual bool dead_ends_are_reliable() const override; }; extern void add_relaxation_heuristic_options_to_feature( plugins::Feature &feature, const std::string &description); -extern std::tuple, - bool, std::string, utils::Verbosity> +extern std::tuple< + tasks::AxiomHandlingType, std::shared_ptr, bool, std::string, + utils::Verbosity> get_relaxation_heuristic_arguments_from_options(const plugins::Options &opts); } #endif diff --git a/src/search/landmarks/exploration.cc b/src/search/landmarks/exploration.cc index 1fb20669d6..aa3e6902b9 100644 --- a/src/search/landmarks/exploration.cc +++ b/src/search/landmarks/exploration.cc @@ -92,10 +92,11 @@ static vector get_sorted_extended_preconditions( vector extended_preconditions( preconditions.size() + effect_conditions.size(), FactPair::no_fact); - merge(preconditions.begin(), preconditions.end(), effect_conditions.begin(), - effect_conditions.end(), extended_preconditions.begin()); + merge( + preconditions.begin(), preconditions.end(), effect_conditions.begin(), + effect_conditions.end(), extended_preconditions.begin()); assert(is_sorted( - extended_preconditions.begin(), extended_preconditions.end())); + extended_preconditions.begin(), extended_preconditions.end())); return extended_preconditions; } @@ -178,8 +179,8 @@ unordered_set Exploration::get_excluded_operators( for (OperatorProxy op : task_proxy.get_operators()) { for (EffectProxy effect : op.get_effects()) { auto [var, value] = effect.get_fact().get_pair(); - if (effect.get_conditions().empty() - && 
propositions[var][value].excluded) { + if (effect.get_conditions().empty() && + propositions[var][value].excluded) { excluded_op_ids.insert(op.get_id()); break; } @@ -200,8 +201,8 @@ void Exploration::initialize_operator_data(bool use_unary_relaxation) { `excluded_op_ids` we also exclude UnaryOperators that have an excluded proposition as effect (see comment for `get_excluded_operators`). */ - if (op.effect->excluded - || excluded_op_ids.contains(op.op_or_axiom_id)) { + if (op.effect->excluded || + excluded_op_ids.contains(op.op_or_axiom_id)) { // Operator will not be applied during relaxed exploration. op.excluded = true; continue; @@ -284,8 +285,8 @@ vector> Exploration::bundle_reachability_information() const { vector> Exploration::compute_relaxed_reachability( const vector &excluded_props, bool use_unary_relaxation) { - setup_exploration_queue(task_proxy.get_initial_state(), excluded_props, - use_unary_relaxation); + setup_exploration_queue( + task_proxy.get_initial_state(), excluded_props, use_unary_relaxation); relaxed_exploration(); return bundle_reachability_information(); } diff --git a/src/search/landmarks/exploration.h b/src/search/landmarks/exploration.h index b668f91be3..71dfb05612 100644 --- a/src/search/landmarks/exploration.h +++ b/src/search/landmarks/exploration.h @@ -24,10 +24,7 @@ struct Proposition { bool reached; bool excluded; - Proposition() - : fact(FactPair::no_fact), - reached(false), - excluded(false) { + Proposition() : fact(FactPair::no_fact), reached(false), excluded(false) { } bool operator<(const Proposition &other) const { @@ -42,13 +39,15 @@ struct UnaryOperator { int num_unsatisfied_preconditions; bool excluded; - UnaryOperator(const std::vector &preconditions, - Proposition *eff, int op_or_axiom_id) + UnaryOperator( + const std::vector &preconditions, Proposition *eff, + int op_or_axiom_id) : op_or_axiom_id(op_or_axiom_id), num_preconditions(static_cast(preconditions.size())), effect(eff), 
num_unsatisfied_preconditions(num_preconditions), - excluded(false) {} + excluded(false) { + } }; class Exploration { diff --git a/src/search/landmarks/landmark.cc b/src/search/landmarks/landmark.cc index f9fb0f787c..f3d67161e7 100644 --- a/src/search/landmarks/landmark.cc +++ b/src/search/landmarks/landmark.cc @@ -7,8 +7,8 @@ using namespace std; namespace landmarks { bool Landmark::is_true_in_state(const State &state) const { auto is_atom_true_in_state = [&](const FactPair &atom) { - return state[atom.var].get_value() == atom.value; - }; + return state[atom.var].get_value() == atom.value; + }; if (type == DISJUNCTIVE) { return ranges::any_of(atoms, is_atom_true_in_state); } else { diff --git a/src/search/landmarks/landmark.h b/src/search/landmarks/landmark.h index bfbec05e0e..0c4460964e 100644 --- a/src/search/landmarks/landmark.h +++ b/src/search/landmarks/landmark.h @@ -24,12 +24,16 @@ enum LandmarkType { class Landmark { public: - Landmark(std::vector _atoms, LandmarkType type, - bool is_true_in_goal = false, bool is_derived = false) - : atoms(move(_atoms)), type(type), - is_true_in_goal(is_true_in_goal), is_derived(is_derived) { - assert((type == ATOMIC && atoms.size() == 1) || - (type != ATOMIC && atoms.size() > 1)); + Landmark( + std::vector _atoms, LandmarkType type, + bool is_true_in_goal = false, bool is_derived = false) + : atoms(move(_atoms)), + type(type), + is_true_in_goal(is_true_in_goal), + is_derived(is_derived) { + assert( + (type == ATOMIC && atoms.size() == 1) || + (type != ATOMIC && atoms.size() > 1)); } bool operator==(const Landmark &other) const { diff --git a/src/search/landmarks/landmark_cost_partitioning_algorithms.cc b/src/search/landmarks/landmark_cost_partitioning_algorithms.cc index 6c752fac0c..18a1c0b4bc 100644 --- a/src/search/landmarks/landmark_cost_partitioning_algorithms.cc +++ b/src/search/landmarks/landmark_cost_partitioning_algorithms.cc @@ -35,8 +35,7 @@ UniformCostPartitioningAlgorithm::UniformCostPartitioningAlgorithm( /* 
Compute which operator achieves how many landmarks. Along the way, mark action landmarks and sum up their costs. */ double UniformCostPartitioningAlgorithm::first_pass( - vector &landmarks_achieved_by_operator, - vector &action_landmarks, + vector &landmarks_achieved_by_operator, vector &action_landmarks, ConstBitsetView &past, ConstBitsetView &future) { double action_landmarks_cost = 0; for (const auto &node : landmark_graph) { @@ -57,7 +56,8 @@ double UniformCostPartitioningAlgorithm::first_pass( } } else { for (int op_id : achievers) { - assert(utils::in_bounds(op_id, landmarks_achieved_by_operator)); + assert(utils::in_bounds( + op_id, landmarks_achieved_by_operator)); ++landmarks_achieved_by_operator[op_id]; } } @@ -73,8 +73,8 @@ double UniformCostPartitioningAlgorithm::first_pass( */ vector UniformCostPartitioningAlgorithm::second_pass( vector &landmarks_achieved_by_operator, - const vector &action_landmarks, - ConstBitsetView &past, ConstBitsetView &future) { + const vector &action_landmarks, ConstBitsetView &past, + ConstBitsetView &future) { vector uncovered_landmarks; for (const auto &node : landmark_graph) { int id = node->get_id(); @@ -92,7 +92,7 @@ vector UniformCostPartitioningAlgorithm::second_pass( if (covered_by_action_landmark) { for (int op_id : achievers) { assert(utils::in_bounds( - op_id, landmarks_achieved_by_operator)); + op_id, landmarks_achieved_by_operator)); --landmarks_achieved_by_operator[op_id]; } } else { @@ -106,8 +106,8 @@ vector UniformCostPartitioningAlgorithm::second_pass( // Compute the cost partitioning. double UniformCostPartitioningAlgorithm::third_pass( const vector &uncovered_landmarks, - const vector &landmarks_achieved_by_operator, - ConstBitsetView &past, ConstBitsetView &future) { + const vector &landmarks_achieved_by_operator, ConstBitsetView &past, + ConstBitsetView &future) { double cost = 0; for (const LandmarkNode *node : uncovered_landmarks) { // TODO: Iterate over Landmarks instead of LandmarkNodes. 
@@ -162,7 +162,6 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value( return cost_of_action_landmarks + cost_partitioning_cost; } - OptimalCostPartitioningAlgorithm::OptimalCostPartitioningAlgorithm( const vector &operator_costs, const LandmarkGraph &graph, lp::LPSolverType solver_type) @@ -199,8 +198,9 @@ lp::LinearProgram OptimalCostPartitioningAlgorithm::build_initial_lp() { /* Coefficients of constraints will be updated and recreated in each state. We ignore them for the initial LP. */ - return lp::LinearProgram(lp::LPObjectiveSense::MAXIMIZE, move(lp_variables), - {}, lp_solver.get_infinity()); + return lp::LinearProgram( + lp::LPObjectiveSense::MAXIMIZE, move(lp_variables), {}, + lp_solver.get_infinity()); } /* @@ -261,7 +261,6 @@ bool OptimalCostPartitioningAlgorithm::define_constraint_matrix( return false; } - double OptimalCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value( const LandmarkStatusManager &landmark_status_manager, const State &ancestor_state) { diff --git a/src/search/landmarks/landmark_cost_partitioning_algorithms.h b/src/search/landmarks/landmark_cost_partitioning_algorithms.h index 664c9873f1..7961b77d34 100644 --- a/src/search/landmarks/landmark_cost_partitioning_algorithms.h +++ b/src/search/landmarks/landmark_cost_partitioning_algorithms.h @@ -1,6 +1,7 @@ #ifndef LANDMARKS_LANDMARK_COST_PARTITIONING_ALGORITHMS_H #define LANDMARKS_LANDMARK_COST_PARTITIONING_ALGORITHMS_H +#include "../per_state_bitset.h" #include "../task_proxy.h" #include "../lp/lp_solver.h" @@ -8,8 +9,6 @@ #include #include -#include "../per_state_bitset.h" - class ConstBitsetView; class OperatorsProxy; @@ -24,8 +23,8 @@ class CostPartitioningAlgorithm { const LandmarkGraph &landmark_graph; const std::vector operator_costs; public: - CostPartitioningAlgorithm(const std::vector &operator_costs, - const LandmarkGraph &graph); + CostPartitioningAlgorithm( + const std::vector &operator_costs, const LandmarkGraph &graph); virtual 
~CostPartitioningAlgorithm() = default; virtual double get_cost_partitioned_heuristic_value( @@ -47,8 +46,8 @@ class UniformCostPartitioningAlgorithm : public CostPartitioningAlgorithm { */ double first_pass( std::vector &landmarks_achieved_by_operator, - std::vector &action_landmarks, - ConstBitsetView &past, ConstBitsetView &future); + std::vector &action_landmarks, ConstBitsetView &past, + ConstBitsetView &future); std::vector second_pass( std::vector &landmarks_achieved_by_operator, const std::vector &action_landmarks, ConstBitsetView &past, @@ -58,9 +57,9 @@ class UniformCostPartitioningAlgorithm : public CostPartitioningAlgorithm { const std::vector &landmarks_achieved_by_operator, ConstBitsetView &past, ConstBitsetView &future); public: - UniformCostPartitioningAlgorithm(const std::vector &operator_costs, - const LandmarkGraph &graph, - bool use_action_landmarks); + UniformCostPartitioningAlgorithm( + const std::vector &operator_costs, const LandmarkGraph &graph, + bool use_action_landmarks); virtual double get_cost_partitioned_heuristic_value( const LandmarkStatusManager &landmark_status_manager, @@ -85,9 +84,9 @@ class OptimalCostPartitioningAlgorithm : public CostPartitioningAlgorithm { bool define_constraint_matrix( ConstBitsetView &past, ConstBitsetView &future, int num_cols); public: - OptimalCostPartitioningAlgorithm(const std::vector &operator_costs, - const LandmarkGraph &graph, - lp::LPSolverType solver_type); + OptimalCostPartitioningAlgorithm( + const std::vector &operator_costs, const LandmarkGraph &graph, + lp::LPSolverType solver_type); virtual double get_cost_partitioned_heuristic_value( const LandmarkStatusManager &landmark_status_manager, diff --git a/src/search/landmarks/landmark_cost_partitioning_heuristic.cc b/src/search/landmarks/landmark_cost_partitioning_heuristic.cc index 487d889289..a95c026f2d 100644 --- a/src/search/landmarks/landmark_cost_partitioning_heuristic.cc +++ b/src/search/landmarks/landmark_cost_partitioning_heuristic.cc 
@@ -16,10 +16,9 @@ using namespace std; namespace landmarks { LandmarkCostPartitioningHeuristic::LandmarkCostPartitioningHeuristic( - const shared_ptr &lm_factory, bool pref, - bool prog_goal, bool prog_gn, bool prog_r, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity, + const shared_ptr &lm_factory, bool pref, bool prog_goal, + bool prog_gn, bool prog_r, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity, CostPartitioningMethod cost_partitioning, bool alm, lp::LPSolverType lpsolver) : LandmarkHeuristic( @@ -39,8 +38,8 @@ void LandmarkCostPartitioningHeuristic::check_unsupported_features( utils::exit_with(utils::ExitCode::SEARCH_UNSUPPORTED); } - if (!landmark_factory->supports_conditional_effects() - && task_properties::has_conditional_effects(task_proxy)) { + if (!landmark_factory->supports_conditional_effects() && + task_properties::has_conditional_effects(task_proxy)) { cerr << "Conditional effects not supported by the landmark " << "generation method." 
<< endl; utils::exit_with(utils::ExitCode::SEARCH_UNSUPPORTED); @@ -84,9 +83,11 @@ bool LandmarkCostPartitioningHeuristic::dead_ends_are_reliable() const { } class LandmarkCostPartitioningHeuristicFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + Evaluator, LandmarkCostPartitioningHeuristic> { public: - LandmarkCostPartitioningHeuristicFeature() : TypedFeature("landmark_cost_partitioning") { + LandmarkCostPartitioningHeuristicFeature() + : TypedFeature("landmark_cost_partitioning") { document_title("Landmark cost partitioning heuristic"); document_synopsis( "Formerly known as the admissible landmark heuristic.\n" @@ -97,9 +98,7 @@ class LandmarkCostPartitioningHeuristicFeature "https://www.ijcai.org/Proceedings/09/Papers/288.pdf", "Proceedings of the 21st International Joint Conference on " "Artificial Intelligence (IJCAI 2009)", - "1728-1733", - "AAAI Press", - "2009") + + "1728-1733", "AAAI Press", "2009") + "and" + utils::format_conference_reference( {"Emil Keyder and Silvia Richter and Malte Helmert"}, @@ -107,9 +106,7 @@ class LandmarkCostPartitioningHeuristicFeature "https://ai.dmi.unibas.ch/papers/keyder-et-al-ecai2010.pdf", "Proceedings of the 19th European Conference on Artificial " "Intelligence (ECAI 2010)", - "335-340", - "IOS Press", - "2010")); + "335-340", "IOS Press", "2010")); /* We usually have the options of base classes behind the options @@ -162,29 +159,27 @@ class LandmarkCostPartitioningHeuristicFeature document_language_support("axioms", "not allowed"); document_property("admissible", "yes"); - document_property("consistent", - "no; see document note about consistency"); + document_property( + "consistent", "no; see document note about consistency"); document_property("safe", "yes"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return 
plugins::make_shared_from_arg_tuples< + LandmarkCostPartitioningHeuristic>( get_landmark_heuristic_arguments_from_options(opts), opts.get("cost_partitioning"), opts.get("alm"), - lp::get_lp_solver_arguments_from_options(opts) - ); + lp::get_lp_solver_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; static plugins::TypedEnumPlugin _enum_plugin({ - {"optimal", - "use optimal (LP-based) cost partitioning"}, - {"uniform", - "partition operator costs uniformly among all landmarks " - "achieved by that operator"}, - }); + {"optimal", "use optimal (LP-based) cost partitioning"}, + {"uniform", "partition operator costs uniformly among all landmarks " + "achieved by that operator"}, +}); } diff --git a/src/search/landmarks/landmark_cost_partitioning_heuristic.h b/src/search/landmarks/landmark_cost_partitioning_heuristic.h index a3e4cd5a3f..344a0a5774 100644 --- a/src/search/landmarks/landmark_cost_partitioning_heuristic.h +++ b/src/search/landmarks/landmark_cost_partitioning_heuristic.h @@ -2,6 +2,7 @@ #define LANDMARKS_LANDMARK_COST_PARTITIONING_HEURISTIC_H #include "landmark_heuristic.h" + #include "../lp/lp_solver.h" namespace landmarks { @@ -18,17 +19,16 @@ class LandmarkCostPartitioningHeuristic : public LandmarkHeuristic { void check_unsupported_features( const std::shared_ptr &landmark_factory); void set_cost_partitioning_algorithm( - CostPartitioningMethod cost_partitioning, - lp::LPSolverType lpsolver, bool use_action_landmarks); + CostPartitioningMethod cost_partitioning, lp::LPSolverType lpsolver, + bool use_action_landmarks); int get_heuristic_value(const State &ancestor_state) override; public: LandmarkCostPartitioningHeuristic( const std::shared_ptr &lm_factory, bool pref, bool prog_goal, bool prog_gn, bool prog_r, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity, + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, 
utils::Verbosity verbosity, CostPartitioningMethod cost_partitioning, bool alm, lp::LPSolverType lpsolver); diff --git a/src/search/landmarks/landmark_factory.cc b/src/search/landmarks/landmark_factory.cc index a0a140510e..ca4c39e03c 100644 --- a/src/search/landmarks/landmark_factory.cc +++ b/src/search/landmarks/landmark_factory.cc @@ -13,7 +13,6 @@ #include #include - using namespace std; namespace landmarks { @@ -116,8 +115,8 @@ void LandmarkFactory::log_landmark_graph_info( const TaskProxy &task_proxy, const utils::Timer &landmark_generation_timer) const { if (log.is_at_least_normal()) { - log << "Landmarks generation time: " - << landmark_generation_timer << endl; + log << "Landmarks generation time: " << landmark_generation_timer + << endl; if (landmark_graph->get_num_landmarks() == 0) { if (log.is_warning()) { log << "Warning! No landmarks found. Task unsolvable?" << endl; @@ -193,17 +192,15 @@ tuple get_landmark_factory_arguments_from_options( void add_use_orders_option_to_feature(plugins::Feature &feature) { feature.add_option( - "use_orders", - "use orders between landmarks", - "true"); + "use_orders", "use orders between landmarks", "true"); } -bool get_use_orders_arguments_from_options( - const plugins::Options &opts) { +bool get_use_orders_arguments_from_options(const plugins::Options &opts) { return opts.get("use_orders"); } -static class LandmarkFactoryCategoryPlugin : public plugins::TypedCategoryPlugin { +static class LandmarkFactoryCategoryPlugin + : public plugins::TypedCategoryPlugin { public: LandmarkFactoryCategoryPlugin() : TypedCategoryPlugin("LandmarkFactory") { document_synopsis( @@ -214,6 +211,5 @@ static class LandmarkFactoryCategoryPlugin : public plugins::TypedCategoryPlugin "OptionSyntax#Landmark_Predefinitions."); allow_variable_binding(); } -} -_category_plugin; +} _category_plugin; } diff --git a/src/search/landmarks/landmark_factory.h b/src/search/landmarks/landmark_factory.h index 29aaea2ff9..8b65dbcfc1 100644 --- 
a/src/search/landmarks/landmark_factory.h +++ b/src/search/landmarks/landmark_factory.h @@ -68,8 +68,7 @@ extern void add_landmark_factory_options_to_feature(plugins::Feature &feature); extern std::tuple get_landmark_factory_arguments_from_options( const plugins::Options &opts); extern void add_use_orders_option_to_feature(plugins::Feature &feature); -extern bool get_use_orders_arguments_from_options( - const plugins::Options &opts); +extern bool get_use_orders_arguments_from_options(const plugins::Options &opts); } #endif diff --git a/src/search/landmarks/landmark_factory_hm.cc b/src/search/landmarks/landmark_factory_hm.cc index f7f2b63bba..75b49d6ae7 100644 --- a/src/search/landmarks/landmark_factory_hm.cc +++ b/src/search/landmarks/landmark_factory_hm.cc @@ -1,7 +1,5 @@ #include "landmark_factory_hm.h" -#include - #include "exploration.h" #include "landmark.h" @@ -14,6 +12,7 @@ #include "../utils/markup.h" #include "../utils/system.h" +#include #include #include #include @@ -38,10 +37,12 @@ static void set_difference(vector &set1, const vector &set2) { swap(set1, result); } -static bool are_mutex(const VariablesProxy &variables, - const FactPair &atom1, const FactPair &atom2) { - return variables[atom1.var].get_fact(atom1.value).is_mutex( - variables[atom2.var].get_fact(atom2.value)); +static bool are_mutex( + const VariablesProxy &variables, const FactPair &atom1, + const FactPair &atom2) { + return variables[atom1.var] + .get_fact(atom1.value) + .is_mutex(variables[atom2.var].get_fact(atom2.value)); } void LandmarkFactoryHM::get_m_sets_including_current_var( @@ -51,12 +52,12 @@ void LandmarkFactoryHM::get_m_sets_including_current_var( for (int value = 0; value < domain_size; ++value) { FactPair atom(current_var, value); bool use_var = ranges::none_of(current, [&](const FactPair &other) { - return are_mutex(variables, atom, other); - }); + return are_mutex(variables, atom, other); + }); if (use_var) { current.push_back(atom); - get_m_sets(variables, 
num_included + 1, current_var + 1, - current, subsets); + get_m_sets( + variables, num_included + 1, current_var + 1, current, subsets); current.pop_back(); } } @@ -83,17 +84,18 @@ void LandmarkFactoryHM::get_m_sets( } void LandmarkFactoryHM::get_m_sets_of_set_including_current_proposition( - const VariablesProxy &variables, int num_included, - int current_index, Propositions ¤t, - vector &subsets, const Propositions &superset) { + const VariablesProxy &variables, int num_included, int current_index, + Propositions ¤t, vector &subsets, + const Propositions &superset) { const FactPair &atom = superset[current_index]; bool use_proposition = ranges::none_of(current, [&](const FactPair &other) { - return are_mutex(variables, atom, other); - }); + return are_mutex(variables, atom, other); + }); if (use_proposition) { current.push_back(atom); - get_m_sets_of_set(variables, num_included + 1, current_index + 1, - current, subsets, superset); + get_m_sets_of_set( + variables, num_included + 1, current_index + 1, current, subsets, + superset); current.pop_back(); } } @@ -116,24 +118,25 @@ void LandmarkFactoryHM::get_m_sets_of_set( get_m_sets_of_set_including_current_proposition( variables, num_included, current_index, current, subsets, superset); // Do not include proposition at `current_index` in set. 
- get_m_sets_of_set(variables, num_included, current_index + 1, - current, subsets, superset); + get_m_sets_of_set( + variables, num_included, current_index + 1, current, subsets, superset); } -void LandmarkFactoryHM::get_split_m_sets_including_current_proposition_from_first( - const VariablesProxy &variables, int num_included1, int num_included2, - int current_index1, int current_index2, Propositions ¤t, - vector &subsets, const Propositions &superset1, - const Propositions &superset2) { +void LandmarkFactoryHM:: + get_split_m_sets_including_current_proposition_from_first( + const VariablesProxy &variables, int num_included1, int num_included2, + int current_index1, int current_index2, Propositions ¤t, + vector &subsets, const Propositions &superset1, + const Propositions &superset2) { const FactPair &atom = superset1[current_index1]; bool use_proposition = ranges::none_of(current, [&](const FactPair &other) { - return are_mutex(variables, atom, other); - }); + return are_mutex(variables, atom, other); + }); if (use_proposition) { current.push_back(atom); - get_split_m_sets(variables, num_included1 + 1, num_included2, - current_index1 + 1, current_index2, - current, subsets, superset1, superset2); + get_split_m_sets( + variables, num_included1 + 1, num_included2, current_index1 + 1, + current_index2, current, subsets, superset1, superset2); current.pop_back(); } } @@ -142,9 +145,9 @@ void LandmarkFactoryHM::get_split_m_sets_including_current_proposition_from_firs that all subsets have >= 1 elements from each superset. 
*/ void LandmarkFactoryHM::get_split_m_sets( const VariablesProxy &variables, int num_included1, int num_included2, - int current_index1, int current_index2, - Propositions ¤t, vector &subsets, - const Propositions &superset1, const Propositions &superset2) { + int current_index1, int current_index2, Propositions ¤t, + vector &subsets, const Propositions &superset1, + const Propositions &superset2) { int superset1_size = static_cast(superset1.size()); int superset2_size = static_cast(superset2.size()); assert(superset1_size > 0); @@ -172,8 +175,9 @@ void LandmarkFactoryHM::get_split_m_sets( } else { /* Switching order of 1 and 2 here to avoid code duplication in the form - of a function `get_split_m_sets_including_current_proposition_from_second` - analogous to `get_split_m_sets_including_current_proposition_from_first`. + of a function + `get_split_m_sets_including_current_proposition_from_second` analogous + to `get_split_m_sets_including_current_proposition_from_first`. */ get_split_m_sets_including_current_proposition_from_first( variables, num_included2, num_included1, current_index2, @@ -223,8 +227,8 @@ static bool proposition_variables_disjoint( We assume the variables in `superset1` and `superset2` are disjoint. 
*/ vector LandmarkFactoryHM::get_split_m_sets( - const VariablesProxy &variables, - const Propositions &superset1, const Propositions &superset2) { + const VariablesProxy &variables, const Propositions &superset1, + const Propositions &superset2) { assert(proposition_variables_disjoint(superset1, superset2)); Propositions c; vector subsets; @@ -256,9 +260,9 @@ void LandmarkFactoryHM::print_proposition( if (log.is_at_least_verbose()) { VariableProxy var = variables[proposition.var]; FactProxy atom = var.get_fact(proposition.value); - log << atom.get_name() << " (" - << var.get_name() << "(" << atom.get_variable().get_id() << ")" - << "->" << atom.get_value() << ")"; + log << atom.get_name() << " (" << var.get_name() << "(" + << atom.get_variable().get_id() << ")" << "->" << atom.get_value() + << ")"; } } @@ -339,7 +343,8 @@ void LandmarkFactoryHM::print_proposition_set( } set LandmarkFactoryHM::print_effect_condition( - const VariablesProxy &variables, const vector &effect_conditions) const { + const VariablesProxy &variables, + const vector &effect_conditions) const { set effect_condition_set; log << "effect conditions:\n"; for (int effect_condition : effect_conditions) { @@ -454,8 +459,8 @@ Propositions LandmarkFactoryHM::initialize_preconditions( Propositions LandmarkFactoryHM::initialize_postconditions( const VariablesProxy &variables, const OperatorProxy &op, PiMOperator &pm_op) { - Propositions postcondition = get_operator_postcondition( - static_cast(variables.size()), op); + Propositions postcondition = + get_operator_postcondition(static_cast(variables.size()), op); vector subsets = get_m_sets(variables, postcondition); pm_op.effect.reserve(subsets.size()); @@ -486,7 +491,7 @@ vector LandmarkFactoryHM::compute_noop_effect( const vector &postconditions) { vector noop_effect; noop_effect.reserve(postconditions.size()); - for (const auto &subset: postconditions) { + for (const auto &subset : postconditions) { assert(static_cast(subset.size()) <= m); 
assert(set_indices.contains(subset)); int set_index = set_indices[subset]; @@ -496,9 +501,9 @@ vector LandmarkFactoryHM::compute_noop_effect( } void LandmarkFactoryHM::add_conditional_noop( - PiMOperator &pm_op, int op_id, - const VariablesProxy &variables, const Propositions &propositions, - const Propositions &preconditions, const Propositions &postconditions) { + PiMOperator &pm_op, int op_id, const VariablesProxy &variables, + const Propositions &propositions, const Propositions &preconditions, + const Propositions &postconditions) { int noop_index = static_cast(pm_op.conditional_noops.size()); /* @@ -533,12 +538,13 @@ void LandmarkFactoryHM::initialize_noops( if (static_cast(propositions.size()) >= m) { break; } - if (proposition_set_variables_disjoint(postconditions, propositions) - && proposition_sets_are_mutex(variables, postconditions, - propositions)) { + if (proposition_set_variables_disjoint(postconditions, propositions) && + proposition_sets_are_mutex( + variables, postconditions, propositions)) { // For each such set, add a "conditional effect" to the operator. 
- add_conditional_noop(pm_op, op_id, variables, - propositions, preconditions, postconditions); + add_conditional_noop( + pm_op, op_id, variables, propositions, preconditions, + postconditions); } } pm_op.conditional_noops.shrink_to_fit(); @@ -619,10 +625,9 @@ void LandmarkFactoryHM::discard_conjunctive_landmarks() { log << "Discarding " << landmark_graph->get_num_conjunctive_landmarks() << " conjunctive landmarks" << endl; } - landmark_graph->remove_node_if( - [](const LandmarkNode &node) { - return node.get_landmark().type == CONJUNCTIVE; - }); + landmark_graph->remove_node_if([](const LandmarkNode &node) { + return node.get_landmark().type == CONJUNCTIVE; + }); } static bool operator_can_achieve_landmark( @@ -638,8 +643,8 @@ static bool operator_can_achieve_landmark( continue; } auto mutex = [&](const FactPair &other) { - return are_mutex(variables, atom, other); - }; + return are_mutex(variables, atom, other); + }; if (ranges::any_of(postcondition, mutex)) { return false; } @@ -742,7 +747,8 @@ void LandmarkFactoryHM::propagate_pm_propositions( } } -LandmarkFactoryHM::TriggerSet LandmarkFactoryHM::mark_state_propositions_reached( +LandmarkFactoryHM::TriggerSet +LandmarkFactoryHM::mark_state_propositions_reached( const State &state, const VariablesProxy &variables) { vector state_propositions = get_m_sets(variables, state); TriggerSet triggers; @@ -772,8 +778,8 @@ void LandmarkFactoryHM::collect_condition_landmarks( const vector &condition, vector &landmarks) const { for (int proposition : condition) { const vector &other_landmarks = hm_table[proposition].landmarks; - landmarks.insert(landmarks.end(), other_landmarks.begin(), - other_landmarks.end()); + landmarks.insert( + landmarks.end(), other_landmarks.begin(), other_landmarks.end()); } // Each proposition is a landmark for itself but not stored for itself. 
landmarks.insert(landmarks.end(), condition.begin(), condition.end()); @@ -807,8 +813,8 @@ void LandmarkFactoryHM::update_proposition_landmark( if (ranges::find(landmarks, proposition) == landmarks.end()) { hm_entry.first_achievers.insert(op_id); if (use_orders) { - set_intersection(hm_entry.precondition_landmarks, - precondition_landmarks); + set_intersection( + hm_entry.precondition_landmarks, precondition_landmarks); } } @@ -823,20 +829,20 @@ void LandmarkFactoryHM::update_effect_landmarks( for (int proposition : effect) { HMEntry &hm_entry = hm_table[proposition]; if (hm_entry.reached) { - update_proposition_landmark(op_id, proposition, landmarks, - precondition_landmarks, triggers); + update_proposition_landmark( + op_id, proposition, landmarks, precondition_landmarks, + triggers); } else { initialize_proposition_landmark( - op_id, hm_entry, landmarks, precondition_landmarks, - triggers); + op_id, hm_entry, landmarks, precondition_landmarks, triggers); } } } void LandmarkFactoryHM::update_noop_landmarks( const unordered_set ¤t_triggers, const PiMOperator &op, - const vector &landmarks, - const vector &prerequisites, TriggerSet &next_triggers) { + const vector &landmarks, const vector &prerequisites, + TriggerSet &next_triggers) { if (current_triggers.empty()) { /* The landmarks for the operator have changed, so we have to recompute @@ -876,8 +882,9 @@ void LandmarkFactoryHM::compute_hm_landmarks(const TaskProxy &task_proxy) { precondition.end()); utils::sort_unique(precondition_landmarks); } - update_effect_landmarks(op_id, op.effect, landmarks, - precondition_landmarks, next_trigger); + update_effect_landmarks( + op_id, op.effect, landmarks, precondition_landmarks, + next_trigger); update_noop_landmarks( triggers, op, landmarks, precondition_landmarks, next_trigger); } @@ -910,8 +917,8 @@ void LandmarkFactoryHM::compute_noop_landmarks( } update_effect_landmarks( - op_id, effect, conditional_noop_landmarks, - conditional_noop_necessary, next_trigger); + op_id, 
effect, conditional_noop_landmarks, conditional_noop_necessary, + next_trigger); } void LandmarkFactoryHM::add_landmark_node(int set_index, bool goal) { @@ -922,14 +929,15 @@ void LandmarkFactoryHM::add_landmark_node(int set_index, bool goal) { assert(!facts.empty()); LandmarkType type = facts.size() == 1 ? ATOMIC : CONJUNCTIVE; Landmark landmark(move(facts), type, goal); - landmark.first_achievers.insert(hm_entry.first_achievers.begin(), - hm_entry.first_achievers.end()); + landmark.first_achievers.insert( + hm_entry.first_achievers.begin(), hm_entry.first_achievers.end()); landmark_nodes[set_index] = &landmark_graph->add_landmark(move(landmark)); } } -unordered_set LandmarkFactoryHM::collect_and_add_landmarks_to_landmark_graph( +unordered_set +LandmarkFactoryHM::collect_and_add_landmarks_to_landmark_graph( const VariablesProxy &variables, const Propositions &goals) { unordered_set landmarks; for (const Propositions &goal_subset : get_m_sets(variables, goals)) { @@ -1008,8 +1016,7 @@ void LandmarkFactoryHM::add_landmark_orderings( } } -void LandmarkFactoryHM::construct_landmark_graph( - const TaskProxy &task_proxy) { +void LandmarkFactoryHM::construct_landmark_graph(const TaskProxy &task_proxy) { Propositions goals = task_properties::get_fact_pairs(task_proxy.get_goals()); VariablesProxy variables = task_proxy.get_variables(); @@ -1050,29 +1057,23 @@ class LandmarkFactoryHMFeature "https://ai.dmi.unibas.ch/papers/keyder-et-al-ecai2010.pdf", "Proceedings of the 19th European Conference on Artificial " "Intelligence (ECAI 2010)", - "335-340", - "IOS Press", - "2010")); + "335-340", "IOS Press", "2010")); add_option( "m", "subset size (if unsure, use the default of 2)", "2"); add_option( - "conjunctive_landmarks", - "keep conjunctive landmarks", - "true"); + "conjunctive_landmarks", "keep conjunctive landmarks", "true"); add_use_orders_option_to_feature(*this); add_landmark_factory_options_to_feature(*this); document_language_support( - "conditional_effects", - 
"ignored, i.e. not supported"); + "conditional_effects", "ignored, i.e. not supported"); } virtual shared_ptr create_component( const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get("m"), - opts.get("conjunctive_landmarks"), + opts.get("m"), opts.get("conjunctive_landmarks"), get_use_orders_arguments_from_options(opts), get_landmark_factory_arguments_from_options(opts)); } diff --git a/src/search/landmarks/landmark_factory_hm.h b/src/search/landmarks/landmark_factory_hm.h index 008ce73e8e..8dcc32f597 100644 --- a/src/search/landmarks/landmark_factory_hm.h +++ b/src/search/landmarks/landmark_factory_hm.h @@ -30,8 +30,8 @@ struct ConditionalNoop { std::vector effect_condition; std::vector effect; - ConditionalNoop(std::vector &&effect_condition, - std::vector &&effect) + ConditionalNoop( + std::vector &&effect_condition, std::vector &&effect) : effect_condition(move(effect_condition)), effect(move(effect)) { } }; @@ -107,30 +107,24 @@ class LandmarkFactoryHM : public LandmarkFactory { TriggerSet mark_state_propositions_reached( const State &state, const VariablesProxy &variables); void collect_condition_landmarks( - const std::vector &condition, - std::vector &landmarks) const; + const std::vector &condition, std::vector &landmarks) const; void initialize_proposition_landmark( int op_id, HMEntry &hm_entry, const std::vector &landmarks, - const std::vector &precondition_landmarks, - TriggerSet &triggers); + const std::vector &precondition_landmarks, TriggerSet &triggers); void update_proposition_landmark( int op_id, int proposition, const std::vector &landmarks, - const std::vector &precondition_landmarks, - TriggerSet &triggers); + const std::vector &precondition_landmarks, TriggerSet &triggers); void update_effect_landmarks( int op_id, const std::vector &effect, const std::vector &landmarks, - const std::vector &precondition_landmarks, - TriggerSet &triggers); + const std::vector &precondition_landmarks, TriggerSet 
&triggers); void update_noop_landmarks( const std::unordered_set ¤t_triggers, const PiMOperator &op, const std::vector &landmarks, const std::vector &prerequisites, TriggerSet &next_triggers); void compute_noop_landmarks( - int op_id, int noop_index, - const std::vector &local_landmarks, - const std::vector &local_necessary, - TriggerSet &next_trigger); + int op_id, int noop_index, const std::vector &local_landmarks, + const std::vector &local_necessary, TriggerSet &next_trigger); void compute_hm_landmarks(const TaskProxy &task_proxy); void trigger_operator( @@ -152,9 +146,9 @@ class LandmarkFactoryHM : public LandmarkFactory { std::vector compute_noop_effect( const std::vector &postconditions); void add_conditional_noop( - PiMOperator &pm_op, int op_id, - const VariablesProxy &variables, const Propositions &propositions, - const Propositions &preconditions, const Propositions &postconditions); + PiMOperator &pm_op, int op_id, const VariablesProxy &variables, + const Propositions &propositions, const Propositions &preconditions, + const Propositions &postconditions); void initialize_noops( const VariablesProxy &variables, PiMOperator &pm_op, int op_id, const Propositions &preconditions, const Propositions &postconditions); @@ -176,13 +170,15 @@ class LandmarkFactoryHM : public LandmarkFactory { void free_unneeded_memory(); void print_proposition_set( - const VariablesProxy &variables, const Propositions &propositions) const; + const VariablesProxy &variables, + const Propositions &propositions) const; void print_pm_operator( const VariablesProxy &variables, const PiMOperator &op) const; void print_conditional_noop( const VariablesProxy &variables, const ConditionalNoop &conditional_noop, - std::vector, std::set>> &conditions) const; + std::vector, std::set>> + &conditions) const; std::set print_effect_condition( const VariablesProxy &variables, const std::vector &effect_condition) const; @@ -190,7 +186,8 @@ class LandmarkFactoryHM : public LandmarkFactory { const 
VariablesProxy &variables, const std::vector &effect) const; void print_action( const VariablesProxy &variables, const PiMOperator &op, - const std::vector, std::set>> &conditions) const; + const std::vector, std::set>> + &conditions) const; void get_m_sets_including_current_var( const VariablesProxy &variables, int num_included, int current_var, @@ -200,24 +197,24 @@ class LandmarkFactoryHM : public LandmarkFactory { Propositions ¤t, std::vector &subsets); void get_m_sets_of_set_including_current_proposition( - const VariablesProxy &variables, int num_included, - int current_index, Propositions ¤t, - std::vector &subsets, const Propositions &superset); + const VariablesProxy &variables, int num_included, int current_index, + Propositions ¤t, std::vector &subsets, + const Propositions &superset); void get_m_sets_of_set( - const VariablesProxy &variables, int num_included, - int current_index, Propositions ¤t, - std::vector &subsets, const Propositions &superset); + const VariablesProxy &variables, int num_included, int current_index, + Propositions ¤t, std::vector &subsets, + const Propositions &superset); void get_split_m_sets_including_current_proposition_from_first( - const VariablesProxy &variables, int num_included1, - int num_included2, int current_index1, int current_index2, - Propositions ¤t, std::vector &subsets, - const Propositions &superset1, const Propositions &superset2); + const VariablesProxy &variables, int num_included1, int num_included2, + int current_index1, int current_index2, Propositions ¤t, + std::vector &subsets, const Propositions &superset1, + const Propositions &superset2); void get_split_m_sets( - const VariablesProxy &variables, int num_included1, - int num_included2, int current_index1, int current_index2, - Propositions ¤t, std::vector &subsets, - const Propositions &superset1, const Propositions &superset2); + const VariablesProxy &variables, int num_included1, int num_included2, + int current_index1, int current_index2, 
Propositions ¤t, + std::vector &subsets, const Propositions &superset1, + const Propositions &superset2); std::vector get_m_sets(const VariablesProxy &variables); @@ -228,15 +225,16 @@ class LandmarkFactoryHM : public LandmarkFactory { const VariablesProxy &variables, const State &state); std::vector get_split_m_sets( - const VariablesProxy &variables, - const Propositions &superset1, const Propositions &superset2); + const VariablesProxy &variables, const Propositions &superset1, + const Propositions &superset2); void print_proposition( const VariablesProxy &variables, const FactPair &proposition) const; public: - LandmarkFactoryHM(int m, bool conjunctive_landmarks, - bool use_orders, utils::Verbosity verbosity); + LandmarkFactoryHM( + int m, bool conjunctive_landmarks, bool use_orders, + utils::Verbosity verbosity); virtual bool supports_conditional_effects() const override; }; diff --git a/src/search/landmarks/landmark_factory_merged.cc b/src/search/landmarks/landmark_factory_merged.cc index 6d0a10c030..765e30be89 100644 --- a/src/search/landmarks/landmark_factory_merged.cc +++ b/src/search/landmarks/landmark_factory_merged.cc @@ -2,11 +2,10 @@ #include "landmark.h" #include "landmark_graph.h" +#include "util.h" -#include "../utils/component_errors.h" #include "../plugins/plugin.h" - -#include "util.h" +#include "../utils/component_errors.h" #include @@ -19,8 +18,7 @@ class LandmarkNode; LandmarkFactoryMerged::LandmarkFactoryMerged( const vector> &lm_factories, utils::Verbosity verbosity) - : LandmarkFactory(verbosity), - landmark_factories(lm_factories) { + : LandmarkFactory(verbosity), landmark_factories(lm_factories) { utils::verify_list_not_empty(lm_factories, "lm_factories"); } @@ -49,12 +47,14 @@ LandmarkNode *LandmarkFactoryMerged::get_matching_landmark( return nullptr; } -vector> LandmarkFactoryMerged::generate_landmark_graphs_of_subfactories( +vector> +LandmarkFactoryMerged::generate_landmark_graphs_of_subfactories( const shared_ptr &task) { vector> 
landmark_graphs; landmark_graphs.reserve(landmark_factories.size()); achievers_calculated = true; - for (const shared_ptr &landmark_factory : landmark_factories) { + for (const shared_ptr &landmark_factory : + landmark_factories) { landmark_graphs.push_back( landmark_factory->compute_landmark_graph(task)); achievers_calculated &= landmark_factory->achievers_are_calculated(); @@ -105,8 +105,8 @@ void LandmarkFactoryMerged::add_disjunctive_landmarks( request (e.g., heuristics that consider orders might want to keep all landmarks). */ - bool exists = ranges::any_of( - landmark.atoms, [&](const FactPair &atom) { + bool exists = + ranges::any_of(landmark.atoms, [&](const FactPair &atom) { return landmark_graph->contains_landmark(atom); }); if (!exists) { @@ -148,8 +148,8 @@ void LandmarkFactoryMerged::add_landmark_orderings( void LandmarkFactoryMerged::generate_landmarks( const shared_ptr &task) { if (log.is_at_least_normal()) { - log << "Merging " << landmark_factories.size() - << " landmark graphs" << endl; + log << "Merging " << landmark_factories.size() << " landmark graphs" + << endl; } vector> landmark_graphs = generate_landmark_graphs_of_subfactories(task); @@ -188,12 +188,10 @@ class LandmarkFactoryMergedFeature "orderings take precedence in the usual manner " "(gn > nat > reas > o_reas). 
"); document_note( - "Note", - "Does not currently support conjunctive landmarks"); + "Note", "Does not currently support conjunctive landmarks"); document_language_support( - "conditional_effects", - "supported if all components support them"); + "conditional_effects", "supported if all components support them"); } virtual shared_ptr create_component( diff --git a/src/search/landmarks/landmark_factory_merged.h b/src/search/landmarks/landmark_factory_merged.h index 70a6cab917..789b5a991e 100644 --- a/src/search/landmarks/landmark_factory_merged.h +++ b/src/search/landmarks/landmark_factory_merged.h @@ -9,14 +9,17 @@ namespace landmarks { class LandmarkFactoryMerged : public LandmarkFactory { std::vector> landmark_factories; - std::vector> generate_landmark_graphs_of_subfactories( + std::vector> + generate_landmark_graphs_of_subfactories( const std::shared_ptr &task); - void add_atomic_landmarks( - const std::vector> &landmark_graphs) const; + void add_atomic_landmarks(const std::vector> + &landmark_graphs) const; void add_disjunctive_landmarks( - const std::vector> &landmark_graphs) const; + const std::vector> &landmark_graphs) + const; void add_landmark_orderings( - const std::vector> &landmark_graphs) const; + const std::vector> &landmark_graphs) + const; virtual void generate_landmarks( const std::shared_ptr &task) override; void postprocess(); diff --git a/src/search/landmarks/landmark_factory_reasonable_orders_hps.cc b/src/search/landmarks/landmark_factory_reasonable_orders_hps.cc index e15b9b0484..c312677a61 100644 --- a/src/search/landmarks/landmark_factory_reasonable_orders_hps.cc +++ b/src/search/landmarks/landmark_factory_reasonable_orders_hps.cc @@ -2,7 +2,6 @@ #include "landmark.h" #include "landmark_graph.h" - #include "util.h" #include "../plugins/plugin.h" @@ -14,10 +13,8 @@ using namespace std; namespace landmarks { LandmarkFactoryReasonableOrdersHPS::LandmarkFactoryReasonableOrdersHPS( - const shared_ptr &lm_factory, - utils::Verbosity verbosity) - 
: LandmarkFactory(verbosity), - landmark_factory(lm_factory) { + const shared_ptr &lm_factory, utils::Verbosity verbosity) + : LandmarkFactory(verbosity), landmark_factory(lm_factory) { } void LandmarkFactoryReasonableOrdersHPS::generate_landmarks( @@ -52,8 +49,8 @@ void LandmarkFactoryReasonableOrdersHPS::approximate_goal_orderings( } } -static void collect_ancestors(unordered_set &result, - const LandmarkNode &node) { +static void collect_ancestors( + unordered_set &result, const LandmarkNode &node) { for (const auto &[parent, type] : node.parents) { if (type >= OrderingType::NATURAL && !result.contains(parent)) { result.insert(parent); @@ -68,7 +65,7 @@ static unordered_set collect_reasonable_ordering_candidates( for (const auto &[child, type] : node.children) { if (type >= OrderingType::GREEDY_NECESSARY) { // Found a landmark such that `node` ->_gn `child`. - for (const auto &[parent, parent_type]: child->parents) { + for (const auto &[parent, parent_type] : child->parents) { if (parent->get_landmark().type == DISJUNCTIVE) { continue; } @@ -88,8 +85,8 @@ static unordered_set collect_reasonable_ordering_candidates( with `landmark`. */ void LandmarkFactoryReasonableOrdersHPS::insert_reasonable_orderings( const TaskProxy &task_proxy, - const unordered_set &candidates, - LandmarkNode &node, const Landmark &landmark) const { + const unordered_set &candidates, LandmarkNode &node, + const Landmark &landmark) const { for (LandmarkNode *other : candidates) { const Landmark &other_landmark = other->get_landmark(); if (landmark == other_landmark || other_landmark.type == DISJUNCTIVE) { @@ -137,8 +134,8 @@ struct EffectConditionSet { utils::HashSet conditions; }; -static unordered_map compute_effect_conditions_by_variable( - const EffectsProxy &effects) { +static unordered_map +compute_effect_conditions_by_variable(const EffectsProxy &effects) { // Variables that occur in multiple effects with different values. 
unordered_set nogood_effect_vars; unordered_map effect_conditions_by_variable; @@ -265,7 +262,8 @@ static utils::HashSet get_effects_on_other_variables( return next_effect; } -utils::HashSet LandmarkFactoryReasonableOrdersHPS::get_shared_effects_of_achievers( +utils::HashSet +LandmarkFactoryReasonableOrdersHPS::get_shared_effects_of_achievers( const FactPair &atom, const TaskProxy &task_proxy) const { utils::HashSet shared_effects; @@ -323,9 +321,9 @@ bool LandmarkFactoryReasonableOrdersHPS::interferes( utils::HashSet shared_effects = get_shared_effects_of_achievers(atom_a, task_proxy); return ranges::any_of(shared_effects, [&](const FactPair &atom) { - const FactProxy &e = variables[atom.var].get_fact(atom.value); - return e != a && e != b && e.is_mutex(b); - }); + const FactProxy &e = variables[atom.var].get_fact(atom.value); + return e != a && e != b && e.is_mutex(b); + }); /* Experimentally commenting this out -- see issue202. @@ -380,9 +378,11 @@ bool LandmarkFactoryReasonableOrdersHPS::supports_conditional_effects() const { } class LandmarkFactoryReasonableOrdersHPSFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + LandmarkFactory, LandmarkFactoryReasonableOrdersHPS> { public: - LandmarkFactoryReasonableOrdersHPSFeature() : TypedFeature("lm_reasonable_orders_hps") { + LandmarkFactoryReasonableOrdersHPSFeature() + : TypedFeature("lm_reasonable_orders_hps") { document_title("HPS Orders"); document_synopsis( "Adds reasonable orders described in the following paper" + @@ -390,9 +390,7 @@ class LandmarkFactoryReasonableOrdersHPSFeature {"Jörg Hoffmann", "Julie Porteous", "Laura Sebastia"}, "Ordered Landmarks in Planning", "https://jair.org/index.php/jair/article/view/10390/24882", - "Journal of Artificial Intelligence Research", - "22", - "215-278", + "Journal of Artificial Intelligence Research", "22", "215-278", "2004")); document_note( @@ -412,17 +410,18 @@ class LandmarkFactoryReasonableOrdersHPSFeature // TODO: correct? 
document_language_support( - "conditional_effects", - "supported if subcomponent supports them"); + "conditional_effects", "supported if subcomponent supports them"); } virtual shared_ptr create_component( const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + return plugins::make_shared_from_arg_tuples< + LandmarkFactoryReasonableOrdersHPS>( opts.get>("lm_factory"), get_landmark_factory_arguments_from_options(opts)); } }; -static plugins::FeaturePlugin _plugin; +static plugins::FeaturePlugin + _plugin; } diff --git a/src/search/landmarks/landmark_factory_relaxation.cc b/src/search/landmarks/landmark_factory_relaxation.cc index 5b8c8ff238..60f2b32759 100644 --- a/src/search/landmarks/landmark_factory_relaxation.cc +++ b/src/search/landmarks/landmark_factory_relaxation.cc @@ -8,8 +8,7 @@ using namespace std; namespace landmarks { -LandmarkFactoryRelaxation::LandmarkFactoryRelaxation( - utils::Verbosity verbosity) +LandmarkFactoryRelaxation::LandmarkFactoryRelaxation(utils::Verbosity verbosity) : LandmarkFactory(verbosity) { } diff --git a/src/search/landmarks/landmark_factory_rpg_exhaust.cc b/src/search/landmarks/landmark_factory_rpg_exhaust.cc index 4baf2a8191..8ba95ca107 100644 --- a/src/search/landmarks/landmark_factory_rpg_exhaust.cc +++ b/src/search/landmarks/landmark_factory_rpg_exhaust.cc @@ -56,8 +56,9 @@ void LandmarkFactoryRpgExhaust::generate_all_atomic_landmarks( const FactPair atom(var.get_id(), value); if (!landmark_graph->contains_atomic_landmark(atom)) { Landmark landmark({atom}, ATOMIC); - if (!relaxed_task_solvable(task_proxy, exploration, landmark, - use_unary_relaxation)) { + if (!relaxed_task_solvable( + task_proxy, exploration, landmark, + use_unary_relaxation)) { landmark_graph->add_landmark(move(landmark)); } } @@ -102,8 +103,7 @@ class LandmarkFactoryRpgExhaustFeature add_landmark_factory_options_to_feature(*this); document_language_support( - "conditional_effects", - "ignored, i.e. 
not supported"); + "conditional_effects", "ignored, i.e. not supported"); } virtual shared_ptr create_component( diff --git a/src/search/landmarks/landmark_factory_rpg_exhaust.h b/src/search/landmarks/landmark_factory_rpg_exhaust.h index 782ad95f21..7bc6246bf9 100644 --- a/src/search/landmarks/landmark_factory_rpg_exhaust.h +++ b/src/search/landmarks/landmark_factory_rpg_exhaust.h @@ -14,8 +14,8 @@ class LandmarkFactoryRpgExhaust : public LandmarkFactoryRelaxation { Exploration &exploration) override; public: - explicit LandmarkFactoryRpgExhaust(bool use_unary_relaxation, - utils::Verbosity verbosity); + explicit LandmarkFactoryRpgExhaust( + bool use_unary_relaxation, utils::Verbosity verbosity); virtual bool supports_conditional_effects() const override; }; diff --git a/src/search/landmarks/landmark_factory_rpg_sasp.cc b/src/search/landmarks/landmark_factory_rpg_sasp.cc index 7608ac696e..775342c87c 100644 --- a/src/search/landmarks/landmark_factory_rpg_sasp.cc +++ b/src/search/landmarks/landmark_factory_rpg_sasp.cc @@ -53,8 +53,7 @@ void LandmarkFactoryRpgSasp::compute_dtg_successors( } else if (effect_conditions.contains(var)) { add_dtg_successor(var, effect_conditions.at(var), post); } else { - int domain_size = - effect.get_fact().get_variable().get_domain_size(); + int domain_size = effect.get_fact().get_variable().get_domain_size(); for (int pre = 0; pre < domain_size; ++pre) { add_dtg_successor(var, pre, post); } @@ -121,10 +120,10 @@ static void add_binary_variable_conditions( for (const FactPair &atom : landmark.atoms) { if (atom.var == var_id && initial_state[var_id].get_value() != atom.value) { - assert(ranges::none_of(result, - [&](const FactPair &result_atom) { - return result_atom.var == var_id; - })); + assert(ranges::none_of( + result, [&](const FactPair &result_atom) { + return result_atom.var == var_id; + })); result.insert(initial_state[var_id].get_pair()); break; } @@ -206,9 +205,9 @@ static vector get_natural_parents(const LandmarkNode *node) { 
vector parents; parents.reserve(node->parents.size()); assert(ranges::all_of( - node->parents, [](const pair &parent) { - return parent.second >= OrderingType::NATURAL; - })); + node->parents, [](const pair &parent) { + return parent.second >= OrderingType::NATURAL; + })); for (auto &parent : views::keys(node->parents)) { parents.push_back(parent); } @@ -271,8 +270,8 @@ bool LandmarkFactoryRpgSasp::deal_with_overlapping_landmarks( const utils::HashSet &atoms, LandmarkNode &node, OrderingType type) const { if (ranges::any_of(atoms, [&](const FactPair &atom) { - return landmark_graph->contains_atomic_landmark(atom); - })) { + return landmark_graph->contains_atomic_landmark(atom); + })) { /* Do not add the landmark because the atomic one is stronger. Do not add the ordering(s) to the corresponding atomic landmark(s) as they are @@ -344,8 +343,8 @@ utils::HashSet LandmarkFactoryRpgSasp::compute_shared_preconditions( return shared_preconditions; } -static string get_predicate_for_atom(const VariablesProxy &variables, - int var_id, int value) { +static string get_predicate_for_atom( + const VariablesProxy &variables, int var_id, int value) { const string atom_name = variables[var_id].get_fact(value).get_name(); if (atom_name == "") { return ""; @@ -363,8 +362,7 @@ static string get_predicate_for_atom(const VariablesProxy &variables, } return { atom_name.begin() + predicate_pos, - atom_name.begin() + static_cast(paren_pos) - }; + atom_name.begin() + static_cast(paren_pos)}; } /* @@ -446,11 +444,14 @@ void LandmarkFactoryRpgSasp::extend_disjunction_class_lookups( } static vector> get_disjunctive_preconditions( - const unordered_map> &preconditions_by_disjunction_class, - const unordered_map> &used_operators_by_disjunction_class, + const unordered_map> + &preconditions_by_disjunction_class, + const unordered_map> + &used_operators_by_disjunction_class, int num_ops) { vector> disjunctive_preconditions; - for (const auto &[disjunction_class, atoms] : 
preconditions_by_disjunction_class) { + for (const auto &[disjunction_class, atoms] : + preconditions_by_disjunction_class) { int used_operators = static_cast( used_operators_by_disjunction_class.at(disjunction_class).size()); if (used_operators == num_ops) { @@ -470,17 +471,16 @@ static vector> get_disjunctive_preconditions( atom from each of the operators, which we additionally restrict so that each atom in the set stems from the same disjunction class. */ -vector> LandmarkFactoryRpgSasp::compute_disjunctive_preconditions( +vector> +LandmarkFactoryRpgSasp::compute_disjunctive_preconditions( const TaskProxy &task_proxy, const Landmark &landmark, const vector> &reached) const { - vector op_or_axiom_ids = - get_operators_achieving_landmark(landmark); + vector op_or_axiom_ids = get_operators_achieving_landmark(landmark); int num_ops = 0; unordered_map> preconditions_by_disjunction_class; unordered_map> used_operators_by_disjunction_class; for (int op_id : op_or_axiom_ids) { - const OperatorProxy &op = - get_operator_or_axiom(task_proxy, op_id); + const OperatorProxy &op = get_operator_or_axiom(task_proxy, op_id); if (possibly_reaches_landmark(op, reached, landmark)) { ++num_ops; utils::HashSet landmark_preconditions = @@ -493,8 +493,8 @@ vector> LandmarkFactoryRpgSasp::compute_disjunctive_pre } } return get_disjunctive_preconditions( - preconditions_by_disjunction_class, - used_operators_by_disjunction_class, num_ops); + preconditions_by_disjunction_class, used_operators_by_disjunction_class, + num_ops); } void LandmarkFactoryRpgSasp::generate_goal_landmarks( @@ -507,8 +507,8 @@ void LandmarkFactoryRpgSasp::generate_goal_landmarks( } void LandmarkFactoryRpgSasp::generate_shared_precondition_landmarks( - const TaskProxy &task_proxy, const Landmark &landmark, - LandmarkNode *node, const vector> &reached) { + const TaskProxy &task_proxy, const Landmark &landmark, LandmarkNode *node, + const vector> &reached) { utils::HashSet shared_preconditions = 
compute_shared_preconditions(task_proxy, landmark, reached); /* All shared preconditions are landmarks, and greedy-necessary @@ -528,10 +528,10 @@ void LandmarkFactoryRpgSasp::generate_disjunctive_precondition_landmarks( for (const auto &preconditions : disjunctive_preconditions) { /* We don't want disjunctive landmarks to get too big. Also, they should not hold in the initial state. */ - if (preconditions.size() < 5 && ranges::none_of( - preconditions, [&](const FactPair &atom) { - return initial_state[atom.var].get_value() == atom.value; - })) { + if (preconditions.size() < 5 && + ranges::none_of(preconditions, [&](const FactPair &atom) { + return initial_state[atom.var].get_value() == atom.value; + })) { add_disjunctive_landmark_and_ordering( preconditions, *node, OrderingType::GREEDY_NECESSARY); } @@ -631,7 +631,8 @@ static vector get_critical_dtg_predecessors( vector critical; critical.reserve(domain_size); for (int value = 0; value < domain_size; ++value) { - if (reached[value] && value_critical_to_reach_landmark( + if (reached[value] && + value_critical_to_reach_landmark( init_value, landmark_value, value, reached, successors)) { critical.push_back(value); } @@ -656,11 +657,11 @@ void LandmarkFactoryRpgSasp::approximate_lookahead_orderings( const FactPair init_atom = task_proxy.get_initial_state()[landmark_atom.var].get_pair(); vector critical_predecessors = get_critical_dtg_predecessors( - init_atom.value, landmark_atom.value, - reached[landmark_atom.var], dtg_successors[landmark_atom.var]); + init_atom.value, landmark_atom.value, reached[landmark_atom.var], + dtg_successors[landmark_atom.var]); for (int value : critical_predecessors) { - add_atomic_landmark_and_ordering(FactPair(landmark_atom.var, value), - *node, OrderingType::NATURAL); + add_atomic_landmark_and_ordering( + FactPair(landmark_atom.var, value), *node, OrderingType::NATURAL); } } @@ -696,7 +697,8 @@ bool LandmarkFactoryRpgSasp::atom_and_landmark_achievable_together( the landmark graph in 
`add_landmark_forward_orderings` when it is known which atoms are actually landmarks. */ -utils::HashSet LandmarkFactoryRpgSasp::compute_atoms_unreachable_without_landmark( +utils::HashSet +LandmarkFactoryRpgSasp::compute_atoms_unreachable_without_landmark( const VariablesProxy &variables, const Landmark &landmark, const vector> &reached) const { utils::HashSet unreachable_atoms; @@ -734,13 +736,13 @@ void LandmarkFactoryRpgSasp::discard_disjunctive_landmarks() const { */ if (landmark_graph->get_num_disjunctive_landmarks() > 0) { if (log.is_at_least_normal()) { - log << "Discarding " << landmark_graph->get_num_disjunctive_landmarks() + log << "Discarding " + << landmark_graph->get_num_disjunctive_landmarks() << " disjunctive landmarks" << endl; } - landmark_graph->remove_node_if( - [](const LandmarkNode &node) { - return node.get_landmark().type == DISJUNCTIVE; - }); + landmark_graph->remove_node_if([](const LandmarkNode &node) { + return node.get_landmark().type == DISJUNCTIVE; + }); } } @@ -753,20 +755,15 @@ class LandmarkFactoryRpgSaspFeature public: LandmarkFactoryRpgSaspFeature() : TypedFeature("lm_rhw") { document_title("RHW Landmarks"); - document_synopsis( - "The landmark generation method introduced by " - "Richter, Helmert and Westphal (AAAI 2008)."); + document_synopsis("The landmark generation method introduced by " + "Richter, Helmert and Westphal (AAAI 2008)."); add_option( - "disjunctive_landmarks", - "keep disjunctive landmarks", - "true"); + "disjunctive_landmarks", "keep disjunctive landmarks", "true"); add_use_orders_option_to_feature(*this); add_landmark_factory_options_to_feature(*this); - document_language_support( - "conditional_effects", - "supported"); + document_language_support("conditional_effects", "supported"); } virtual shared_ptr create_component( diff --git a/src/search/landmarks/landmark_factory_rpg_sasp.h b/src/search/landmarks/landmark_factory_rpg_sasp.h index e55e985b80..7662907be2 100644 --- 
a/src/search/landmarks/landmark_factory_rpg_sasp.h +++ b/src/search/landmarks/landmark_factory_rpg_sasp.h @@ -16,7 +16,8 @@ class LandmarkFactoryRpgSasp : public LandmarkFactoryRelaxation { std::deque open_landmarks; std::vector> disjunction_classes; - std::unordered_map> forward_orderings; + std::unordered_map> + forward_orderings; /* The entry `dtg_successors[var][val]` contains all successor values of the atom var->val in the domain transition graph (aka atomic projection). */ @@ -82,8 +83,9 @@ class LandmarkFactoryRpgSasp : public LandmarkFactoryRelaxation { void discard_disjunctive_landmarks() const; public: - LandmarkFactoryRpgSasp(bool disjunctive_landmarks, bool use_orders, - utils::Verbosity verbosity); + LandmarkFactoryRpgSasp( + bool disjunctive_landmarks, bool use_orders, + utils::Verbosity verbosity); virtual bool supports_conditional_effects() const override; }; diff --git a/src/search/landmarks/landmark_factory_zhu_givan.cc b/src/search/landmarks/landmark_factory_zhu_givan.cc index 1d42b7864f..2bd6742768 100644 --- a/src/search/landmarks/landmark_factory_zhu_givan.cc +++ b/src/search/landmarks/landmark_factory_zhu_givan.cc @@ -17,8 +17,7 @@ using namespace std; namespace landmarks { LandmarkFactoryZhuGivan::LandmarkFactoryZhuGivan( bool use_orders, utils::Verbosity verbosity) - : LandmarkFactoryRelaxation(verbosity), - use_orders(use_orders) { + : LandmarkFactoryRelaxation(verbosity), use_orders(use_orders) { } void LandmarkFactoryZhuGivan::generate_relaxed_landmarks( @@ -42,7 +41,8 @@ void LandmarkFactoryZhuGivan::generate_relaxed_landmarks( bool LandmarkFactoryZhuGivan::goal_is_reachable( const TaskProxy &task_proxy, const PropositionLayer &prop_layer) const { for (FactProxy goal : task_proxy.get_goals()) { - if (!prop_layer[goal.get_variable().get_id()][goal.get_value()].reached()) { + if (!prop_layer[goal.get_variable().get_id()][goal.get_value()] + .reached()) { if (log.is_at_least_normal()) { log << "Problem not solvable, even if relaxed." 
<< endl; } @@ -109,7 +109,8 @@ void LandmarkFactoryZhuGivan::extract_landmarks( } } -LandmarkFactoryZhuGivan::PropositionLayer LandmarkFactoryZhuGivan::initialize_relaxed_plan_graph( +LandmarkFactoryZhuGivan::PropositionLayer +LandmarkFactoryZhuGivan::initialize_relaxed_plan_graph( const TaskProxy &task_proxy, unordered_set &triggered_ops) const { const State &initial_state = task_proxy.get_initial_state(); const VariablesProxy &variables = task_proxy.get_variables(); @@ -157,8 +158,8 @@ void LandmarkFactoryZhuGivan::propagate_labels_until_fixed_point_reached( } } - -LandmarkFactoryZhuGivan::PropositionLayer LandmarkFactoryZhuGivan::build_relaxed_plan_graph_with_labels( +LandmarkFactoryZhuGivan::PropositionLayer +LandmarkFactoryZhuGivan::build_relaxed_plan_graph_with_labels( const TaskProxy &task_proxy) const { assert(!triggers.empty()); @@ -172,8 +173,9 @@ LandmarkFactoryZhuGivan::PropositionLayer LandmarkFactoryZhuGivan::build_relaxed no conditional effects, it is only necessary to apply them once. If they have conditional effects, they will be triggered again at later stages. 
*/ - triggered_ops.insert(operators_without_preconditions.begin(), - operators_without_preconditions.end()); + triggered_ops.insert( + operators_without_preconditions.begin(), + operators_without_preconditions.end()); propagate_labels_until_fixed_point_reached( task_proxy, move(triggered_ops), current_layer); return current_layer; @@ -315,9 +317,8 @@ class LandmarkFactoryZhuGivanFeature public: LandmarkFactoryZhuGivanFeature() : TypedFeature("lm_zg") { document_title("Zhu/Givan Landmarks"); - document_synopsis( - "The landmark generation method introduced by " - "Zhu & Givan (ICAPS 2003 Doctoral Consortium)."); + document_synopsis("The landmark generation method introduced by " + "Zhu & Givan (ICAPS 2003 Doctoral Consortium)."); add_use_orders_option_to_feature(*this); add_landmark_factory_options_to_feature(*this); diff --git a/src/search/landmarks/landmark_factory_zhu_givan.h b/src/search/landmarks/landmark_factory_zhu_givan.h index 4f62b275d6..6da61c727a 100644 --- a/src/search/landmarks/landmark_factory_zhu_givan.h +++ b/src/search/landmarks/landmark_factory_zhu_givan.h @@ -61,16 +61,17 @@ class LandmarkFactoryZhuGivan : public LandmarkFactoryRelaxation { PropositionLayer build_relaxed_plan_graph_with_labels( const TaskProxy &task_proxy) const; - bool goal_is_reachable(const TaskProxy &task_proxy, - const PropositionLayer &prop_layer) const; + bool goal_is_reachable( + const TaskProxy &task_proxy, const PropositionLayer &prop_layer) const; LandmarkNode *create_goal_landmark(const FactPair &goal) const; void extract_landmarks_and_orderings_from_goal_labels( const FactPair &goal, const PropositionLayer &prop_layer, LandmarkNode *goal_landmark_node) const; /* Construct a landmark graph using the landmarks on the given proposition layer. 
*/ - void extract_landmarks(const TaskProxy &task_proxy, - const PropositionLayer &last_prop_layer) const; + void extract_landmarks( + const TaskProxy &task_proxy, + const PropositionLayer &last_prop_layer) const; // Link an operators to its propositions in the trigger list. void add_operator_to_triggers(const OperatorProxy &op); @@ -80,8 +81,7 @@ class LandmarkFactoryZhuGivan : public LandmarkFactoryRelaxation { Exploration &exploration) override; public: - LandmarkFactoryZhuGivan( - bool use_orders, utils::Verbosity verbosity); + LandmarkFactoryZhuGivan(bool use_orders, utils::Verbosity verbosity); virtual bool supports_conditional_effects() const override; }; diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc index 8f1f298fde..426a922d7a 100644 --- a/src/search/landmarks/landmark_graph.cc +++ b/src/search/landmarks/landmark_graph.cc @@ -40,7 +40,6 @@ LandmarkNode &LandmarkGraph::get_disjunctive_landmark_node( return *(disjunctive_landmarks_to_nodes.find(atom)->second); } - bool LandmarkGraph::contains_atomic_landmark(const FactPair &atom) const { return atomic_landmarks_to_nodes.contains(atom); } @@ -52,8 +51,8 @@ bool LandmarkGraph::contains_disjunctive_landmark(const FactPair &atom) const { bool LandmarkGraph::contains_overlapping_disjunctive_landmark( const utils::HashSet &atoms) const { return ranges::any_of(atoms, [&](const FactPair &atom) { - return contains_disjunctive_landmark(atom); - }); + return contains_disjunctive_landmark(atom); + }); } bool LandmarkGraph::contains_superset_disjunctive_landmark( @@ -90,10 +89,11 @@ LandmarkNode *LandmarkGraph::add_node(Landmark &&landmark) { } LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark_to_add) { - assert(landmark_to_add.type == CONJUNCTIVE || ranges::all_of( - landmark_to_add.atoms, [&](const FactPair &atom) { - return !contains_landmark(atom); - })); + assert( + landmark_to_add.type == CONJUNCTIVE || + ranges::all_of(landmark_to_add.atoms, [&](const 
FactPair &atom) { + return !contains_landmark(atom); + })); /* TODO: Avoid having to fetch landmark after moving it. This will only be possible after removing the assumption that landmarks don't overlap @@ -148,25 +148,23 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) { void LandmarkGraph::remove_node(LandmarkNode *node) { remove_node_occurrences(node); - const auto it = - ranges::find_if(nodes, [&node](const auto &other) { - return other.get() == node; - }); + const auto it = ranges::find_if( + nodes, [&node](const auto &other) { return other.get() == node; }); assert(it != nodes.end()); nodes.erase(it); } void LandmarkGraph::remove_node_if( - const function &remove_node_condition) { + const function &remove_node_condition) { for (const auto &node : nodes) { if (remove_node_condition(*node)) { remove_node_occurrences(node.get()); } } - erase_if(nodes, - [&remove_node_condition](const unique_ptr &node) { - return remove_node_condition(*node); - }); + erase_if( + nodes, [&remove_node_condition](const unique_ptr &node) { + return remove_node_condition(*node); + }); } void LandmarkGraph::set_landmark_ids() { diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h index 5d429ca267..b309634aa7 100644 --- a/src/search/landmarks/landmark_graph.h +++ b/src/search/landmarks/landmark_graph.h @@ -153,7 +153,7 @@ class LandmarkGraph { when moving landmark graph creation there. */ void remove_node(LandmarkNode *node); void remove_node_if( - const std::function &remove_node_condition); + const std::function &remove_node_condition); /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. 
*/ diff --git a/src/search/landmarks/landmark_heuristic.cc b/src/search/landmarks/landmark_heuristic.cc index 1a32088235..80da5cfe69 100644 --- a/src/search/landmarks/landmark_heuristic.cc +++ b/src/search/landmarks/landmark_heuristic.cc @@ -14,9 +14,8 @@ using namespace std; namespace landmarks { LandmarkHeuristic::LandmarkHeuristic( - bool use_preferred_operators, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity) + bool use_preferred_operators, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) : Heuristic(transform, cache_estimates, description, verbosity), initial_landmark_graph_has_cycle_of_natural_orderings(false), use_preferred_operators(use_preferred_operators), @@ -70,9 +69,9 @@ void LandmarkHeuristic::initialize( no good way to do this, so we use this incomplete, slightly less safe test. */ - if (task != tasks::g_root_task - && dynamic_cast(task.get()) == nullptr - && dynamic_cast(task.get()) == nullptr) { + if (task != tasks::g_root_task && + dynamic_cast(task.get()) == nullptr && + dynamic_cast(task.get()) == nullptr) { cerr << "The landmark heuristics currently only support " << "task transformations that modify the operator costs " << "or add negated axioms. See issues 845, 686 and 454 " @@ -86,8 +85,8 @@ void LandmarkHeuristic::initialize( initial_landmark_graph_has_cycle_of_natural_orderings = landmark_graph_has_cycle_of_natural_orderings(*landmark_graph); - if (initial_landmark_graph_has_cycle_of_natural_orderings - && log.is_at_least_normal()) { + if (initial_landmark_graph_has_cycle_of_natural_orderings && + log.is_at_least_normal()) { log << "Landmark graph contains a cycle of natural orderings." 
<< endl; } @@ -111,7 +110,8 @@ void LandmarkHeuristic::compute_landmark_graph( assert(landmark_factory->achievers_are_calculated()); if (log.is_at_least_normal()) { - log << "Landmark graph generation time: " << landmark_graph_timer << endl; + log << "Landmark graph generation time: " << landmark_graph_timer + << endl; log << "Landmark graph contains " << landmark_graph->get_num_landmarks() << " landmarks, of which " << landmark_graph->get_num_disjunctive_landmarks() @@ -230,51 +230,44 @@ void LandmarkHeuristic::notify_state_transition( void add_landmark_heuristic_options_to_feature( plugins::Feature &feature, const string &description) { feature.document_synopsis( - "Landmark progression is implemented according to the following paper:" - + utils::format_conference_reference( - {"Clemens Büchner", "Thomas Keller", "Salomé Eriksson", "Malte Helmert"}, + "Landmark progression is implemented according to the following paper:" + + utils::format_conference_reference( + {"Clemens Büchner", "Thomas Keller", "Salomé Eriksson", + "Malte Helmert"}, "Landmarks Progression in Heuristic Search", "https://ai.dmi.unibas.ch/papers/buechner-et-al-icaps2023.pdf", "Proceedings of the Thirty-Third International Conference on " "Automated Planning and Scheduling (ICAPS 2023)", - "70-79", - "AAAI Press", - "2023")); + "70-79", "AAAI Press", "2023")); feature.add_option>( - "lm_factory", - "the set of landmarks to use for this heuristic. " - "The set of landmarks can be specified here, " - "or predefined (see LandmarkFactory)."); + "lm_factory", "the set of landmarks to use for this heuristic. " + "The set of landmarks can be specified here, " + "or predefined (see LandmarkFactory)."); feature.add_option( - "pref", - "enable preferred operators (see note below)", - "false"); + "pref", "enable preferred operators (see note below)", "false"); /* TODO: Do we really want these options or should we just always progress everything we can? 
*/ - feature.add_option( - "prog_goal", "Use goal progression.", "true"); + feature.add_option("prog_goal", "Use goal progression.", "true"); feature.add_option( "prog_gn", "Use greedy-necessary ordering progression.", "true"); feature.add_option( "prog_r", "Use reasonable ordering progression.", "true"); add_heuristic_options_to_feature(feature, description); - feature.document_property("preferred operators", - "yes (if enabled; see ``pref`` option)"); + feature.document_property( + "preferred operators", "yes (if enabled; see ``pref`` option)"); } -tuple, bool, bool, bool, bool, - shared_ptr, bool, string, utils::Verbosity> -get_landmark_heuristic_arguments_from_options( - const plugins::Options &opts) { +tuple< + shared_ptr, bool, bool, bool, bool, + shared_ptr, bool, string, utils::Verbosity> +get_landmark_heuristic_arguments_from_options(const plugins::Options &opts) { return tuple_cat( make_tuple( opts.get>("lm_factory"), - opts.get("pref"), - opts.get("prog_goal"), - opts.get("prog_gn"), - opts.get("prog_r")), + opts.get("pref"), opts.get("prog_goal"), + opts.get("prog_gn"), opts.get("prog_r")), get_heuristic_arguments_from_options(opts)); } } diff --git a/src/search/landmarks/landmark_heuristic.h b/src/search/landmarks/landmark_heuristic.h index 8c66474665..6114c0257b 100644 --- a/src/search/landmarks/landmark_heuristic.h +++ b/src/search/landmarks/landmark_heuristic.h @@ -1,7 +1,7 @@ #ifndef LANDMARKS_LANDMARK_HEURISTIC_H #define LANDMARKS_LANDMARK_HEURISTIC_H -# include "../heuristic.h" +#include "../heuristic.h" #include "../tasks/default_value_axioms_task.h" #include "../utils/hash.h" @@ -25,10 +25,12 @@ class LandmarkHeuristic : public Heuristic { std::shared_ptr landmark_graph; const bool use_preferred_operators; // This map remains empty unless `use_preferred_operators` is true. 
- utils::HashMap> landmarks_achieved_by_atom; + utils::HashMap> + landmarks_achieved_by_atom; std::unique_ptr landmark_status_manager; - std::unique_ptr successor_generator; + std::unique_ptr + successor_generator; void initialize( const std::shared_ptr &landmark_factory, @@ -47,9 +49,8 @@ class LandmarkHeuristic : public Heuristic { public: LandmarkHeuristic( bool use_preferred_operators, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); virtual void get_path_dependent_evaluators( std::set &evals) override { @@ -57,16 +58,16 @@ class LandmarkHeuristic : public Heuristic { } virtual void notify_initial_state(const State &initial_state) override; - virtual void notify_state_transition(const State &parent_state, - OperatorID op_id, - const State &state) override; + virtual void notify_state_transition( + const State &parent_state, OperatorID op_id, + const State &state) override; }; extern void add_landmark_heuristic_options_to_feature( plugins::Feature &feature, const std::string &description); -extern std::tuple, bool, bool, bool, - bool, std::shared_ptr, bool, std::string, - utils::Verbosity> +extern std::tuple< + std::shared_ptr, bool, bool, bool, bool, + std::shared_ptr, bool, std::string, utils::Verbosity> get_landmark_heuristic_arguments_from_options(const plugins::Options &opts); } diff --git a/src/search/landmarks/landmark_status_manager.cc b/src/search/landmarks/landmark_status_manager.cc index 1bf56cba51..9cd20f13c1 100644 --- a/src/search/landmarks/landmark_status_manager.cc +++ b/src/search/landmarks/landmark_status_manager.cc @@ -1,14 +1,15 @@ #include "landmark_status_manager.h" -#include "util.h" #include "landmark.h" +#include "util.h" #include using namespace std; namespace landmarks { -static vector get_goal_landmarks(const LandmarkGraph &graph) { +static 
vector get_goal_landmarks( + const LandmarkGraph &graph) { vector goals; for (const auto &node : graph) { if (node->get_landmark().is_true_in_goal) { @@ -18,8 +19,8 @@ static vector get_goal_landmarks(const LandmarkGraph &grap return goals; } -static vector>> get_greedy_necessary_children( - const LandmarkGraph &graph) { +static vector>> +get_greedy_necessary_children(const LandmarkGraph &graph) { vector>> orderings; for (const auto &node : graph) { vector greedy_necessary_children; @@ -35,8 +36,8 @@ static vector>> get_gree return orderings; } -static vector>> get_reasonable_parents( - const LandmarkGraph &graph) { +static vector>> +get_reasonable_parents(const LandmarkGraph &graph) { vector>> orderings; for (const auto &node : graph) { vector reasonable_parents; @@ -53,27 +54,30 @@ static vector>> get_reas } LandmarkStatusManager::LandmarkStatusManager( - LandmarkGraph &landmark_graph, - bool progress_goals, + LandmarkGraph &landmark_graph, bool progress_goals, bool progress_greedy_necessary_orderings, bool progress_reasonable_orderings) : landmark_graph(landmark_graph), - goal_landmarks(progress_goals ? get_goal_landmarks(landmark_graph) - : vector{}), + goal_landmarks( + progress_goals ? get_goal_landmarks(landmark_graph) + : vector{}), greedy_necessary_children( progress_greedy_necessary_orderings - ? get_greedy_necessary_children(landmark_graph) - : vector>>{}), + ? get_greedy_necessary_children(landmark_graph) + : vector>>{}), reasonable_parents( progress_reasonable_orderings - ? get_reasonable_parents(landmark_graph) - : vector>>{}), + ? get_reasonable_parents(landmark_graph) + : vector>>{}), /* We initialize to true in `past_landmarks` because true is the neutral element of conjunction/set intersection. */ past_landmarks(vector(landmark_graph.get_num_landmarks(), true)), /* We initialize to false in `future_landmarks` because false is the neutral element for disjunction/set union. 
*/ - future_landmarks(vector(landmark_graph.get_num_landmarks(), false)) { + future_landmarks( + vector(landmark_graph.get_num_landmarks(), false)) { } BitsetView LandmarkStatusManager::get_past_landmarks(const State &state) { @@ -84,11 +88,13 @@ BitsetView LandmarkStatusManager::get_future_landmarks(const State &state) { return future_landmarks[state]; } -ConstBitsetView LandmarkStatusManager::get_past_landmarks(const State &state) const { +ConstBitsetView LandmarkStatusManager::get_past_landmarks( + const State &state) const { return past_landmarks[state]; } -ConstBitsetView LandmarkStatusManager::get_future_landmarks(const State &state) const { +ConstBitsetView LandmarkStatusManager::get_future_landmarks( + const State &state) const { return future_landmarks[state]; } @@ -114,9 +120,9 @@ void LandmarkStatusManager::progress_initial_state(const State &initial_state) { problems anything is a landmark. */ if (ranges::any_of(node->parents, [initial_state](auto &parent) { - const Landmark &landmark = parent.first->get_landmark(); - return !landmark.is_true_in_state(initial_state); - })) { + const Landmark &landmark = parent.first->get_landmark(); + return !landmark.is_true_in_state(initial_state); + })) { future.set(id); } } else { @@ -146,8 +152,8 @@ void LandmarkStatusManager::progress( assert(parent_future.size() == landmark_graph.get_num_landmarks()); progress_landmarks( - parent_past, parent_future, parent_ancestor_state, - past, future, ancestor_state); + parent_past, parent_future, parent_ancestor_state, past, future, + ancestor_state); progress_goals(ancestor_state, future); progress_greedy_necessary_orderings(ancestor_state, past, future); progress_reasonable_orderings(past, future); @@ -155,8 +161,8 @@ void LandmarkStatusManager::progress( void LandmarkStatusManager::progress_landmarks( ConstBitsetView &parent_past, ConstBitsetView &parent_future, - const State &parent_ancestor_state, BitsetView &past, - BitsetView &future, const State &ancestor_state) { + 
const State &parent_ancestor_state, BitsetView &past, BitsetView &future, + const State &ancestor_state) { for (const auto &node : landmark_graph) { int id = node->get_id(); const Landmark &landmark = node->get_landmark(); @@ -184,8 +190,8 @@ void LandmarkStatusManager::progress_landmarks( } } -void LandmarkStatusManager::progress_goals(const State &ancestor_state, - BitsetView &future) { +void LandmarkStatusManager::progress_goals( + const State &ancestor_state, BitsetView &future) { for (auto &node : goal_landmarks) { if (!node->get_landmark().is_true_in_state(ancestor_state)) { future.set(node->get_id()); @@ -199,8 +205,8 @@ void LandmarkStatusManager::progress_greedy_necessary_orderings( const Landmark &landmark = tail->get_landmark(); assert(!children.empty()); for (auto &child : children) { - if (!past.test(child->get_id()) - && !landmark.is_true_in_state(ancestor_state)) { + if (!past.test(child->get_id()) && + !landmark.is_true_in_state(ancestor_state)) { future.set(tail->get_id()); break; } diff --git a/src/search/landmarks/landmark_status_manager.h b/src/search/landmarks/landmark_status_manager.h index e5a3b1b5ef..7ab9e9e4a0 100644 --- a/src/search/landmarks/landmark_status_manager.h +++ b/src/search/landmarks/landmark_status_manager.h @@ -12,8 +12,12 @@ class LandmarkNode; class LandmarkStatusManager { LandmarkGraph &landmark_graph; const std::vector goal_landmarks; - const std::vector>> greedy_necessary_children; - const std::vector>> reasonable_parents; + const std::vector< + std::pair>> + greedy_necessary_children; + const std::vector< + std::pair>> + reasonable_parents; PerStateBitset past_landmarks; PerStateBitset future_landmarks; @@ -30,8 +34,7 @@ class LandmarkStatusManager { const BitsetView &past, BitsetView &future); public: LandmarkStatusManager( - LandmarkGraph &landmark_graph, - bool progress_goals, + LandmarkGraph &landmark_graph, bool progress_goals, bool progress_greedy_necessary_orderings, bool progress_reasonable_orderings); diff --git 
a/src/search/landmarks/landmark_sum_heuristic.cc b/src/search/landmarks/landmark_sum_heuristic.cc index f8fa63e91c..a239884cd6 100644 --- a/src/search/landmarks/landmark_sum_heuristic.cc +++ b/src/search/landmarks/landmark_sum_heuristic.cc @@ -22,8 +22,8 @@ static bool are_dead_ends_reliable( return false; } - if (!lm_factory->supports_conditional_effects() - && task_properties::has_conditional_effects(task_proxy)) { + if (!lm_factory->supports_conditional_effects() && + task_properties::has_conditional_effects(task_proxy)) { return false; } @@ -31,10 +31,9 @@ static bool are_dead_ends_reliable( } LandmarkSumHeuristic::LandmarkSumHeuristic( - const shared_ptr &lm_factory, - bool pref, bool prog_goal, bool prog_gn, bool prog_r, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity, + const shared_ptr &lm_factory, bool pref, bool prog_goal, + bool prog_gn, bool prog_r, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity, tasks::AxiomHandlingType axioms) : LandmarkHeuristic( pref, @@ -73,8 +72,8 @@ void LandmarkSumHeuristic::compute_landmark_costs() { min_first_achiever_costs.push_back(min_operator_cost); min_possible_achiever_costs.push_back(min_operator_cost); } else { - int min_first_achiever_cost = get_min_cost_of_achievers( - node->get_landmark().first_achievers); + int min_first_achiever_cost = + get_min_cost_of_achievers(node->get_landmark().first_achievers); min_first_achiever_costs.push_back(min_first_achiever_cost); int min_possible_achiever_cost = get_min_cost_of_achievers( node->get_landmark().possible_achievers); @@ -91,9 +90,9 @@ int LandmarkSumHeuristic::get_heuristic_value(const State &ancestor_state) { landmark_status_manager->get_future_landmarks(ancestor_state); for (int id = 0; id < landmark_graph->get_num_landmarks(); ++id) { if (future.test(id)) { - const int min_achiever_cost = - past.test(id) ? 
min_possible_achiever_costs[id] - : min_first_achiever_costs[id]; + const int min_achiever_cost = past.test(id) + ? min_possible_achiever_costs[id] + : min_first_achiever_costs[id]; if (min_achiever_cost < numeric_limits::max()) { h += min_achiever_cost; } else { @@ -123,17 +122,13 @@ class LandmarkSumHeuristicFeature "https://ai.dmi.unibas.ch/papers/richter-et-al-aaai2008.pdf", "Proceedings of the 23rd AAAI Conference on Artificial " "Intelligence (AAAI 2008)", - "975-982", - "AAAI Press", - "2008") + + "975-982", "AAAI Press", "2008") + "and" + utils::format_journal_reference( {"Silvia Richter", "Matthias Westphal"}, "The LAMA Planner: Guiding Cost-Based Anytime Planning with Landmarks", "http://www.aaai.org/Papers/JAIR/Vol39/JAIR-3903.pdf", - "Journal of Artificial Intelligence Research", - "39", - "127-177", + "Journal of Artificial Intelligence Research", "39", "127-177", "2010")); /* We usually have the options of base classes behind the options @@ -192,9 +187,8 @@ class LandmarkSumHeuristicFeature document_property("admissible", "no"); document_property("consistent", "no"); document_property( - "safe", - "yes except on tasks with conditional effects when " - "using a LandmarkFactory not supporting them"); + "safe", "yes except on tasks with conditional effects when " + "using a LandmarkFactory not supporting them"); } virtual shared_ptr create_component( diff --git a/src/search/landmarks/landmark_sum_heuristic.h b/src/search/landmarks/landmark_sum_heuristic.h index a3b1eef367..b3ea0f7138 100644 --- a/src/search/landmarks/landmark_sum_heuristic.h +++ b/src/search/landmarks/landmark_sum_heuristic.h @@ -25,9 +25,9 @@ class LandmarkSumHeuristic : public LandmarkHeuristic { LandmarkSumHeuristic( const std::shared_ptr &lm_factory, bool pref, bool prog_goal, bool prog_gn, bool prog_r, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity, tasks::AxiomHandlingType axioms); + const std::shared_ptr 
&transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity, + tasks::AxiomHandlingType axioms); virtual bool dead_ends_are_reliable() const override; }; diff --git a/src/search/landmarks/util.cc b/src/search/landmarks/util.cc index 280f2b3e1a..bbf90e9beb 100644 --- a/src/search/landmarks/util.cc +++ b/src/search/landmarks/util.cc @@ -4,6 +4,7 @@ #include "landmark_graph.h" #include "../task_proxy.h" + #include "../utils/logging.h" using namespace std; @@ -12,7 +13,8 @@ namespace landmarks { static bool condition_is_reachable( const ConditionsProxy &conditions, const vector> &reached) { for (FactProxy condition : conditions) { - if (!reached[condition.get_variable().get_id()][condition.get_value()]) { + if (!reached[condition.get_variable().get_id()] + [condition.get_value()]) { return false; } } @@ -21,9 +23,9 @@ static bool condition_is_reachable( /* Check whether operator `op` can possibly make `landmark` true in a relaxed task (as given by the reachability information in reached). */ -bool possibly_reaches_landmark(const OperatorProxy &op, - const vector> &reached, - const Landmark &landmark) { +bool possibly_reaches_landmark( + const OperatorProxy &op, const vector> &reached, + const Landmark &landmark) { assert(!reached.empty()); if (!condition_is_reachable(op.get_preconditions(), reached)) { // Operator `op` is not applicable. @@ -33,9 +35,9 @@ bool possibly_reaches_landmark(const OperatorProxy &op, // Check whether an effect of `op` reaches an atom in `landmark`. 
EffectsProxy effects = op.get_effects(); return any_of(begin(effects), end(effects), [&](const EffectProxy &effect) { - return landmark.contains(effect.get_fact().get_pair()) && - condition_is_reachable(effect.get_conditions(), reached); - }); + return landmark.contains(effect.get_fact().get_pair()) && + condition_is_reachable(effect.get_conditions(), reached); + }); } utils::HashSet get_intersection( @@ -50,13 +52,13 @@ utils::HashSet get_intersection( return intersection; } -void union_inplace(utils::HashSet &set1, - const utils::HashSet &set2) { +void union_inplace( + utils::HashSet &set1, const utils::HashSet &set2) { set1.insert(set2.begin(), set2.end()); } -OperatorProxy get_operator_or_axiom(const TaskProxy &task_proxy, - int op_or_axiom_id) { +OperatorProxy get_operator_or_axiom( + const TaskProxy &task_proxy, int op_or_axiom_id) { if (op_or_axiom_id < 0) { return task_proxy.get_axioms()[-op_or_axiom_id - 1]; } else { @@ -78,8 +80,7 @@ int get_operator_or_axiom_id(const OperatorProxy &op) { at least, but without the time and memory stamps. 
*/ static void dump_node( - const TaskProxy &task_proxy, - const LandmarkNode &node, + const TaskProxy &task_proxy, const LandmarkNode &node, utils::LogProxy &log) { if (log.is_at_least_debug()) { const Landmark &landmark = node.get_landmark(); @@ -105,8 +106,8 @@ static void dump_node( } } -static void dump_ordering(int from, int to, OrderingType type, - const utils::LogProxy &log) { +static void dump_ordering( + int from, int to, OrderingType type, const utils::LogProxy &log) { if (log.is_at_least_debug()) { cout << " lm" << from << " -> lm" << to << " [label="; switch (type) { @@ -128,8 +129,7 @@ static void dump_ordering(int from, int to, OrderingType type, } void dump_landmark_graph( - const TaskProxy &task_proxy, - const LandmarkGraph &graph, + const TaskProxy &task_proxy, const LandmarkGraph &graph, utils::LogProxy &log) { if (log.is_at_least_debug()) { log << "Dumping landmark graph: " << endl; diff --git a/src/search/landmarks/util.h b/src/search/landmarks/util.h index 1b91e130b7..03c49ac71a 100644 --- a/src/search/landmarks/util.h +++ b/src/search/landmarks/util.h @@ -24,16 +24,15 @@ extern bool possibly_reaches_landmark( extern utils::HashSet get_intersection( const utils::HashSet &set1, const utils::HashSet &set2); -extern void union_inplace(utils::HashSet &set1, - const utils::HashSet &set2); +extern void union_inplace( + utils::HashSet &set1, const utils::HashSet &set2); extern OperatorProxy get_operator_or_axiom( const TaskProxy &task_proxy, int op_or_axiom_id); extern int get_operator_or_axiom_id(const OperatorProxy &op); extern void dump_landmark_graph( - const TaskProxy &task_proxy, - const LandmarkGraph &graph, + const TaskProxy &task_proxy, const LandmarkGraph &graph, utils::LogProxy &log); } diff --git a/src/search/lp/cplex_solver_interface.cc b/src/search/lp/cplex_solver_interface.cc index d6e3eb9617..54d77ae2a0 100644 --- a/src/search/lp/cplex_solver_interface.cc +++ b/src/search/lp/cplex_solver_interface.cc @@ -46,9 +46,9 @@ static void 
handle_cplex_error(CPXENVptr env, int error_code) { } /* Make a call to a CPLEX API function checking its return status. */ -template -static void CPX_CALL(Func cpxfunc, CPXENVptr env, Args && ... args) { - int status = cpxfunc(env, forward(args) ...); +template +static void CPX_CALL(Func cpxfunc, CPXENVptr env, Args &&...args) { + int status = cpxfunc(env, forward(args)...); if (status) { handle_cplex_error(env, status); } @@ -67,7 +67,8 @@ static void freeProblem(CPXENVptr env, CPXLPptr *problem) { CPX_CALL(CPXfreeprob, env, problem); } -static tuple bounds_to_sense_rhs_range(double lb, double ub) { +static tuple bounds_to_sense_rhs_range( + double lb, double ub) { if (lb <= -CPX_INFBOUND && ub >= CPX_INFBOUND) { // CPLEX does not support <= or >= constraints without bounds. return {'R', -CPX_INFBOUND, 2 * CPX_INFBOUND}; @@ -91,8 +92,7 @@ static int sense_to_cplex_sense(LPObjectiveSense sense) { } void CplexSolverInterface::CplexMatrix::assign_column_by_column( - const named_vector::NamedVector &constraints, - int num_cols) { + const named_vector::NamedVector &constraints, int num_cols) { coefficients.clear(); indices.clear(); starts.clear(); @@ -173,7 +173,8 @@ void CplexSolverInterface::CplexMatrix::assign_row_by_row( assert(indices.size() == coefficients.size()); } -void CplexSolverInterface::CplexColumnsInfo::assign(const named_vector::NamedVector &variables) { +void CplexSolverInterface::CplexColumnsInfo::assign( + const named_vector::NamedVector &variables) { lb.clear(); ub.clear(); type.clear(); @@ -200,7 +201,9 @@ void CplexSolverInterface::CplexColumnsInfo::assign(const named_vector::NamedVec assert(static_cast(objective.size()) == variables.size()); } -void CplexSolverInterface::CplexRowsInfo::assign(const named_vector::NamedVector &constraints, int offset, bool dense_range_values) { +void CplexSolverInterface::CplexRowsInfo::assign( + const named_vector::NamedVector &constraints, int offset, + bool dense_range_values) { rhs.clear(); sense.clear(); 
range_values.clear(); @@ -216,7 +219,8 @@ void CplexSolverInterface::CplexRowsInfo::assign(const named_vector::NamedVector const LPConstraint &constraint = constraints[row_index]; double lb = constraint.get_lower_bound(); double ub = constraint.get_upper_bound(); - const auto &[sense_value, rhs_value, range_value] = bounds_to_sense_rhs_range(lb, ub); + const auto &[sense_value, rhs_value, range_value] = + bounds_to_sense_rhs_range(lb, ub); sense[row_index] = sense_value; rhs[row_index] = rhs_value; if (sense_value == 'R') { @@ -231,13 +235,19 @@ void CplexSolverInterface::CplexRowsInfo::assign(const named_vector::NamedVector assert(static_cast(rhs.size()) == constraints.size()); assert(static_cast(sense.size()) == constraints.size()); assert(static_cast(range_values.size()) <= constraints.size()); - assert((dense_range_values && (static_cast(range_values.size()) == constraints.size()) && (range_indices.size() == 0)) || - (!dense_range_values && (range_values.size() == range_indices.size()))); + assert( + (dense_range_values && + (static_cast(range_values.size()) == constraints.size()) && + (range_indices.size() == 0)) || + (!dense_range_values && (range_values.size() == range_indices.size()))); } CplexSolverInterface::CplexSolverInterface() - : env(nullptr), problem(nullptr), is_mip(false), - num_permanent_constraints(0), num_unsatisfiable_constraints(0), + : env(nullptr), + problem(nullptr), + is_mip(false), + num_permanent_constraints(0), + num_unsatisfiable_constraints(0), num_unsatisfiable_temp_constraints(0) { int status = 0; env = CPXopenCPLEX(&status); @@ -260,10 +270,12 @@ CplexSolverInterface::~CplexSolverInterface() { } bool CplexSolverInterface::is_trivially_unsolvable() const { - return num_unsatisfiable_constraints + num_unsatisfiable_temp_constraints > 0; + return num_unsatisfiable_constraints + num_unsatisfiable_temp_constraints > + 0; } -void CplexSolverInterface::change_constraint_bounds(int index, double lb, double ub) { +void 
CplexSolverInterface::change_constraint_bounds( + int index, double lb, double ub) { double current_lb = constraint_lower_bounds[index]; double current_ub = constraint_upper_bounds[index]; if (current_lb == lb && current_ub == ub) { @@ -299,11 +311,13 @@ void CplexSolverInterface::load_problem(const LinearProgram &lp) { problem = createProblem(env, ""); const named_vector::NamedVector &variables = lp.get_variables(); - is_mip = any_of(variables.begin(), variables.end(), [](const LPVariable &v) { - return v.is_integer; - }); + is_mip = + any_of(variables.begin(), variables.end(), [](const LPVariable &v) { + return v.is_integer; + }); - const named_vector::NamedVector &constraints = lp.get_constraints(); + const named_vector::NamedVector &constraints = + lp.get_constraints(); num_permanent_constraints = constraints.size(); num_unsatisfiable_constraints = 0; for (const LPConstraint &constraint : constraints) { @@ -315,18 +329,12 @@ void CplexSolverInterface::load_problem(const LinearProgram &lp) { matrix.assign_column_by_column(constraints, variables.size()); columns.assign(variables); rows.assign(constraints); - CPX_CALL(CPXcopylp, env, problem, variables.size(), constraints.size(), - sense_to_cplex_sense(lp.get_sense()), - columns.get_objective(), - rows.get_rhs(), - rows.get_sense(), - matrix.get_starts(), - matrix.get_counts(), - matrix.get_indices(), - matrix.get_coefficients(), - columns.get_lb(), - columns.get_ub(), - rows.get_range_values()); + CPX_CALL( + CPXcopylp, env, problem, variables.size(), constraints.size(), + sense_to_cplex_sense(lp.get_sense()), columns.get_objective(), + rows.get_rhs(), rows.get_sense(), matrix.get_starts(), + matrix.get_counts(), matrix.get_indices(), matrix.get_coefficients(), + columns.get_lb(), columns.get_ub(), rows.get_range_values()); if (is_mip) { CPX_CALL(CPXcopyctype, env, problem, columns.get_type()); @@ -347,17 +355,15 @@ void CplexSolverInterface::load_problem(const LinearProgram &lp) { } if (variables.has_names()) { 
CplexNameData col_names(variables); - CPX_CALL(CPXchgcolname, env, problem, - col_names.size(), - col_names.get_indices(), - col_names.get_names()); + CPX_CALL( + CPXchgcolname, env, problem, col_names.size(), + col_names.get_indices(), col_names.get_names()); } if (constraints.has_names()) { CplexNameData row_names(constraints); - CPX_CALL(CPXchgrowname, env, problem, - row_names.size(), - row_names.get_indices(), - row_names.get_names()); + CPX_CALL( + CPXchgrowname, env, problem, row_names.size(), + row_names.get_indices(), row_names.get_names()); } } @@ -375,26 +381,20 @@ void CplexSolverInterface::add_temporary_constraints( // CPXaddrows can add new variables as well, but we do not want any. static const int num_extra_columns = 0; char **extra_column_names = nullptr; - CPX_CALL(CPXaddrows, env, problem, num_extra_columns, - constraints.size(), - matrix.get_num_nonzeros(), - rows.get_rhs(), - rows.get_sense(), - matrix.get_starts(), - matrix.get_indices(), - matrix.get_coefficients(), - extra_column_names, - row_names.get_names()); + CPX_CALL( + CPXaddrows, env, problem, num_extra_columns, constraints.size(), + matrix.get_num_nonzeros(), rows.get_rhs(), rows.get_sense(), + matrix.get_starts(), matrix.get_indices(), matrix.get_coefficients(), + extra_column_names, row_names.get_names()); /* If there are any ranged rows, we have to set up their ranges with a separate call. 
*/ if (rows.get_num_ranged_rows() > 0) { - CPX_CALL(CPXchgrngval, env, problem, - rows.get_num_ranged_rows(), - rows.get_range_indices(), - rows.get_range_values()); + CPX_CALL( + CPXchgrngval, env, problem, rows.get_num_ranged_rows(), + rows.get_range_indices(), rows.get_range_values()); } for (const LPConstraint &constraint : constraints) { @@ -419,14 +419,18 @@ double CplexSolverInterface::get_infinity() const { return CPX_INFBOUND; } -void CplexSolverInterface::set_objective_coefficients(const vector &coefficients) { +void CplexSolverInterface::set_objective_coefficients( + const vector &coefficients) { objective_indices.clear(); objective_indices.resize(coefficients.size()); iota(objective_indices.begin(), objective_indices.end(), 0); - CPX_CALL(CPXchgobj, env, problem, coefficients.size(), objective_indices.data(), coefficients.data()); + CPX_CALL( + CPXchgobj, env, problem, coefficients.size(), objective_indices.data(), + coefficients.data()); } -void CplexSolverInterface::set_objective_coefficient(int index, double coefficient) { +void CplexSolverInterface::set_objective_coefficient( + int index, double coefficient) { CPX_CALL(CPXchgobj, env, problem, 1, &index, &coefficient); } @@ -591,6 +595,7 @@ bool CplexSolverInterface::has_temporary_constraints() const { void CplexSolverInterface::print_statistics() const { utils::g_log << "LP variables: " << get_num_variables() << endl; utils::g_log << "LP constraints: " << get_num_constraints() << endl; - utils::g_log << "LP non-zero entries: " << CPXgetnumnz(env, problem) << endl; + utils::g_log << "LP non-zero entries: " << CPXgetnumnz(env, problem) + << endl; } } diff --git a/src/search/lp/cplex_solver_interface.h b/src/search/lp/cplex_solver_interface.h index c228f6fd62..1c2e171065 100644 --- a/src/search/lp/cplex_solver_interface.h +++ b/src/search/lp/cplex_solver_interface.h @@ -6,8 +6,8 @@ #include "../algorithms/named_vector.h" -#include #include +#include namespace lp { template @@ -87,7 +87,7 @@ class 
CplexSolverInterface : public SolverInterface { entries for column 2 (4.5 and 7.2). */ std::vector counts; -public: + public: /* When loading a whole LP, column-by-column data better matches CPLEX's internal data structures, so we prefer this encoding. @@ -104,11 +104,21 @@ class CplexSolverInterface : public SolverInterface { void assign_row_by_row( const named_vector::NamedVector &constraints); - double *get_coefficients() {return to_cplex_array(coefficients);} - int *get_indices() {return to_cplex_array(indices);} - int *get_starts() {return to_cplex_array(starts);} - int *get_counts() {return to_cplex_array(counts);} - int get_num_nonzeros() {return coefficients.size();} + double *get_coefficients() { + return to_cplex_array(coefficients); + } + int *get_indices() { + return to_cplex_array(indices); + } + int *get_starts() { + return to_cplex_array(starts); + } + int *get_counts() { + return to_cplex_array(counts); + } + int get_num_nonzeros() { + return coefficients.size(); + } }; class CplexColumnsInfo { @@ -120,12 +130,20 @@ class CplexSolverInterface : public SolverInterface { std::vector type; // Objective value of each column (variable) std::vector objective; -public: + public: void assign(const named_vector::NamedVector &variables); - double *get_lb() {return to_cplex_array(lb);} - double *get_ub() {return to_cplex_array(ub);} - char *get_type() {return to_cplex_array(type);} - double *get_objective() {return to_cplex_array(objective);} + double *get_lb() { + return to_cplex_array(lb); + } + double *get_ub() { + return to_cplex_array(ub); + } + char *get_type() { + return to_cplex_array(type); + } + double *get_objective() { + return to_cplex_array(objective); + } }; class CplexRowsInfo { @@ -143,19 +161,31 @@ class CplexSolverInterface : public SolverInterface { rows that are ranged rows. 
*/ std::vector range_indices; -public: - void assign(const named_vector::NamedVector &constraints, int offset = 0, bool dense_range_values = true); - double *get_rhs() {return to_cplex_array(rhs);} - char *get_sense() {return to_cplex_array(sense);} - double *get_range_values() {return to_cplex_array(range_values);} - int *get_range_indices() {return to_cplex_array(range_indices);} - int get_num_ranged_rows() {return range_indices.size();} + public: + void assign( + const named_vector::NamedVector &constraints, + int offset = 0, bool dense_range_values = true); + double *get_rhs() { + return to_cplex_array(rhs); + } + char *get_sense() { + return to_cplex_array(sense); + } + double *get_range_values() { + return to_cplex_array(range_values); + } + int *get_range_indices() { + return to_cplex_array(range_indices); + } + int get_num_ranged_rows() { + return range_indices.size(); + } }; class CplexNameData { std::vector names; std::vector indices; -public: + public: template explicit CplexNameData(const named_vector::NamedVector &values) { if (values.has_names()) { @@ -165,7 +195,8 @@ class CplexSolverInterface : public SolverInterface { for (int i = 0; i < num_values; ++i) { const std::string &name = values.get_name(i); if (!name.empty()) { - // CPLEX copies the names, so the const_cast should be fine. + // CPLEX copies the names, so the const_cast should be + // fine. 
names.push_back(const_cast(name.data())); indices.push_back(i); } @@ -173,7 +204,9 @@ class CplexSolverInterface : public SolverInterface { } } - int size() {return names.size();} + int size() { + return names.size(); + } int *get_indices() { if (indices.empty()) { return nullptr; @@ -220,11 +253,14 @@ class CplexSolverInterface : public SolverInterface { virtual ~CplexSolverInterface() override; virtual void load_problem(const LinearProgram &lp) override; - virtual void add_temporary_constraints(const named_vector::NamedVector &constraints) override; + virtual void add_temporary_constraints( + const named_vector::NamedVector &constraints) override; virtual void clear_temporary_constraints() override; virtual double get_infinity() const override; - virtual void set_objective_coefficients(const std::vector &coefficients) override; - virtual void set_objective_coefficient(int index, double coefficient) override; + virtual void set_objective_coefficients( + const std::vector &coefficients) override; + virtual void set_objective_coefficient( + int index, double coefficient) override; virtual void set_constraint_lower_bound(int index, double bound) override; virtual void set_constraint_upper_bound(int index, double bound) override; virtual void set_variable_lower_bound(int index, double bound) override; diff --git a/src/search/lp/lp_solver.cc b/src/search/lp/lp_solver.cc index 80e63b4b82..37b35952d3 100644 --- a/src/search/lp/lp_solver.cc +++ b/src/search/lp/lp_solver.cc @@ -30,8 +30,7 @@ tuple get_lp_solver_arguments_from_options( } LPConstraint::LPConstraint(double lower_bound, double upper_bound) - : lower_bound(lower_bound), - upper_bound(upper_bound) { + : lower_bound(lower_bound), upper_bound(upper_bound) { } void LPConstraint::clear() { @@ -48,7 +47,8 @@ void LPConstraint::insert(int index, double coefficient) { coefficients.push_back(coefficient); } -ostream &LPConstraint::dump(ostream &stream, const LinearProgram *program) const { +ostream &LPConstraint::dump( 
+ ostream &stream, const LinearProgram *program) const { double infinity = numeric_limits::infinity(); if (program) { infinity = program->get_infinity(); @@ -61,7 +61,8 @@ ostream &LPConstraint::dump(ostream &stream, const LinearProgram *program) const stream << " + "; int variable = variables[i]; string variable_name; - if (program && program->get_variables().has_names() && !program->get_variables().get_name(variable).empty()) { + if (program && program->get_variables().has_names() && + !program->get_variables().get_name(variable).empty()) { variable_name = program->get_variables().get_name(variable); } else { variable_name = "v" + to_string(variable); @@ -76,8 +77,9 @@ ostream &LPConstraint::dump(ostream &stream, const LinearProgram *program) const return stream; } -LPVariable::LPVariable(double lower_bound, double upper_bound, - double objective_coefficient, bool is_integer) +LPVariable::LPVariable( + double lower_bound, double upper_bound, double objective_coefficient, + bool is_integer) : lower_bound(lower_bound), upper_bound(upper_bound), objective_coefficient(objective_coefficient), @@ -100,11 +102,13 @@ LPObjectiveSense LinearProgram::get_sense() const { return sense; } -const named_vector::NamedVector &LinearProgram::get_variables() const { +const named_vector::NamedVector & +LinearProgram::get_variables() const { return variables; } -const named_vector::NamedVector &LinearProgram::get_constraints() const { +const named_vector::NamedVector & +LinearProgram::get_constraints() const { return constraints; } @@ -116,7 +120,6 @@ void LinearProgram::set_objective_name(const string &name) { objective_name = name; } - LPSolver::LPSolver(LPSolverType solver_type) { string missing_solver; switch (solver_type) { @@ -139,11 +142,10 @@ LPSolver::LPSolver(LPSolverType solver_type) { } if (!pimpl) { cerr << "Tried to use LP solver " << missing_solver - << ", but the planner was compiled without support for it." 
- << endl + << ", but the planner was compiled without support for it." << endl << "See https://github.com/aibasel/downward/blob/main/BUILD.md\n" - << "to install " << missing_solver - << " and use it in the planner." << endl; + << "to install " << missing_solver << " and use it in the planner." + << endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } } @@ -152,7 +154,8 @@ void LPSolver::load_problem(const LinearProgram &lp) { pimpl->load_problem(lp); } -void LPSolver::add_temporary_constraints(const named_vector::NamedVector &constraints) { +void LPSolver::add_temporary_constraints( + const named_vector::NamedVector &constraints) { pimpl->add_temporary_constraints(constraints); } @@ -240,8 +243,7 @@ void LPSolver::print_statistics() const { pimpl->print_statistics(); } -static plugins::TypedEnumPlugin _enum_plugin({ - {"cplex", "commercial solver by IBM"}, - {"soplex", "open source solver by ZIB"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"cplex", "commercial solver by IBM"}, + {"soplex", "open source solver by ZIB"}}); } diff --git a/src/search/lp/lp_solver.h b/src/search/lp/lp_solver.h index 59693ffeab..88baaedb86 100644 --- a/src/search/lp/lp_solver.h +++ b/src/search/lp/lp_solver.h @@ -9,7 +9,6 @@ #include #include - namespace plugins { class Feature; class Options; @@ -17,11 +16,13 @@ class Options; namespace lp { enum class LPSolverType { - CPLEX, SOPLEX + CPLEX, + SOPLEX }; enum class LPObjectiveSense { - MAXIMIZE, MINIMIZE + MAXIMIZE, + MINIMIZE }; void add_lp_solver_option_to_feature(plugins::Feature &feature); @@ -38,20 +39,33 @@ class LPConstraint { public: LPConstraint(double lower_bound, double upper_bound); - const std::vector &get_variables() const {return variables;} - const std::vector &get_coefficients() const {return coefficients;} + const std::vector &get_variables() const { + return variables; + } + const std::vector &get_coefficients() const { + return coefficients; + } - double get_lower_bound() const {return 
lower_bound;} - void set_lower_bound(double lb) {lower_bound = lb;} - double get_upper_bound() const {return upper_bound;} - void set_upper_bound(double ub) {upper_bound = ub;} + double get_lower_bound() const { + return lower_bound; + } + void set_lower_bound(double lb) { + lower_bound = lb; + } + double get_upper_bound() const { + return upper_bound; + } + void set_upper_bound(double ub) { + upper_bound = ub; + } void clear(); bool empty() const; // Coefficients must be added without duplicate indices. void insert(int index, double coefficient); - std::ostream &dump(std::ostream &stream, const LinearProgram *program = nullptr) const; + std::ostream &dump( + std::ostream &stream, const LinearProgram *program = nullptr) const; }; struct LPVariable { @@ -60,10 +74,9 @@ struct LPVariable { double objective_coefficient; bool is_integer; - LPVariable(double lower_bound, - double upper_bound, - double objective_coefficient, - bool is_integer = false); + LPVariable( + double lower_bound, double upper_bound, double objective_coefficient, + bool is_integer = false); }; class LinearProgram { @@ -75,18 +88,22 @@ class LinearProgram { double infinity; public: - // objective_name is the name of the objective function used when writing the lp to a file. - LinearProgram(LPObjectiveSense sense, - named_vector::NamedVector &&variables, - named_vector::NamedVector &&constraints, - double infinity) - : sense(sense), variables(std::move(variables)), - constraints(std::move(constraints)), infinity(infinity) { + // objective_name is the name of the objective function used when writing + // the lp to a file. + LinearProgram( + LPObjectiveSense sense, + named_vector::NamedVector &&variables, + named_vector::NamedVector &&constraints, double infinity) + : sense(sense), + variables(std::move(variables)), + constraints(std::move(constraints)), + infinity(infinity) { } /* - Variables and constraints can be given a custom name for debugging purposes. 
- This has an impact on performance and should not be used in production code. + Variables and constraints can be given a custom name for debugging + purposes. This has an impact on performance and should not be used in + production code. */ named_vector::NamedVector &get_variables(); named_vector::NamedVector &get_constraints(); @@ -104,7 +121,8 @@ class LPSolver { explicit LPSolver(LPSolverType solver_type); void load_problem(const LinearProgram &lp); - void add_temporary_constraints(const named_vector::NamedVector &constraints); + void add_temporary_constraints( + const named_vector::NamedVector &constraints); void clear_temporary_constraints(); double get_infinity() const; diff --git a/src/search/lp/solver_interface.h b/src/search/lp/solver_interface.h index c9be11cf3d..8699a6930c 100644 --- a/src/search/lp/solver_interface.h +++ b/src/search/lp/solver_interface.h @@ -18,11 +18,13 @@ class SolverInterface { virtual ~SolverInterface() = default; virtual void load_problem(const LinearProgram &lp) = 0; - virtual void add_temporary_constraints(const named_vector::NamedVector &constraints) = 0; + virtual void add_temporary_constraints( + const named_vector::NamedVector &constraints) = 0; virtual void clear_temporary_constraints() = 0; virtual double get_infinity() const = 0; - virtual void set_objective_coefficients(const std::vector &coefficients) = 0; + virtual void set_objective_coefficients( + const std::vector &coefficients) = 0; virtual void set_objective_coefficient(int index, double coefficient) = 0; virtual void set_constraint_lower_bound(int index, double bound) = 0; virtual void set_constraint_upper_bound(int index, double bound) = 0; diff --git a/src/search/lp/soplex_solver_interface.cc b/src/search/lp/soplex_solver_interface.cc index e0331c6388..c38f776368 100644 --- a/src/search/lp/soplex_solver_interface.cc +++ b/src/search/lp/soplex_solver_interface.cc @@ -16,7 +16,8 @@ static int get_obj_sense(LPObjectiveSense sense) { } } -static LPRowSetReal 
constraints_to_row_set(const named_vector::NamedVector &constraints) { +static LPRowSetReal constraints_to_row_set( + const named_vector::NamedVector &constraints) { int num_rows = constraints.size(); int num_nonzeros = 0; for (const LPConstraint &constraint : constraints) { @@ -32,17 +33,22 @@ static LPRowSetReal constraints_to_row_set(const named_vector::NamedVector &variables) { +static LPColSetReal variables_to_col_set( + const named_vector::NamedVector &variables) { int num_cols = variables.size(); LPColSetReal cols(num_cols, 0); DSVector emptycol(0); for (const LPVariable &var : variables) { - cols.add(var.objective_coefficient, var.lower_bound, emptycol, var.upper_bound); + cols.add( + var.objective_coefficient, var.lower_bound, emptycol, + var.upper_bound); } return cols; } @@ -67,7 +73,8 @@ void SoPlexSolverInterface::load_problem(const LinearProgram &lp) { num_temporary_constraints = 0; } -void SoPlexSolverInterface::add_temporary_constraints(const named_vector::NamedVector &constraints) { +void SoPlexSolverInterface::add_temporary_constraints( + const named_vector::NamedVector &constraints) { soplex.addRowsReal(constraints_to_row_set(constraints)); num_temporary_constraints = constraints.size(); } @@ -85,22 +92,26 @@ double SoPlexSolverInterface::get_infinity() const { return infinity; } -void SoPlexSolverInterface::set_objective_coefficients(const vector &coefficients) { +void SoPlexSolverInterface::set_objective_coefficients( + const vector &coefficients) { int num_cols = coefficients.size(); for (int i = 0; i < num_cols; ++i) { soplex.changeObjReal(i, coefficients[i]); } } -void SoPlexSolverInterface::set_objective_coefficient(int index, double coefficient) { +void SoPlexSolverInterface::set_objective_coefficient( + int index, double coefficient) { soplex.changeObjReal(index, coefficient); } -void SoPlexSolverInterface::set_constraint_lower_bound(int index, double bound) { +void SoPlexSolverInterface::set_constraint_lower_bound( + int index, double 
bound) { soplex.changeLhsReal(index, bound); } -void SoPlexSolverInterface::set_constraint_upper_bound(int index, double bound) { +void SoPlexSolverInterface::set_constraint_upper_bound( + int index, double bound) { soplex.changeRhsReal(index, bound); } @@ -181,7 +192,9 @@ void SoPlexSolverInterface::print_failure_analysis() const { cout << "LP is primal infeasible or unbounded." << endl; break; case SPxSolverBase::Status::OPTIMAL_UNSCALED_VIOLATIONS: - cout << "LP has beed solved to optimality but unscaled solution contains violations." << endl; + cout + << "LP has been solved to optimality but unscaled solution contains violations." + << endl; break; } } diff --git a/src/search/lp/soplex_solver_interface.h b/src/search/lp/soplex_solver_interface.h index 50bc3fb75f..6960f264e1 100644 --- a/src/search/lp/soplex_solver_interface.h +++ b/src/search/lp/soplex_solver_interface.h @@ -31,12 +31,15 @@ class SoPlexSolverInterface : public SolverInterface { SoPlexSolverInterface(); virtual void load_problem(const LinearProgram &lp) override; - virtual void add_temporary_constraints(const named_vector::NamedVector &constraints) override; + virtual void add_temporary_constraints( + const named_vector::NamedVector &constraints) override; virtual void clear_temporary_constraints() override; virtual double get_infinity() const override; - virtual void set_objective_coefficients(const std::vector &coefficients) override; - virtual void set_objective_coefficient(int index, double coefficient) override; + virtual void set_objective_coefficients( + const std::vector &coefficients) override; + virtual void set_objective_coefficient( + int index, double coefficient) override; virtual void set_constraint_lower_bound(int index, double bound) override; virtual void set_constraint_upper_bound(int index, double bound) override; virtual void set_variable_lower_bound(int index, double bound) override; diff --git a/src/search/merge_and_shrink/distances.cc 
b/src/search/merge_and_shrink/distances.cc index 1731a6b7e8..f107163136 100644 --- a/src/search/merge_and_shrink/distances.cc +++ b/src/search/merge_and_shrink/distances.cc @@ -64,7 +64,8 @@ static void breadth_first_search( void Distances::compute_init_distances_unit_cost() { vector> forward_graph(get_num_states()); for (const LocalLabelInfo &local_label_info : transition_system) { - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); for (const Transition &transition : transitions) { forward_graph[transition.src].push_back(transition.target); } @@ -79,7 +80,8 @@ void Distances::compute_init_distances_unit_cost() { void Distances::compute_goal_distances_unit_cost() { vector> backward_graph(get_num_states()); for (const LocalLabelInfo &local_label_info : transition_system) { - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); for (const Transition &transition : transitions) { backward_graph[transition.target].push_back(transition.src); } @@ -97,8 +99,7 @@ void Distances::compute_goal_distances_unit_cost() { static void dijkstra_search( const vector>> &graph, - priority_queues::AdaptiveQueue &queue, - vector &distances) { + priority_queues::AdaptiveQueue &queue, vector &distances) { while (!queue.empty()) { pair top_pair = queue.pop(); int distance = top_pair.first; @@ -123,7 +124,8 @@ static void dijkstra_search( void Distances::compute_init_distances_general_cost() { vector>> forward_graph(get_num_states()); for (const LocalLabelInfo &local_label_info : transition_system) { - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); int cost = local_label_info.get_cost(); for (const Transition &transition : transitions) { forward_graph[transition.src].push_back( @@ -142,7 +144,8 @@ void 
Distances::compute_init_distances_general_cost() { void Distances::compute_goal_distances_general_cost() { vector>> backward_graph(get_num_states()); for (const LocalLabelInfo &local_label_info : transition_system) { - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); int cost = local_label_info.get_cost(); for (const Transition &transition : transitions) { backward_graph[transition.target].push_back( @@ -163,8 +166,7 @@ void Distances::compute_goal_distances_general_cost() { } void Distances::compute_distances( - bool compute_init_distances, - bool compute_goal_distances, + bool compute_init_distances, bool compute_goal_distances, utils::LogProxy &log) { assert(compute_init_distances || compute_goal_distances); /* @@ -190,7 +192,8 @@ void Distances::compute_distances( Otherwise, when computing distances, the previous (invalid) distance information must have been cleared before. */ - assert(!are_init_distances_computed() && !are_goal_distances_computed()); + assert( + !are_init_distances_computed() && !are_goal_distances_computed()); assert(init_distances.empty() && goal_distances.empty()); } @@ -260,8 +263,7 @@ void Distances::compute_distances( void Distances::apply_abstraction( const StateEquivalenceRelation &state_equivalence_relation, - bool compute_init_distances, - bool compute_goal_distances, + bool compute_init_distances, bool compute_goal_distances, utils::LogProxy &log) { if (compute_init_distances) { assert(are_init_distances_computed()); @@ -288,7 +290,8 @@ void Distances::apply_abstraction( state_equivalence_relation[new_state]; assert(!state_equivalence_class.empty()); - StateEquivalenceClass::const_iterator pos = state_equivalence_class.begin(); + StateEquivalenceClass::const_iterator pos = + state_equivalence_class.begin(); int new_init_dist = -1; int new_goal_dist = -1; if (compute_init_distances) { @@ -300,11 +303,13 @@ void Distances::apply_abstraction( ++pos; for 
(; pos != state_equivalence_class.end(); ++pos) { - if (compute_init_distances && init_distances[*pos] != new_init_dist) { + if (compute_init_distances && + init_distances[*pos] != new_init_dist) { must_recompute = true; break; } - if (compute_goal_distances && goal_distances[*pos] != new_goal_dist) { + if (compute_goal_distances && + goal_distances[*pos] != new_goal_dist) { must_recompute = true; break; } @@ -327,8 +332,7 @@ void Distances::apply_abstraction( << "simplification was not f-preserving!" << endl; } clear_distances(); - compute_distances( - compute_init_distances, compute_goal_distances, log); + compute_distances(compute_init_distances, compute_goal_distances, log); } else { init_distances = move(new_init_distances); goal_distances = move(new_goal_distances); @@ -366,7 +370,8 @@ void Distances::statistics(utils::LogProxy &log) const { if (!are_goal_distances_computed()) { log << "goal distances not computed"; } else if (transition_system.is_solvable(*this)) { - log << "init h=" << get_goal_distance(transition_system.get_init_state()); + log << "init h=" + << get_goal_distance(transition_system.get_init_state()); } else { log << "transition system is unsolvable"; } diff --git a/src/search/merge_and_shrink/distances.h b/src/search/merge_and_shrink/distances.h index 044d613e18..5e8e7366fe 100644 --- a/src/search/merge_and_shrink/distances.h +++ b/src/search/merge_and_shrink/distances.h @@ -50,8 +50,7 @@ class Distances { } void compute_distances( - bool compute_init_distances, - bool compute_goal_distances, + bool compute_init_distances, bool compute_goal_distances, utils::LogProxy &log); /* @@ -65,8 +64,7 @@ class Distances { */ void apply_abstraction( const StateEquivalenceRelation &state_equivalence_relation, - bool compute_init_distances, - bool compute_goal_distances, + bool compute_init_distances, bool compute_goal_distances, utils::LogProxy &log); int get_init_distance(int state) const { diff --git 
a/src/search/merge_and_shrink/factored_transition_system.cc b/src/search/merge_and_shrink/factored_transition_system.cc index 12068f0232..943412aaaf 100644 --- a/src/search/merge_and_shrink/factored_transition_system.cc +++ b/src/search/merge_and_shrink/factored_transition_system.cc @@ -16,15 +16,13 @@ using namespace std; namespace merge_and_shrink { FTSConstIterator::FTSConstIterator( - const FactoredTransitionSystem &fts, - bool end) + const FactoredTransitionSystem &fts, bool end) : fts(fts), current_index((end ? fts.get_size() : 0)) { next_valid_index(); } void FTSConstIterator::next_valid_index() { - while (current_index < fts.get_size() - && !fts.is_active(current_index)) { + while (current_index < fts.get_size() && !fts.is_active(current_index)) { ++current_index; } } @@ -34,14 +32,12 @@ void FTSConstIterator::operator++() { next_valid_index(); } - FactoredTransitionSystem::FactoredTransitionSystem( unique_ptr labels, vector> &&transition_systems, vector> &&mas_representations, vector> &&distances, - const bool compute_init_distances, - const bool compute_goal_distances, + const bool compute_init_distances, const bool compute_goal_distances, utils::LogProxy &log) : labels(move(labels)), transition_systems(move(transition_systems)), @@ -59,7 +55,8 @@ FactoredTransitionSystem::FactoredTransitionSystem( } } -FactoredTransitionSystem::FactoredTransitionSystem(FactoredTransitionSystem &&other) +FactoredTransitionSystem::FactoredTransitionSystem( + FactoredTransitionSystem &&other) : labels(move(other.labels)), transition_systems(move(other.transition_systems)), mas_representations(move(other.mas_representations)), @@ -81,8 +78,10 @@ void FactoredTransitionSystem::assert_index_valid(int index) const { assert(utils::in_bounds(index, transition_systems)); assert(utils::in_bounds(index, mas_representations)); assert(utils::in_bounds(index, distances)); - if ((!transition_systems[index] || !mas_representations[index] || !distances[index]) && - 
(transition_systems[index] || mas_representations[index] || distances[index])) { + if ((!transition_systems[index] || !mas_representations[index] || + !distances[index]) && + (transition_systems[index] || mas_representations[index] || + distances[index])) { cerr << "Factor at index is in an inconsistent state!" << endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } @@ -90,10 +89,12 @@ void FactoredTransitionSystem::assert_index_valid(int index) const { bool FactoredTransitionSystem::is_component_valid(int index) const { assert(is_active(index)); - if (compute_init_distances && !distances[index]->are_init_distances_computed()) { + if (compute_init_distances && + !distances[index]->are_init_distances_computed()) { return false; } - if (compute_goal_distances && !distances[index]->are_goal_distances_computed()) { + if (compute_goal_distances && + !distances[index]->are_goal_distances_computed()) { return false; } return transition_systems[index]->is_valid(); @@ -108,8 +109,7 @@ void FactoredTransitionSystem::assert_all_components_valid() const { } void FactoredTransitionSystem::apply_label_mapping( - const vector>> &label_mapping, - int combinable_index) { + const vector>> &label_mapping, int combinable_index) { assert_all_components_valid(); for (const auto &entry : label_mapping) { assert(entry.first == labels->get_num_total_labels()); @@ -126,8 +126,7 @@ void FactoredTransitionSystem::apply_label_mapping( } bool FactoredTransitionSystem::apply_abstraction( - int index, - const StateEquivalenceRelation &state_equivalence_relation, + int index, const StateEquivalenceRelation &state_equivalence_relation, utils::LogProxy &log) { assert(is_component_valid(index)); @@ -143,10 +142,8 @@ bool FactoredTransitionSystem::apply_abstraction( state_equivalence_relation, abstraction_mapping, log); if (compute_init_distances || compute_goal_distances) { distances[index]->apply_abstraction( - state_equivalence_relation, - compute_init_distances, - 
compute_goal_distances, - log); + state_equivalence_relation, compute_init_distances, + compute_goal_distances, log); } mas_representations[index]->apply_abstraction_to_lookup_table( abstraction_mapping); @@ -158,17 +155,12 @@ bool FactoredTransitionSystem::apply_abstraction( } int FactoredTransitionSystem::merge( - int index1, - int index2, - utils::LogProxy &log) { + int index1, int index2, utils::LogProxy &log) { assert(is_component_valid(index1)); assert(is_component_valid(index2)); - transition_systems.push_back( - TransitionSystem::merge( - *labels, - *transition_systems[index1], - *transition_systems[index2], - log)); + transition_systems.push_back(TransitionSystem::merge( + *labels, *transition_systems[index1], *transition_systems[index2], + log)); distances[index1] = nullptr; distances[index2] = nullptr; transition_systems[index1] = nullptr; @@ -195,11 +187,11 @@ int FactoredTransitionSystem::merge( pair, unique_ptr> FactoredTransitionSystem::extract_factor(int index) { assert(is_component_valid(index)); - return make_pair(move(mas_representations[index]), - move(distances[index])); + return make_pair(move(mas_representations[index]), move(distances[index])); } -void FactoredTransitionSystem::statistics(int index, utils::LogProxy &log) const { +void FactoredTransitionSystem::statistics( + int index, utils::LogProxy &log) const { if (log.is_at_least_verbose()) { assert(is_component_valid(index)); const TransitionSystem &ts = *transition_systems[index]; diff --git a/src/search/merge_and_shrink/factored_transition_system.h b/src/search/merge_and_shrink/factored_transition_system.h index 897fdffc2e..d19d4d4b99 100644 --- a/src/search/merge_and_shrink/factored_transition_system.h +++ b/src/search/merge_and_shrink/factored_transition_system.h @@ -63,7 +63,8 @@ class FactoredTransitionSystem { std::unique_ptr labels; // Entries with nullptr have been merged. 
std::vector> transition_systems; - std::vector> mas_representations; + std::vector> + mas_representations; std::vector> distances; const bool compute_init_distances; const bool compute_goal_distances; @@ -87,18 +88,18 @@ class FactoredTransitionSystem { FactoredTransitionSystem( std::unique_ptr labels, std::vector> &&transition_systems, - std::vector> &&mas_representations, + std::vector> + &&mas_representations, std::vector> &&distances, - bool compute_init_distances, - bool compute_goal_distances, + bool compute_init_distances, bool compute_goal_distances, utils::LogProxy &log); FactoredTransitionSystem(FactoredTransitionSystem &&other); ~FactoredTransitionSystem(); // No copying or assignment. FactoredTransitionSystem(const FactoredTransitionSystem &) = delete; - FactoredTransitionSystem &operator=( - const FactoredTransitionSystem &) = delete; + FactoredTransitionSystem &operator=(const FactoredTransitionSystem &) = + delete; // Merge-and-shrink transformations. /* @@ -123,23 +124,21 @@ class FactoredTransitionSystem { relation are pruned. */ bool apply_abstraction( - int index, - const StateEquivalenceRelation &state_equivalence_relation, + int index, const StateEquivalenceRelation &state_equivalence_relation, utils::LogProxy &log); /* Merge the two factors at index1 and index2. */ - int merge( - int index1, - int index2, - utils::LogProxy &log); + int merge(int index1, int index2, utils::LogProxy &log); /* Extract the factor at the given index, rendering the FTS invalid. 
*/ - std::pair, - std::unique_ptr> extract_factor(int index); + std::pair< + std::unique_ptr, + std::unique_ptr> + extract_factor(int index); void statistics(int index, utils::LogProxy &log) const; void dump(int index, utils::LogProxy &log) const; diff --git a/src/search/merge_and_shrink/fts_factory.cc b/src/search/merge_and_shrink/fts_factory.cc index 48604f1680..5ede9c7945 100644 --- a/src/search/merge_and_shrink/fts_factory.cc +++ b/src/search/merge_and_shrink/fts_factory.cc @@ -59,14 +59,11 @@ class FTSFactory { void mark_as_relevant(int var_id, int label); unordered_map compute_preconditions(OperatorProxy op); void handle_operator_effect( - OperatorProxy op, - EffectProxy effect, - const unordered_map &pre_val, - vector &has_effect_on_var, + OperatorProxy op, EffectProxy effect, + const unordered_map &pre_val, vector &has_effect_on_var, vector> &transitions_by_var); void handle_operator_precondition( - OperatorProxy op, - FactProxy precondition, + OperatorProxy op, FactProxy precondition, const vector &has_effect_on_var, vector> &transitions_by_var); void build_transitions_for_operator(OperatorProxy op); @@ -75,7 +72,8 @@ class FTSFactory { void build_transitions(const Labels &labels); vector> create_transition_systems( const Labels &labels); - vector> create_mas_representations() const; + vector> + create_mas_representations() const; vector> create_distances( const vector> &transition_systems) const; public: @@ -87,12 +85,10 @@ class FTSFactory { misuse because the class is only used internally in this file. 
*/ FactoredTransitionSystem create( - bool compute_init_distances, - bool compute_goal_distances, + bool compute_init_distances, bool compute_goal_distances, utils::LogProxy &log); }; - FTSFactory::FTSFactory(const TaskProxy &task_proxy) : task_proxy(task_proxy), task_has_conditional_effects(false) { } @@ -145,7 +141,8 @@ void FTSFactory::initialize_transition_system_data(const Labels &labels) { VariablesProxy variables = task_proxy.get_variables(); transition_system_data_by_var.resize(variables.size()); for (VariableProxy var : variables) { - TransitionSystemData &ts_data = transition_system_data_by_var[var.get_id()]; + TransitionSystemData &ts_data = + transition_system_data_by_var[var.get_id()]; ts_data.num_variables = variables.size(); ts_data.incorporated_variables.push_back(var.get_id()); ts_data.label_to_local_label.resize(labels.get_max_num_labels(), -1); @@ -171,10 +168,8 @@ unordered_map FTSFactory::compute_preconditions(OperatorProxy op) { } void FTSFactory::handle_operator_effect( - OperatorProxy op, - EffectProxy effect, - const unordered_map &pre_val, - vector &has_effect_on_var, + OperatorProxy op, EffectProxy effect, + const unordered_map &pre_val, vector &has_effect_on_var, vector> &transitions_by_var) { int label = op.get_id(); FactProxy fact = effect.get_fact(); @@ -246,8 +241,7 @@ void FTSFactory::handle_operator_effect( } void FTSFactory::handle_operator_precondition( - OperatorProxy op, - FactProxy precondition, + OperatorProxy op, FactProxy precondition, const vector &has_effect_on_var, vector> &transitions_by_var) { int label = op.get_id(); @@ -271,14 +265,16 @@ void FTSFactory::build_transitions_for_operator(OperatorProxy op) { vector> transitions_by_var(num_variables); for (EffectProxy effect : op.get_effects()) - handle_operator_effect(op, effect, pre_val, has_effect_on_var, transitions_by_var); + handle_operator_effect( + op, effect, pre_val, has_effect_on_var, transitions_by_var); /* We must handle preconditions *after* effects because 
handling the effects sets has_effect_on_var. */ for (FactProxy precondition : op.get_preconditions()) - handle_operator_precondition(op, precondition, has_effect_on_var, transitions_by_var); + handle_operator_precondition( + op, precondition, has_effect_on_var, transitions_by_var); int label = op.get_id(); int label_cost = op.get_cost(); @@ -304,11 +300,14 @@ void FTSFactory::build_transitions_for_operator(OperatorProxy op) { vector &label_to_local_label = transition_system_data_by_var[var_id].label_to_local_label; - vector &local_label_infos = transition_system_data_by_var[var_id].local_label_infos; + vector &local_label_infos = + transition_system_data_by_var[var_id].local_label_infos; bool found_locally_equivalent_label_group = false; - for (size_t local_label = 0; local_label < local_label_infos.size(); ++local_label) { + for (size_t local_label = 0; local_label < local_label_infos.size(); + ++local_label) { LocalLabelInfo &local_label_info = local_label_infos[local_label]; - const vector &local_label_transitions = local_label_info.get_transitions(); + const vector &local_label_transitions = + local_label_info.get_transitions(); if (transitions == local_label_transitions) { assert(label_to_local_label[label] == -1); label_to_local_label[label] = local_label; @@ -321,7 +320,8 @@ void FTSFactory::build_transitions_for_operator(OperatorProxy op) { if (!found_locally_equivalent_label_group) { int new_local_label = local_label_infos.size(); LabelGroup label_group = {label}; - local_label_infos.emplace_back(move(label_group), move(transitions), label_cost); + local_label_infos.emplace_back( + move(label_group), move(transitions), label_cost); assert(label_to_local_label[label] == -1); label_to_local_label[label] = new_local_label; } @@ -376,7 +376,8 @@ void FTSFactory::build_transitions(const Labels &labels) { build_transitions_for_irrelevant_ops(variable, labels); } -vector> FTSFactory::create_transition_systems(const Labels &labels) { +vector> 
FTSFactory::create_transition_systems( + const Labels &labels) { // Create the actual TransitionSystem objects. int num_variables = task_proxy.get_variables().size(); @@ -388,20 +389,15 @@ vector> FTSFactory::create_transition_systems(const for (int var_id = 0; var_id < num_variables; ++var_id) { TransitionSystemData &ts_data = transition_system_data_by_var[var_id]; result.push_back(make_unique( - ts_data.num_variables, - move(ts_data.incorporated_variables), - labels, - move(ts_data.label_to_local_label), - move(ts_data.local_label_infos), - ts_data.num_states, - move(ts_data.goal_states), - ts_data.init_state - )); + ts_data.num_variables, move(ts_data.incorporated_variables), labels, + move(ts_data.label_to_local_label), move(ts_data.local_label_infos), + ts_data.num_states, move(ts_data.goal_states), ts_data.init_state)); } return result; } -vector> FTSFactory::create_mas_representations() const { +vector> +FTSFactory::create_mas_representations() const { // Create the actual MergeAndShrinkRepresentation objects. int num_variables = task_proxy.get_variables().size(); @@ -429,15 +425,13 @@ vector> FTSFactory::create_distances( result.reserve(num_variables * 2 - 1); for (int var_id = 0; var_id < num_variables; ++var_id) { - result.push_back( - make_unique(*transition_systems[var_id])); + result.push_back(make_unique(*transition_systems[var_id])); } return result; } FactoredTransitionSystem FTSFactory::create( - const bool compute_init_distances, - const bool compute_goal_distances, + const bool compute_init_distances, const bool compute_goal_distances, utils::LogProxy &log) { if (log.is_at_least_normal()) { log << "Building atomic transition systems... 
" << endl; @@ -455,23 +449,14 @@ FactoredTransitionSystem FTSFactory::create( create_distances(transition_systems); return FactoredTransitionSystem( - move(labels), - move(transition_systems), - move(mas_representations), - move(distances), - compute_init_distances, - compute_goal_distances, - log); + move(labels), move(transition_systems), move(mas_representations), + move(distances), compute_init_distances, compute_goal_distances, log); } FactoredTransitionSystem create_factored_transition_system( - const TaskProxy &task_proxy, - const bool compute_init_distances, - const bool compute_goal_distances, - utils::LogProxy &log) { - return FTSFactory(task_proxy).create( - compute_init_distances, - compute_goal_distances, - log); + const TaskProxy &task_proxy, const bool compute_init_distances, + const bool compute_goal_distances, utils::LogProxy &log) { + return FTSFactory(task_proxy) + .create(compute_init_distances, compute_goal_distances, log); } } diff --git a/src/search/merge_and_shrink/fts_factory.h b/src/search/merge_and_shrink/fts_factory.h index e4369ddc1f..bc8d575a28 100644 --- a/src/search/merge_and_shrink/fts_factory.h +++ b/src/search/merge_and_shrink/fts_factory.h @@ -21,10 +21,8 @@ namespace merge_and_shrink { class FactoredTransitionSystem; extern FactoredTransitionSystem create_factored_transition_system( - const TaskProxy &task_proxy, - bool compute_init_distances, - bool compute_goal_distances, - utils::LogProxy &log); + const TaskProxy &task_proxy, bool compute_init_distances, + bool compute_goal_distances, utils::LogProxy &log); } #endif diff --git a/src/search/merge_and_shrink/label_reduction.cc b/src/search/merge_and_shrink/label_reduction.cc index 7f1eca41b2..ab31e4cd5a 100644 --- a/src/search/merge_and_shrink/label_reduction.cc +++ b/src/search/merge_and_shrink/label_reduction.cc @@ -27,9 +27,8 @@ using utils::ExitCode; namespace merge_and_shrink { LabelReduction::LabelReduction( - bool before_shrinking, bool before_merging, - 
LabelReductionMethod method, LabelReductionSystemOrder system_order, - int random_seed) + bool before_shrinking, bool before_merging, LabelReductionMethod method, + LabelReductionSystemOrder system_order, int random_seed) : lr_before_shrinking(before_shrinking), lr_before_merging(before_merging), lr_method(method), @@ -37,8 +36,7 @@ LabelReduction::LabelReduction( rng(utils::get_rng(random_seed)) { utils::verify_argument( lr_before_shrinking || lr_before_merging, - "Please turn on at least one of the options \"before_shrinking\" or \"before_merging\"!" - ); + "Please turn on at least one of the options \"before_shrinking\" or \"before_merging\"!"); } bool LabelReduction::initialized() const { @@ -49,10 +47,11 @@ void LabelReduction::initialize(const TaskProxy &task_proxy) { assert(!initialized()); // Compute the transition system order. - size_t max_transition_system_count = task_proxy.get_variables().size() * 2 - 1; + size_t max_transition_system_count = + task_proxy.get_variables().size() * 2 - 1; transition_system_order.reserve(max_transition_system_count); - if (lr_system_order == LabelReductionSystemOrder::REGULAR - || lr_system_order == LabelReductionSystemOrder::RANDOM) { + if (lr_system_order == LabelReductionSystemOrder::REGULAR || + lr_system_order == LabelReductionSystemOrder::RANDOM) { for (size_t i = 0; i < max_transition_system_count; ++i) transition_system_order.push_back(i); if (lr_system_order == LabelReductionSystemOrder::RANDOM) { @@ -61,15 +60,15 @@ void LabelReduction::initialize(const TaskProxy &task_proxy) { } else { assert(lr_system_order == LabelReductionSystemOrder::REVERSE); for (size_t i = 0; i < max_transition_system_count; ++i) - transition_system_order.push_back(max_transition_system_count - 1 - i); + transition_system_order.push_back( + max_transition_system_count - 1 - i); } } void LabelReduction::compute_label_mapping( const equivalence_relation::EquivalenceRelation &relation, const FactoredTransitionSystem &fts, - vector>> 
&label_mapping, - utils::LogProxy &log) const { + vector>> &label_mapping, utils::LogProxy &log) const { const Labels &labels = fts.get_labels(); int next_new_label = labels.get_num_total_labels(); int num_labels = 0; @@ -88,10 +87,11 @@ void LabelReduction::compute_label_mapping( // Labels have to be sorted for LocalLabelInfo. sort(equivalent_labels.begin(), equivalent_labels.end()); if (log.is_at_least_debug()) { - log << "Reducing labels " - << equivalent_labels << " to " << next_new_label << endl; + log << "Reducing labels " << equivalent_labels << " to " + << next_new_label << endl; } - label_mapping.push_back(make_pair(next_new_label, equivalent_labels)); + label_mapping.push_back( + make_pair(next_new_label, equivalent_labels)); ++next_new_label; } if (!equivalent_labels.empty()) { @@ -101,17 +101,14 @@ void LabelReduction::compute_label_mapping( } int number_reduced_labels = num_labels - num_labels_after_reduction; if (log.is_at_least_verbose() && number_reduced_labels > 0) { - log << "Label reduction: " - << num_labels << " labels, " - << num_labels_after_reduction << " after reduction" - << endl; + log << "Label reduction: " << num_labels << " labels, " + << num_labels_after_reduction << " after reduction" << endl; } } equivalence_relation::EquivalenceRelation LabelReduction::compute_combinable_equivalence_relation( - int ts_index, - const FactoredTransitionSystem &fts) const { + int ts_index, const FactoredTransitionSystem &fts) const { /* Returns an equivalence relation over labels s.t. 
l ~ l' iff l and l' are locally equivalent in all transition systems @@ -140,8 +137,7 @@ LabelReduction::compute_combinable_equivalence_relation( } bool LabelReduction::reduce( - const pair &next_merge, - FactoredTransitionSystem &fts, + const pair &next_merge, FactoredTransitionSystem &fts, utils::LogProxy &log) const { assert(initialized()); assert(reduce_before_shrinking() || reduce_before_merging()); @@ -170,9 +166,8 @@ bool LabelReduction::reduce( } utils::release_vector_memory(label_mapping); - relation = compute_combinable_equivalence_relation( - next_merge.second, - fts); + relation = + compute_combinable_equivalence_relation(next_merge.second, fts); compute_label_mapping(relation, fts, label_mapping, log); if (!label_mapping.empty()) { fts.apply_label_mapping(label_mapping, next_merge.second); @@ -193,7 +188,9 @@ bool LabelReduction::reduce( int max_iterations; if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS) { max_iterations = num_transition_systems; - } else if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT) { + } else if ( + lr_method == + LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT) { max_iterations = INF; } else { ABORT("unknown label reduction method"); @@ -277,7 +274,8 @@ void LabelReduction::dump_options(utils::LogProxy &log) const { } log << endl; if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS || - lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT) { + lr_method == + LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT) { log << "System order: "; switch (lr_system_order) { case LabelReductionSystemOrder::REGULAR: @@ -309,16 +307,12 @@ class LabelReductionFeature "https://ai.dmi.unibas.ch/papers/sievers-et-al-aaai2014.pdf", "Proceedings of the 28th AAAI Conference on Artificial" " Intelligence (AAAI 2014)", - "2358-2366", - "AAAI Press", - "2014")); + "2358-2366", "AAAI Press", "2014")); add_option( - "before_shrinking", - "apply label reduction before 
shrinking"); + "before_shrinking", "apply label reduction before shrinking"); add_option( - "before_merging", - "apply label reduction before merging"); + "before_merging", "apply label reduction before merging"); add_option( "method", @@ -344,47 +338,45 @@ class LabelReductionFeature utils::add_rng_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("before_shrinking"), opts.get("before_merging"), opts.get("method"), opts.get("system_order"), - utils::get_rng_arguments_from_options(opts) - ); + utils::get_rng_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; -static class LabelReductionCategoryPlugin : public plugins::TypedCategoryPlugin { +static class LabelReductionCategoryPlugin + : public plugins::TypedCategoryPlugin { public: LabelReductionCategoryPlugin() : TypedCategoryPlugin("LabelReduction") { - document_synopsis("This page describes the current single 'option' for label reduction."); + document_synopsis( + "This page describes the current single 'option' for label reduction."); } -} -_category_plugin; - -static plugins::TypedEnumPlugin _label_reduction_method_enum_plugin({ - {"two_transition_systems", - "compute the 'combinable relation' only for the two transition " - "systems being merged next"}, - {"all_transition_systems", - "compute the 'combinable relation' for labels once for every " - "transition system and reduce labels"}, - {"all_transition_systems_with_fixpoint", - "keep computing the 'combinable relation' for labels iteratively " - "for all transition systems until no more labels can be reduced"} - }); - -static plugins::TypedEnumPlugin _label_reduction_system_order_enum_plugin({ - {"regular", - "transition systems are considered in the order given in the planner " - "input if atomic and in the order of their 
creation if composite."}, - {"reverse", - "inverse of regular"}, - {"random", - "random order"} - }); +} _category_plugin; + +static plugins::TypedEnumPlugin + _label_reduction_method_enum_plugin( + {{"two_transition_systems", + "compute the 'combinable relation' only for the two transition " + "systems being merged next"}, + {"all_transition_systems", + "compute the 'combinable relation' for labels once for every " + "transition system and reduce labels"}, + {"all_transition_systems_with_fixpoint", + "keep computing the 'combinable relation' for labels iteratively " + "for all transition systems until no more labels can be reduced"}}); + +static plugins::TypedEnumPlugin + _label_reduction_system_order_enum_plugin( + {{"regular", + "transition systems are considered in the order given in the planner " + "input if atomic and in the order of their creation if composite."}, + {"reverse", "inverse of regular"}, + {"random", "random order"}}); } diff --git a/src/search/merge_and_shrink/label_reduction.h b/src/search/merge_and_shrink/label_reduction.h index ff94281afd..4fc4177497 100644 --- a/src/search/merge_and_shrink/label_reduction.h +++ b/src/search/merge_and_shrink/label_reduction.h @@ -72,17 +72,14 @@ class LabelReduction { utils::LogProxy &log) const; equivalence_relation::EquivalenceRelation compute_combinable_equivalence_relation( - int ts_index, - const FactoredTransitionSystem &fts) const; + int ts_index, const FactoredTransitionSystem &fts) const; public: LabelReduction( - bool before_shrinking, bool before_merging, - LabelReductionMethod method, + bool before_shrinking, bool before_merging, LabelReductionMethod method, LabelReductionSystemOrder system_order, int random_seed); void initialize(const TaskProxy &task_proxy); bool reduce( - const std::pair &next_merge, - FactoredTransitionSystem &fts, + const std::pair &next_merge, FactoredTransitionSystem &fts, utils::LogProxy &log) const; void dump_options(utils::LogProxy &log) const; bool 
reduce_before_shrinking() const { diff --git a/src/search/merge_and_shrink/labels.cc b/src/search/merge_and_shrink/labels.cc index e1562807cc..5813b61979 100644 --- a/src/search/merge_and_shrink/labels.cc +++ b/src/search/merge_and_shrink/labels.cc @@ -12,9 +12,10 @@ using namespace std; namespace merge_and_shrink { LabelsConstIterator::LabelsConstIterator( - const vector &label_costs, - vector::const_iterator it) - : end_it(label_costs.end()), it(it), current_pos(distance(label_costs.begin(), it)) { + const vector &label_costs, vector::const_iterator it) + : end_it(label_costs.end()), + it(it), + current_pos(distance(label_costs.begin(), it)) { advance_to_next_valid_index(); } @@ -67,8 +68,7 @@ void Labels::dump_labels() const { utils::g_log << "active labels:" << endl; for (size_t label = 0; label < label_costs.size(); ++label) { if (label_costs[label] != -1) { - utils::g_log << "label " << label - << ", cost " << label_costs[label] + utils::g_log << "label " << label << ", cost " << label_costs[label] << endl; } } diff --git a/src/search/merge_and_shrink/merge_and_shrink_algorithm.cc b/src/search/merge_and_shrink/merge_and_shrink_algorithm.cc index cb9cce1e61..ff025b9b37 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_algorithm.cc +++ b/src/search/merge_and_shrink/merge_and_shrink_algorithm.cc @@ -34,17 +34,17 @@ using plugins::Bounds; using utils::ExitCode; namespace merge_and_shrink { -static void log_progress(const utils::Timer &timer, const string &msg, utils::LogProxy &log) { +static void log_progress( + const utils::Timer &timer, const string &msg, utils::LogProxy &log) { log << "M&S algorithm timer: " << timer << " (" << msg << ")" << endl; } MergeAndShrinkAlgorithm::MergeAndShrinkAlgorithm( const shared_ptr &merge_strategy, const shared_ptr &shrink_strategy, const shared_ptr &label_reduction, - bool prune_unreachable_states, bool prune_irrelevant_states, - int max_states, int max_states_before_merge, - int threshold_before_merge, double 
main_loop_max_time, - utils::Verbosity verbosity) + bool prune_unreachable_states, bool prune_irrelevant_states, int max_states, + int max_states_before_merge, int threshold_before_merge, + double main_loop_max_time, utils::Verbosity verbosity) : merge_strategy_factory(merge_strategy), shrink_strategy(shrink_strategy), label_reduction(label_reduction), @@ -84,30 +84,29 @@ void MergeAndShrinkAlgorithm::handle_shrink_limit_defaults() { if (max_states_before_merge > max_states) { max_states_before_merge = max_states; if (log.is_warning()) { - log << "WARNING: " - << "max_states_before_merge exceeds max_states, " + log << "WARNING: " << "max_states_before_merge exceeds max_states, " << "correcting max_states_before_merge." << endl; } } - utils::verify_argument(max_states >= 1, - "Transition system size must be at least 1."); + utils::verify_argument( + max_states >= 1, "Transition system size must be at least 1."); - utils::verify_argument(max_states_before_merge >= 1, - "Transition system size before merge must be at least 1."); + utils::verify_argument( + max_states_before_merge >= 1, + "Transition system size before merge must be at least 1."); if (shrink_threshold_before_merge == -1) { shrink_threshold_before_merge = max_states; } - utils::verify_argument(shrink_threshold_before_merge >= 1, - "Threshold must be at least 1."); + utils::verify_argument( + shrink_threshold_before_merge >= 1, "Threshold must be at least 1."); if (shrink_threshold_before_merge > max_states) { shrink_threshold_before_merge = max_states; if (log.is_warning()) { - log << "WARNING: " - << "threshold exceeds max_states, " + log << "WARNING: " << "threshold exceeds max_states, " << "correcting threshold." << endl; } } @@ -166,34 +165,42 @@ void MergeAndShrinkAlgorithm::warn_on_unusual_options() const { log << dashes << endl << "WARNING! You did not enable label reduction. " << endl << "This may drastically reduce the performance of merge-and-shrink!" 
- << endl << dashes << endl; + << endl + << dashes << endl; } - } else if (label_reduction->reduce_before_merging() && label_reduction->reduce_before_shrinking()) { + } else if ( + label_reduction->reduce_before_merging() && + label_reduction->reduce_before_shrinking()) { if (log.is_warning()) { log << dashes << endl - << "WARNING! You set label reduction to be applied twice in each merge-and-shrink" << endl - << "iteration, both before shrinking and merging. This double computation effort" << endl - << "does not pay off for most configurations!" - << endl << dashes << endl; + << "WARNING! You set label reduction to be applied twice in each merge-and-shrink" + << endl + << "iteration, both before shrinking and merging. This double computation effort" + << endl + << "does not pay off for most configurations!" << endl + << dashes << endl; } } else { if (label_reduction->reduce_before_shrinking() && - (shrink_strategy->get_name() == "f-preserving" - || shrink_strategy->get_name() == "random")) { + (shrink_strategy->get_name() == "f-preserving" || + shrink_strategy->get_name() == "random")) { if (log.is_warning()) { log << dashes << endl - << "WARNING! Bucket-based shrink strategies such as f-preserving random perform" << endl + << "WARNING! Bucket-based shrink strategies such as f-preserving random perform" + << endl << "best if used with label reduction before merging, not before shrinking!" - << endl << dashes << endl; + << endl + << dashes << endl; } } if (label_reduction->reduce_before_merging() && shrink_strategy->get_name() == "bisimulation") { if (log.is_warning()) { log << dashes << endl - << "WARNING! Shrinking based on bisimulation performs best if used with label" << endl - << "reduction before shrinking, not before merging!" - << endl << dashes << endl; + << "WARNING! Shrinking based on bisimulation performs best if used with label" + << endl + << "reduction before shrinking, not before merging!" 
<< endl + << dashes << endl; } } } @@ -203,7 +210,8 @@ void MergeAndShrinkAlgorithm::warn_on_unusual_options() const { log << dashes << endl << "WARNING! Pruning is (partially) turned off!" << endl << "This may drastically reduce the performance of merge-and-shrink!" - << endl << dashes << endl; + << endl + << dashes << endl; } } } @@ -221,16 +229,15 @@ bool MergeAndShrinkAlgorithm::ran_out_of_time( } void MergeAndShrinkAlgorithm::main_loop( - FactoredTransitionSystem &fts, - const TaskProxy &task_proxy) { + FactoredTransitionSystem &fts, const TaskProxy &task_proxy) { utils::CountdownTimer timer(main_loop_max_time); if (log.is_at_least_normal()) { log << "Starting main loop "; if (main_loop_max_time == numeric_limits::infinity()) { log << "without a time limit." << endl; } else { - log << "with a time limit of " - << main_loop_max_time << "s." << endl; + log << "with a time limit of " << main_loop_max_time << "s." + << endl; } } int maximum_intermediate_size = 0; @@ -249,10 +256,9 @@ void MergeAndShrinkAlgorithm::main_loop( merge_strategy_factory = nullptr; auto log_main_loop_progress = [&timer, this](const string &msg) { - log << "M&S algorithm main loop timer: " - << timer.get_elapsed_time() - << " (" << msg << ")" << endl; - }; + log << "M&S algorithm main loop timer: " << timer.get_elapsed_time() + << " (" << msg << ")" << endl; + }; while (fts.get_num_active_entries() > 1) { // Choose next transition systems to merge pair merge_indices = merge_strategy->get_next(); @@ -263,8 +269,8 @@ void MergeAndShrinkAlgorithm::main_loop( int merge_index2 = merge_indices.second; assert(merge_index1 != merge_index2); if (log.is_at_least_normal()) { - log << "Next pair of indices: (" - << merge_index1 << ", " << merge_index2 << ")" << endl; + log << "Next pair of indices: (" << merge_index1 << ", " + << merge_index2 << ")" << endl; if (log.is_at_least_verbose()) { fts.statistics(merge_index1, log); fts.statistics(merge_index2, log); @@ -286,14 +292,9 @@ void 
MergeAndShrinkAlgorithm::main_loop( // Shrinking bool shrunk = shrink_before_merge_step( - fts, - merge_index1, - merge_index2, - max_states, - max_states_before_merge, - shrink_threshold_before_merge, - *shrink_strategy, - log); + fts, merge_index1, merge_index2, max_states, + max_states_before_merge, shrink_threshold_before_merge, + *shrink_strategy, log); if (log.is_at_least_normal() && shrunk) { log_main_loop_progress("after shrinking"); } @@ -335,11 +336,8 @@ void MergeAndShrinkAlgorithm::main_loop( // Pruning if (prune_unreachable_states || prune_irrelevant_states) { bool pruned = prune_step( - fts, - merged_index, - prune_unreachable_states, - prune_irrelevant_states, - log); + fts, merged_index, prune_unreachable_states, + prune_irrelevant_states, log); if (log.is_at_least_normal() && pruned) { if (log.is_at_least_verbose()) { fts.statistics(merged_index, log); @@ -357,7 +355,9 @@ void MergeAndShrinkAlgorithm::main_loop( if (!fts.is_factor_solvable(merged_index)) { if (log.is_at_least_normal()) { log << "Abstract problem is unsolvable, stopping " - "computation. " << endl << endl; + "computation. 
" + << endl + << endl; } break; } @@ -383,7 +383,8 @@ void MergeAndShrinkAlgorithm::main_loop( label_reduction = nullptr; } -FactoredTransitionSystem MergeAndShrinkAlgorithm::build_factored_transition_system( +FactoredTransitionSystem +MergeAndShrinkAlgorithm::build_factored_transition_system( const TaskProxy &task_proxy) { if (starting_peak_memory) { cerr << "Calling build_factored_transition_system twice is not " @@ -407,12 +408,8 @@ FactoredTransitionSystem MergeAndShrinkAlgorithm::build_factored_transition_syst shrink_strategy->requires_goal_distances() || merge_strategy_factory->requires_goal_distances() || prune_irrelevant_states; - FactoredTransitionSystem fts = - create_factored_transition_system( - task_proxy, - compute_init_distances, - compute_goal_distances, - log); + FactoredTransitionSystem fts = create_factored_transition_system( + task_proxy, compute_init_distances, compute_goal_distances, log); if (log.is_at_least_normal()) { log_progress(timer, "after computation of atomic factors", log); } @@ -429,10 +426,7 @@ FactoredTransitionSystem MergeAndShrinkAlgorithm::build_factored_transition_syst assert(fts.is_active(index)); if (prune_unreachable_states || prune_irrelevant_states) { bool pruned_factor = prune_step( - fts, - index, - prune_unreachable_states, - prune_irrelevant_states, + fts, index, prune_unreachable_states, prune_irrelevant_states, log); pruned = pruned || pruned_factor; } @@ -459,7 +453,8 @@ FactoredTransitionSystem MergeAndShrinkAlgorithm::build_factored_transition_syst return fts; } -void add_merge_and_shrink_algorithm_options_to_feature(plugins::Feature &feature) { +void add_merge_and_shrink_algorithm_options_to_feature( + plugins::Feature &feature) { // Merge strategy option. 
feature.add_option>( "merge_strategy", @@ -505,55 +500,48 @@ void add_merge_and_shrink_algorithm_options_to_feature(plugins::Feature &feature "note that the time limit is only checked between transformations " "of the main loop, but not during, so it can be exceeded if a " "transformation is runtime-intense.", - "infinity", - Bounds("0.0", "infinity")); + "infinity", Bounds("0.0", "infinity")); } -tuple, shared_ptr, - shared_ptr, bool, bool, int, int, int, double> +tuple< + shared_ptr, shared_ptr, + shared_ptr, bool, bool, int, int, int, double> get_merge_and_shrink_algorithm_arguments_from_options( const plugins::Options &opts) { return tuple_cat( make_tuple( opts.get>("merge_strategy"), opts.get>("shrink_strategy"), - opts.get>( - "label_reduction", nullptr), + opts.get>("label_reduction", nullptr), opts.get("prune_unreachable_states"), opts.get("prune_irrelevant_states")), get_transition_system_size_limit_arguments_from_options(opts), - make_tuple(opts.get("main_loop_max_time")) - ); + make_tuple(opts.get("main_loop_max_time"))); } -void add_transition_system_size_limit_options_to_feature(plugins::Feature &feature) { +void add_transition_system_size_limit_options_to_feature( + plugins::Feature &feature) { feature.add_option( "max_states", - "maximum transition system size allowed at any time point.", - "-1", + "maximum transition system size allowed at any time point.", "-1", Bounds("-1", "infinity")); feature.add_option( "max_states_before_merge", "maximum transition system size allowed for two transition systems " "before being merged to form the synchronized product.", - "-1", - Bounds("-1", "infinity")); + "-1", Bounds("-1", "infinity")); feature.add_option( "threshold_before_merge", "If a transition system, before being merged, surpasses this soft " "transition system size limit, the shrink strategy is called to " "possibly shrink the transition system.", - "-1", - Bounds("-1", "infinity")); + "-1", Bounds("-1", "infinity")); } -tuple 
-get_transition_system_size_limit_arguments_from_options( +tuple get_transition_system_size_limit_arguments_from_options( const plugins::Options &opts) { return make_tuple( - opts.get("max_states"), - opts.get("max_states_before_merge"), - opts.get("threshold_before_merge") - ); + opts.get("max_states"), opts.get("max_states_before_merge"), + opts.get("threshold_before_merge")); } } diff --git a/src/search/merge_and_shrink/merge_and_shrink_algorithm.h b/src/search/merge_and_shrink/merge_and_shrink_algorithm.h index ac47b27e1d..2d1c336ba8 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_algorithm.h +++ b/src/search/merge_and_shrink/merge_and_shrink_algorithm.h @@ -53,9 +53,7 @@ class MergeAndShrinkAlgorithm { void warn_on_unusual_options() const; bool ran_out_of_time(const utils::CountdownTimer &timer) const; void statistics(int maximum_intermediate_size) const; - void main_loop( - FactoredTransitionSystem &fts, - const TaskProxy &task_proxy); + void main_loop(FactoredTransitionSystem &fts, const TaskProxy &task_proxy); void handle_shrink_limit_defaults(); public: MergeAndShrinkAlgorithm( @@ -63,20 +61,21 @@ class MergeAndShrinkAlgorithm { const std::shared_ptr &shrink_strategy, const std::shared_ptr &label_reduction, bool prune_unreachable_states, bool prune_irrelevant_states, - int max_states, int max_states_before_merge, - int threshold_before_merge, double main_loop_max_time, - utils::Verbosity verbosity); - FactoredTransitionSystem build_factored_transition_system(const TaskProxy &task_proxy); + int max_states, int max_states_before_merge, int threshold_before_merge, + double main_loop_max_time, utils::Verbosity verbosity); + FactoredTransitionSystem build_factored_transition_system( + const TaskProxy &task_proxy); }; -extern void add_merge_and_shrink_algorithm_options_to_feature(plugins::Feature &feature); -std::tuple, - std::shared_ptr, - std::shared_ptr, bool, bool, int, int, int, - double> +extern void 
add_merge_and_shrink_algorithm_options_to_feature( + plugins::Feature &feature); +std::tuple< + std::shared_ptr, std::shared_ptr, + std::shared_ptr, bool, bool, int, int, int, double> get_merge_and_shrink_algorithm_arguments_from_options( const plugins::Options &opts); -extern void add_transition_system_size_limit_options_to_feature(plugins::Feature &feature); +extern void add_transition_system_size_limit_options_to_feature( + plugins::Feature &feature); std::tuple get_transition_system_size_limit_arguments_from_options( const plugins::Options &opts); diff --git a/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc b/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc index 501b70bdbe..668bbeb836 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc +++ b/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc @@ -24,19 +24,19 @@ MergeAndShrinkHeuristic::MergeAndShrinkHeuristic( const shared_ptr &merge_strategy, const shared_ptr &shrink_strategy, const shared_ptr &label_reduction, - bool prune_unreachable_states, bool prune_irrelevant_states, - int max_states, int max_states_before_merge, - int threshold_before_merge, double main_loop_max_time, - const shared_ptr &transform, bool cache_estimates, - const string &description, utils::Verbosity verbosity) + bool prune_unreachable_states, bool prune_irrelevant_states, int max_states, + int max_states_before_merge, int threshold_before_merge, + double main_loop_max_time, const shared_ptr &transform, + bool cache_estimates, const string &description, utils::Verbosity verbosity) : Heuristic(transform, cache_estimates, description, verbosity) { log << "Initializing merge-and-shrink heuristic..." 
<< endl; MergeAndShrinkAlgorithm algorithm( merge_strategy, shrink_strategy, label_reduction, - prune_unreachable_states, prune_irrelevant_states, - max_states, max_states_before_merge, threshold_before_merge, - main_loop_max_time, verbosity); - FactoredTransitionSystem fts = algorithm.build_factored_transition_system(task_proxy); + prune_unreachable_states, prune_irrelevant_states, max_states, + max_states_before_merge, threshold_before_merge, main_loop_max_time, + verbosity); + FactoredTransitionSystem fts = + algorithm.build_factored_transition_system(task_proxy); extract_factors(fts); log << "Done initializing merge-and-shrink heuristic." << endl << endl; } @@ -49,7 +49,8 @@ void MergeAndShrinkHeuristic::extract_factor( representation, which serves as the heuristic. */ auto final_entry = fts.extract_factor(index); - unique_ptr mas_representation = move(final_entry.first); + unique_ptr mas_representation = + move(final_entry.first); unique_ptr distances = move(final_entry.second); if (!distances->are_goal_distances_computed()) { const bool compute_init = false; @@ -61,7 +62,8 @@ void MergeAndShrinkHeuristic::extract_factor( mas_representations.push_back(move(mas_representation)); } -bool MergeAndShrinkHeuristic::extract_unsolvable_factor(FactoredTransitionSystem &fts) { +bool MergeAndShrinkHeuristic::extract_unsolvable_factor( + FactoredTransitionSystem &fts) { /* Check if there is an unsolvable factor. If so, extract and store it and return true. Otherwise, return false. */ for (int index : fts) { @@ -70,8 +72,7 @@ bool MergeAndShrinkHeuristic::extract_unsolvable_factor(FactoredTransitionSystem extract_factor(fts, index); if (log.is_at_least_normal()) { log << fts.get_transition_system(index).tag() - << "use this unsolvable factor as heuristic." - << endl; + << "use this unsolvable factor as heuristic." 
<< endl; } return true; } @@ -79,13 +80,14 @@ bool MergeAndShrinkHeuristic::extract_unsolvable_factor(FactoredTransitionSystem return false; } -void MergeAndShrinkHeuristic::extract_nontrivial_factors(FactoredTransitionSystem &fts) { +void MergeAndShrinkHeuristic::extract_nontrivial_factors( + FactoredTransitionSystem &fts) { // Iterate over remaining factors and extract and store the nontrivial ones. for (int index : fts) { if (fts.is_factor_trivial(index)) { if (log.is_at_least_verbose()) { - log << fts.get_transition_system(index).tag() - << "is trivial." << endl; + log << fts.get_transition_system(index).tag() << "is trivial." + << endl; } } else { extract_factor(fts, index); @@ -123,7 +125,8 @@ void MergeAndShrinkHeuristic::extract_factors(FactoredTransitionSystem &fts) { int MergeAndShrinkHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int heuristic = 0; - for (const unique_ptr &mas_representation : mas_representations) { + for (const unique_ptr &mas_representation : + mas_representations) { int cost = mas_representation->get_value(state); if (cost == PRUNED_STATE || cost == INF) { // If state is unreachable or irrelevant, we encountered a dead end. 
@@ -141,48 +144,46 @@ class MergeAndShrinkHeuristicFeature document_title("Merge-and-shrink heuristic"); document_synopsis( "This heuristic implements the algorithm described in the following " - "paper:" + utils::format_conference_reference( + "paper:" + + utils::format_conference_reference( {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"}, "Generalized Label Reduction for Merge-and-Shrink Heuristics", "https://ai.dmi.unibas.ch/papers/sievers-et-al-aaai2014.pdf", "Proceedings of the 28th AAAI Conference on Artificial" " Intelligence (AAAI 2014)", - "2358-2366", - "AAAI Press", - "2014") + "\n" + + "2358-2366", "AAAI Press", "2014") + + "\n" + "For a more exhaustive description of merge-and-shrink, see the journal " - "paper" + utils::format_journal_reference( + "paper" + + utils::format_journal_reference( {"Silvan Sievers", "Malte Helmert"}, "Merge-and-Shrink: A Compositional Theory of Transformations " "of Factored Transition Systems", "https://ai.dmi.unibas.ch/papers/sievers-helmert-jair2021.pdf", - "Journal of Artificial Intelligence Research", - "71", - "781-883", - "2021") + "\n" + + "Journal of Artificial Intelligence Research", "71", "781-883", + "2021") + + "\n" + "The following paper describes how to improve the DFP merge strategy " "with tie-breaking, and presents two new merge strategies (dyn-MIASM " - "and SCC-DFP):" + utils::format_conference_reference( + "and SCC-DFP):" + + utils::format_conference_reference( {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"}, "An Analysis of Merge Strategies for Merge-and-Shrink Heuristics", "https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf", "Proceedings of the 26th International Conference on Automated " "Planning and Scheduling (ICAPS 2016)", - "294-298", - "AAAI Press", - "2016") + "\n" + + "294-298", "AAAI Press", "2016") + + "\n" + "Details of the algorithms and the implementation are described in the " - "paper" + utils::format_conference_reference( + "paper" + + 
utils::format_conference_reference( {"Silvan Sievers"}, "Merge-and-Shrink Heuristics for Classical Planning: Efficient " "Implementation and Partial Abstractions", "https://ai.dmi.unibas.ch/papers/sievers-socs2018.pdf", "Proceedings of the 11th Annual Symposium on Combinatorial Search " "(SoCS 2018)", - "90-98", - "AAAI Press", - "2018") - ); + "90-98", "AAAI Press", "2018")); add_merge_and_shrink_algorithm_options_to_feature(*this); add_heuristic_options_to_feature(*this, "merge_and_shrink"); @@ -228,7 +229,8 @@ class MergeAndShrinkHeuristicFeature "before_merging=false),max_states=50k,threshold_before_merge=1)\n}}}\n"); document_language_support("action costs", "supported"); - document_language_support("conditional effects", "supported (but see note)"); + document_language_support( + "conditional effects", "supported (but see note)"); document_language_support("axioms", "not supported"); document_property("admissible", "yes (but see note)"); @@ -237,12 +239,11 @@ class MergeAndShrinkHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( get_merge_and_shrink_algorithm_arguments_from_options(opts), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/merge_and_shrink/merge_and_shrink_heuristic.h b/src/search/merge_and_shrink/merge_and_shrink_heuristic.h index 2f1f5f63fc..521ddfbf53 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_heuristic.h +++ b/src/search/merge_and_shrink/merge_and_shrink_heuristic.h @@ -15,7 +15,8 @@ class LabelReduction; class MergeAndShrinkHeuristic : public Heuristic { // The final merge-and-shrink representations, storing goal distances. 
- std::vector> mas_representations; + std::vector> + mas_representations; void extract_factor(FactoredTransitionSystem &fts, int index); bool extract_unsolvable_factor(FactoredTransitionSystem &fts); @@ -29,11 +30,10 @@ class MergeAndShrinkHeuristic : public Heuristic { const std::shared_ptr &shrink_strategy, const std::shared_ptr &label_reduction, bool prune_unreachable_states, bool prune_irrelevant_states, - int max_states, int max_states_before_merge, - int threshold_before_merge, double main_loop_max_time, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + int max_states, int max_states_before_merge, int threshold_before_merge, + double main_loop_max_time, + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/merge_and_shrink/merge_and_shrink_representation.cc b/src/search/merge_and_shrink/merge_and_shrink_representation.cc index 18731721ca..efdc98c340 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_representation.cc +++ b/src/search/merge_and_shrink/merge_and_shrink_representation.cc @@ -26,7 +26,6 @@ int MergeAndShrinkRepresentation::get_domain_size() const { return domain_size; } - MergeAndShrinkRepresentationLeaf::MergeAndShrinkRepresentationLeaf( int var_id, int domain_size) : MergeAndShrinkRepresentation(domain_size), @@ -81,16 +80,16 @@ void MergeAndShrinkRepresentationLeaf::dump(utils::LogProxy &log) const { } } - MergeAndShrinkRepresentationMerge::MergeAndShrinkRepresentationMerge( unique_ptr left_child_, unique_ptr right_child_) - : MergeAndShrinkRepresentation(left_child_->get_domain_size() * - right_child_->get_domain_size()), + : MergeAndShrinkRepresentation( + left_child_->get_domain_size() * right_child_->get_domain_size()), left_child(move(left_child_)), right_child(move(right_child_)), - lookup_table(left_child->get_domain_size(), - 
vector(right_child->get_domain_size())) { + lookup_table( + left_child->get_domain_size(), + vector(right_child->get_domain_size())) { int counter = 0; for (vector &row : lookup_table) { for (int &entry : row) { @@ -126,8 +125,7 @@ void MergeAndShrinkRepresentationMerge::apply_abstraction_to_lookup_table( domain_size = new_domain_size; } -int MergeAndShrinkRepresentationMerge::get_value( - const State &state) const { +int MergeAndShrinkRepresentationMerge::get_value(const State &state) const { int state1 = left_child->get_value(state); int state2 = right_child->get_value(state); if (state1 == PRUNED_STATE || state2 == PRUNED_STATE) diff --git a/src/search/merge_and_shrink/merge_and_shrink_representation.h b/src/search/merge_and_shrink/merge_and_shrink_representation.h index 5952b9df97..6b99eda642 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_representation.h +++ b/src/search/merge_and_shrink/merge_and_shrink_representation.h @@ -39,7 +39,6 @@ class MergeAndShrinkRepresentation { virtual void dump(utils::LogProxy &log) const = 0; }; - class MergeAndShrinkRepresentationLeaf : public MergeAndShrinkRepresentation { const int var_id; @@ -56,7 +55,6 @@ class MergeAndShrinkRepresentationLeaf : public MergeAndShrinkRepresentation { virtual void dump(utils::LogProxy &log) const override; }; - class MergeAndShrinkRepresentationMerge : public MergeAndShrinkRepresentation { std::unique_ptr left_child; std::unique_ptr right_child; diff --git a/src/search/merge_and_shrink/merge_scoring_function.cc b/src/search/merge_and_shrink/merge_scoring_function.cc index 83dd2dfd18..2e0fd2d536 100644 --- a/src/search/merge_and_shrink/merge_scoring_function.cc +++ b/src/search/merge_and_shrink/merge_scoring_function.cc @@ -8,8 +8,7 @@ using namespace std; namespace merge_and_shrink { -MergeScoringFunction::MergeScoringFunction() - : initialized(false) { +MergeScoringFunction::MergeScoringFunction() : initialized(false) { } void MergeScoringFunction::dump_options(utils::LogProxy 
&log) const { @@ -20,9 +19,11 @@ void MergeScoringFunction::dump_options(utils::LogProxy &log) const { } } -static class MergeScoringFunctionCategoryPlugin : public plugins::TypedCategoryPlugin { +static class MergeScoringFunctionCategoryPlugin + : public plugins::TypedCategoryPlugin { public: - MergeScoringFunctionCategoryPlugin() : TypedCategoryPlugin("MergeScoringFunction") { + MergeScoringFunctionCategoryPlugin() + : TypedCategoryPlugin("MergeScoringFunction") { document_synopsis( "This page describes various merge scoring functions. A scoring function, " "given a list of merge candidates and a factored transition system, " @@ -31,6 +32,5 @@ static class MergeScoringFunctionCategoryPlugin : public plugins::TypedCategoryP "Scoring functions are currently only used within the score based " "filtering merge selector."); } -} -_category_plugin; +} _category_plugin; } diff --git a/src/search/merge_and_shrink/merge_scoring_function.h b/src/search/merge_and_shrink/merge_scoring_function.h index a3c5eb51a5..31e2db7ebe 100644 --- a/src/search/merge_and_shrink/merge_scoring_function.h +++ b/src/search/merge_and_shrink/merge_scoring_function.h @@ -14,7 +14,8 @@ namespace merge_and_shrink { class FactoredTransitionSystem; class MergeScoringFunction { virtual std::string name() const = 0; - virtual void dump_function_specific_options(utils::LogProxy &) const {} + virtual void dump_function_specific_options(utils::LogProxy &) const { + } protected: bool initialized; public: diff --git a/src/search/merge_and_shrink/merge_scoring_function_dfp.cc b/src/search/merge_and_shrink/merge_scoring_function_dfp.cc index 16716cecec..2e5f218d14 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_dfp.cc +++ b/src/search/merge_and_shrink/merge_scoring_function_dfp.cc @@ -24,7 +24,8 @@ static vector compute_label_ranks( for (const LocalLabelInfo &local_label_info : ts) { const LabelGroup &label_group = local_label_info.get_label_group(); - const vector &transitions = 
local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); // Relevant labels with no transitions have a rank of infinity. int label_rank = INF; bool group_relevant = false; @@ -46,8 +47,8 @@ static vector compute_label_ranks( label_rank = -1; } else { for (const Transition &transition : transitions) { - label_rank = min(label_rank, - distances.get_goal_distance(transition.target)); + label_rank = min( + label_rank, distances.get_goal_distance(transition.target)); } } for (int label : label_group) { @@ -101,7 +102,8 @@ string MergeScoringFunctionDFP::name() const { } class MergeScoringFunctionDFPFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeScoringFunction, MergeScoringFunctionDFP> { public: MergeScoringFunctionDFPFeature() : TypedFeature("dfp") { document_title("DFP scoring"); @@ -109,15 +111,14 @@ class MergeScoringFunctionDFPFeature "This scoring function computes the 'DFP' score as described in the " "paper \"Directed model checking with distance-preserving abstractions\" " "by Draeger, Finkbeiner and Podelski (SPIN 2006), adapted to planning in " - "the following paper:" + utils::format_conference_reference( + "the following paper:" + + utils::format_conference_reference( {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"}, "Generalized Label Reduction for Merge-and-Shrink Heuristics", "https://ai.dmi.unibas.ch/papers/sievers-et-al-aaai2014.pdf", "Proceedings of the 28th AAAI Conference on Artificial" " Intelligence (AAAI 2014)", - "2358-2366", - "AAAI Press", - "2014")); + "2358-2366", "AAAI Press", "2014")); document_note( "Note", @@ -132,8 +133,8 @@ class MergeScoringFunctionDFPFeature "before_merging=false),max_states=50000,threshold_before_merge=1)\n}}}"); } - virtual shared_ptr - create_component(const plugins::Options &) const override { + virtual shared_ptr create_component( + const plugins::Options &) const override { return make_shared(); } }; diff --git 
a/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.cc b/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.cc index 38a8753f88..6bec966953 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.cc +++ b/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.cc @@ -40,9 +40,11 @@ string MergeScoringFunctionGoalRelevance::name() const { } class MergeScoringFunctionGoalRelevanceFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeScoringFunction, MergeScoringFunctionGoalRelevance> { public: - MergeScoringFunctionGoalRelevanceFeature() : TypedFeature("goal_relevance") { + MergeScoringFunctionGoalRelevanceFeature() + : TypedFeature("goal_relevance") { document_title("Goal relevance scoring"); document_synopsis( "This scoring function assigns a merge candidate a value of 0 iff at " @@ -51,8 +53,8 @@ class MergeScoringFunctionGoalRelevanceFeature "All other candidates get a score of positive infinity."); } - virtual shared_ptr - create_component(const plugins::Options &) const override { + virtual shared_ptr create_component( + const plugins::Options &) const override { return make_shared(); } }; diff --git a/src/search/merge_and_shrink/merge_scoring_function_miasm.cc b/src/search/merge_and_shrink/merge_scoring_function_miasm.cc index 20a551893a..d5ec7fbc1a 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_miasm.cc +++ b/src/search/merge_and_shrink/merge_scoring_function_miasm.cc @@ -3,9 +3,9 @@ #include "distances.h" #include "factored_transition_system.h" #include "merge_and_shrink_algorithm.h" +#include "merge_scoring_function_miasm_utils.h" #include "shrink_strategy.h" #include "transition_system.h" -#include "merge_scoring_function_miasm_utils.h" #include "../task_proxy.h" @@ -18,8 +18,7 @@ using namespace std; namespace merge_and_shrink { MergeScoringFunctionMIASM::MergeScoringFunctionMIASM( shared_ptr shrink_strategy, int max_states, - int 
max_states_before_merge, int threshold_before_merge, - bool use_caching) + int max_states_before_merge, int threshold_before_merge, bool use_caching) : use_caching(use_caching), shrink_strategy(move(shrink_strategy)), max_states(max_states), @@ -37,24 +36,22 @@ vector MergeScoringFunctionMIASM::compute_scores( double score; int index1 = merge_candidate.first; int index2 = merge_candidate.second; - if (use_caching && cached_scores_by_merge_candidate_indices[index1][index2]) { + if (use_caching && + cached_scores_by_merge_candidate_indices[index1][index2]) { score = *cached_scores_by_merge_candidate_indices[index1][index2]; } else { - unique_ptr product = shrink_before_merge_externally( - fts, - index1, - index2, - *shrink_strategy, - max_states, - max_states_before_merge, - shrink_threshold_before_merge, - silent_log); + unique_ptr product = + shrink_before_merge_externally( + fts, index1, index2, *shrink_strategy, max_states, + max_states_before_merge, shrink_threshold_before_merge, + silent_log); // Compute distances for the product and count the alive states. 
unique_ptr distances = make_unique(*product); const bool compute_init_distances = true; const bool compute_goal_distances = true; - distances->compute_distances(compute_init_distances, compute_goal_distances, silent_log); + distances->compute_distances( + compute_init_distances, compute_goal_distances, silent_log); int num_states = product->get_size(); int alive_states_count = 0; for (int state = 0; state < num_states; ++state) { @@ -70,9 +67,10 @@ vector MergeScoringFunctionMIASM::compute_scores( */ assert(num_states); score = static_cast(alive_states_count) / - static_cast(num_states); + static_cast(num_states); if (use_caching) { - cached_scores_by_merge_candidate_indices[index1][index2] = score; + cached_scores_by_merge_candidate_indices[index1][index2] = + score; } } scores.push_back(score); @@ -85,11 +83,11 @@ void MergeScoringFunctionMIASM::initialize(const TaskProxy &task_proxy) { int num_variables = task_proxy.get_variables().size(); int max_factor_index = 2 * num_variables - 1; cached_scores_by_merge_candidate_indices.resize( - max_factor_index, - vector>(max_factor_index)); + max_factor_index, vector>(max_factor_index)); } -void MergeScoringFunctionMIASM::dump_function_specific_options(utils::LogProxy &log) const { +void MergeScoringFunctionMIASM::dump_function_specific_options( + utils::LogProxy &log) const { if (log.is_at_least_normal()) { log << "Use caching: " << (use_caching ? "yes" : "no") << endl; } @@ -100,7 +98,8 @@ string MergeScoringFunctionMIASM::name() const { } class MergeScoringFunctionMIASMFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeScoringFunction, MergeScoringFunctionMIASM> { public: MergeScoringFunctionMIASMFeature() : TypedFeature("sf_miasm") { document_title("MIASM"); @@ -114,19 +113,18 @@ class MergeScoringFunctionMIASMFeature "the transition systems before if otherwise their product would exceed " "the specified size limits. 
A stateless merge strategy using this " "scoring function is called dyn-MIASM (nowadays also called sbMIASM " - "for score-based MIASM) and is described in the following paper:" - + utils::format_conference_reference( + "for score-based MIASM) and is described in the following paper:" + + utils::format_conference_reference( {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"}, "An Analysis of Merge Strategies for Merge-and-Shrink Heuristics", "https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf", "Proceedings of the 26th International Conference on Planning and " "Scheduling (ICAPS 2016)", - "2358-2366", - "AAAI Press", - "2016")); + "2358-2366", "AAAI Press", "2016")); - // TODO: use shrink strategy and limit options from MergeAndShrinkHeuristic - // instead of having the identical options here again. + // TODO: use shrink strategy and limit options from + // MergeAndShrinkHeuristic instead of having the identical options here + // again. add_option>( "shrink_strategy", "We recommend setting this to match the shrink strategy configuration " @@ -169,14 +167,12 @@ class MergeScoringFunctionMIASMFeature "true"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("shrink_strategy"), - get_transition_system_size_limit_arguments_from_options( - opts), - opts.get("use_caching") - ); + get_transition_system_size_limit_arguments_from_options(opts), + opts.get("use_caching")); } }; diff --git a/src/search/merge_and_shrink/merge_scoring_function_miasm.h b/src/search/merge_and_shrink/merge_scoring_function_miasm.h index a35e8877ab..bed357d3ce 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_miasm.h +++ b/src/search/merge_and_shrink/merge_scoring_function_miasm.h @@ -17,15 +17,17 @@ class MergeScoringFunctionMIASM : public MergeScoringFunction { const int 
max_states_before_merge; const int shrink_threshold_before_merge; utils::LogProxy silent_log; - std::vector>> cached_scores_by_merge_candidate_indices; + std::vector>> + cached_scores_by_merge_candidate_indices; virtual std::string name() const override; - virtual void dump_function_specific_options(utils::LogProxy &log) const override; + virtual void dump_function_specific_options( + utils::LogProxy &log) const override; public: MergeScoringFunctionMIASM( - std::shared_ptr shrink_strategy, - int max_states, int max_states_before_merge, - int threshold_before_merge, bool use_caching); + std::shared_ptr shrink_strategy, int max_states, + int max_states_before_merge, int threshold_before_merge, + bool use_caching); virtual std::vector compute_scores( const FactoredTransitionSystem &fts, const std::vector> &merge_candidates) override; diff --git a/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.cc b/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.cc index 4b52b80a81..4fcb2607e4 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.cc +++ b/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.cc @@ -21,19 +21,17 @@ namespace merge_and_shrink { return the result. Return nullptr otherwise. 
*/ static unique_ptr copy_and_shrink_ts( - const TransitionSystem &ts, - const Distances &distances, - const ShrinkStrategy &shrink_strategy, - int new_size, - utils::LogProxy &log) { + const TransitionSystem &ts, const Distances &distances, + const ShrinkStrategy &shrink_strategy, int new_size, utils::LogProxy &log) { /* TODO: think about factoring out common logic of this function and the function shrink_factor in utils.cc */ StateEquivalenceRelation equivalence_relation = - shrink_strategy.compute_equivalence_relation(ts, distances, new_size, log); + shrink_strategy.compute_equivalence_relation( + ts, distances, new_size, log); // TODO: We currently violate this; see issue250 - //assert(equivalence_relation.size() <= target_size); + // assert(equivalence_relation.size() <= target_size); int new_num_states = equivalence_relation.size(); if (new_num_states < ts.get_size()) { @@ -41,8 +39,8 @@ static unique_ptr copy_and_shrink_ts( If we actually shrink the transition system, we first need to copy it, then shrink it and return it. 
*/ - vector abstraction_mapping = compute_abstraction_mapping( - ts.get_size(), equivalence_relation); + vector abstraction_mapping = + compute_abstraction_mapping(ts.get_size(), equivalence_relation); unique_ptr ts_copy = make_unique(ts); ts_copy->apply_abstraction( @@ -54,13 +52,9 @@ static unique_ptr copy_and_shrink_ts( } unique_ptr shrink_before_merge_externally( - const FactoredTransitionSystem &fts, - int index1, - int index2, - const ShrinkStrategy &shrink_strategy, - int max_states, - int max_states_before_merge, - int shrink_threshold_before_merge, + const FactoredTransitionSystem &fts, int index1, int index2, + const ShrinkStrategy &shrink_strategy, int max_states, + int max_states_before_merge, int shrink_threshold_before_merge, utils::LogProxy &log) { const TransitionSystem &original_ts1 = fts.get_transition_system(index1); const TransitionSystem &original_ts2 = fts.get_transition_system(index2); @@ -70,12 +64,12 @@ unique_ptr shrink_before_merge_externally( in the merge-and-shrink loop. 
*/ pair new_sizes = compute_shrink_sizes( - original_ts1.get_size(), - original_ts2.get_size(), - max_states_before_merge, - max_states); - bool must_shrink_ts1 = original_ts1.get_size() > min(new_sizes.first, shrink_threshold_before_merge); - bool must_shrink_ts2 = original_ts2.get_size() > min(new_sizes.second, shrink_threshold_before_merge); + original_ts1.get_size(), original_ts2.get_size(), + max_states_before_merge, max_states); + bool must_shrink_ts1 = original_ts1.get_size() > + min(new_sizes.first, shrink_threshold_before_merge); + bool must_shrink_ts2 = original_ts2.get_size() > + min(new_sizes.second, shrink_threshold_before_merge); /* If we need to shrink, copy_and_shrink_ts will take care of computing @@ -86,20 +80,14 @@ unique_ptr shrink_before_merge_externally( unique_ptr ts1 = nullptr; if (must_shrink_ts1) { ts1 = copy_and_shrink_ts( - original_ts1, - fts.get_distances(index1), - shrink_strategy, - new_sizes.first, - log); + original_ts1, fts.get_distances(index1), shrink_strategy, + new_sizes.first, log); } unique_ptr ts2 = nullptr; if (must_shrink_ts2) { ts2 = copy_and_shrink_ts( - original_ts2, - fts.get_distances(index2), - shrink_strategy, - new_sizes.second, - log); + original_ts2, fts.get_distances(index2), shrink_strategy, + new_sizes.second, log); } /* @@ -107,9 +95,7 @@ unique_ptr shrink_before_merge_externally( the copied and shrunk ones. */ return TransitionSystem::merge( - fts.get_labels(), - (ts1 ? *ts1 : original_ts1), - (ts2 ? *ts2 : original_ts2), - log); + fts.get_labels(), (ts1 ? *ts1 : original_ts1), + (ts2 ? *ts2 : original_ts2), log); } } diff --git a/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.h b/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.h index d6fbddbab3..6a78c13027 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.h +++ b/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.h @@ -18,13 +18,9 @@ class TransitionSystem; product. 
*/ extern std::unique_ptr shrink_before_merge_externally( - const FactoredTransitionSystem &fts, - int index1, - int index2, - const ShrinkStrategy &shrink_strategy, - int max_states, - int max_states_before_merge, - int shrink_threshold_before_merge, + const FactoredTransitionSystem &fts, int index1, int index2, + const ShrinkStrategy &shrink_strategy, int max_states, + int max_states_before_merge, int shrink_threshold_before_merge, utils::LogProxy &log); } diff --git a/src/search/merge_and_shrink/merge_scoring_function_single_random.cc b/src/search/merge_and_shrink/merge_scoring_function_single_random.cc index dbd3e5536b..51fed9bc5b 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_single_random.cc +++ b/src/search/merge_and_shrink/merge_scoring_function_single_random.cc @@ -14,8 +14,7 @@ using namespace std; namespace merge_and_shrink { MergeScoringFunctionSingleRandom::MergeScoringFunctionSingleRandom( int random_seed) - : random_seed(random_seed), - rng(utils::get_rng(random_seed)) { + : random_seed(random_seed), rng(utils::get_rng(random_seed)) { } vector MergeScoringFunctionSingleRandom::compute_scores( @@ -47,7 +46,8 @@ void MergeScoringFunctionSingleRandom::dump_function_specific_options( } class MergeScoringFunctionSingleRandomFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeScoringFunction, MergeScoringFunctionSingleRandom> { public: MergeScoringFunctionSingleRandomFeature() : TypedFeature("single_random") { document_title("Single random"); @@ -58,11 +58,11 @@ class MergeScoringFunctionSingleRandomFeature utils::add_rng_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( - utils::get_rng_arguments_from_options(opts) - ); + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + MergeScoringFunctionSingleRandom>( + 
utils::get_rng_arguments_from_options(opts)); } }; diff --git a/src/search/merge_and_shrink/merge_scoring_function_single_random.h b/src/search/merge_and_shrink/merge_scoring_function_single_random.h index 285cf1fff5..0f52028e95 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_single_random.h +++ b/src/search/merge_and_shrink/merge_scoring_function_single_random.h @@ -15,7 +15,8 @@ class MergeScoringFunctionSingleRandom : public MergeScoringFunction { std::shared_ptr rng; virtual std::string name() const override; - virtual void dump_function_specific_options(utils::LogProxy &log) const override; + virtual void dump_function_specific_options( + utils::LogProxy &log) const override; public: explicit MergeScoringFunctionSingleRandom(int random_seed); virtual std::vector compute_scores( diff --git a/src/search/merge_and_shrink/merge_scoring_function_total_order.cc b/src/search/merge_and_shrink/merge_scoring_function_total_order.cc index 1d791ba979..8234d8f93f 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_total_order.cc +++ b/src/search/merge_and_shrink/merge_scoring_function_total_order.cc @@ -89,23 +89,23 @@ void MergeScoringFunctionTotalOrder::initialize(const TaskProxy &task_proxy) { // Put the orders in the correct order if (atomic_before_product) { - transition_system_order.insert(transition_system_order.end(), - atomic_tso.begin(), - atomic_tso.end()); - transition_system_order.insert(transition_system_order.end(), - product_tso.begin(), - product_tso.end()); + transition_system_order.insert( + transition_system_order.end(), atomic_tso.begin(), + atomic_tso.end()); + transition_system_order.insert( + transition_system_order.end(), product_tso.begin(), + product_tso.end()); } else { - transition_system_order.insert(transition_system_order.end(), - product_tso.begin(), - product_tso.end()); - transition_system_order.insert(transition_system_order.end(), - atomic_tso.begin(), - atomic_tso.end()); + transition_system_order.insert( + 
transition_system_order.end(), product_tso.begin(), + product_tso.end()); + transition_system_order.insert( + transition_system_order.end(), atomic_tso.begin(), + atomic_tso.end()); } - merge_candidate_order.reserve(max_transition_system_count * - max_transition_system_count / 2); + merge_candidate_order.reserve( + max_transition_system_count * max_transition_system_count / 2); for (size_t i = 0; i < transition_system_order.size(); ++i) { for (size_t j = i + 1; j < transition_system_order.size(); ++j) { merge_candidate_order.emplace_back( @@ -118,7 +118,8 @@ string MergeScoringFunctionTotalOrder::name() const { return "total order"; } -void MergeScoringFunctionTotalOrder::dump_function_specific_options(utils::LogProxy &log) const { +void MergeScoringFunctionTotalOrder::dump_function_specific_options( + utils::LogProxy &log) const { if (log.is_at_least_normal()) { log << "Atomic transition system order: "; switch (atomic_ts_order) { @@ -148,8 +149,9 @@ void MergeScoringFunctionTotalOrder::dump_function_specific_options(utils::LogPr } log << endl; - log << "Consider " << (atomic_before_product ? - "atomic before product" : "product before atomic") + log << "Consider " + << (atomic_before_product ? "atomic before product" + : "product before atomic") << " transition systems" << endl; log << "Random seed: " << random_seed << endl; } @@ -178,7 +180,8 @@ void MergeScoringFunctionTotalOrder::add_options_to_feature( } class MergeScoringFunctionTotalOrderFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeScoringFunction, MergeScoringFunctionTotalOrder> { public: MergeScoringFunctionTotalOrderFeature() : TypedFeature("total_order") { document_title("Total order"); @@ -187,16 +190,14 @@ class MergeScoringFunctionTotalOrderFeature "based on the specified options. The score for each merge candidate " "correponds to its position in the order. 
This scoring function is " "mainly intended as tie-breaking, and has been introduced in the " - "following paper:" - + utils::format_conference_reference( + "following paper:" + + utils::format_conference_reference( {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"}, "An Analysis of Merge Strategies for Merge-and-Shrink Heuristics", "https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf", "Proceedings of the 26th International Conference on Automated " "Planning and Scheduling (ICAPS 2016)", - "294-298", - "AAAI Press", - "2016") + + "294-298", "AAAI Press", "2016") + "Furthermore, using the atomic_ts_order option, this scoring function, " "if used alone in a score based filtering merge selector, can be used " "to emulate the corresponding (precomputed) linear merge strategies " @@ -204,34 +205,27 @@ class MergeScoringFunctionTotalOrderFeature MergeScoringFunctionTotalOrder::add_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + MergeScoringFunctionTotalOrder>( opts.get("atomic_ts_order"), opts.get("product_ts_order"), opts.get("atomic_before_product"), - utils::get_rng_arguments_from_options(opts) - ); + utils::get_rng_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _atomic_ts_order_enum_plugin({ - {"reverse_level", - "the variable order of Fast Downward"}, - {"level", - "opposite of reverse_level"}, - {"random", - "a randomized order"} - }); - -static plugins::TypedEnumPlugin _product_ts_order_enum_plugin({ - {"old_to_new", - "consider composite transition systems from oldest to most recent"}, - {"new_to_old", - "opposite of old_to_new"}, - {"random", - "a randomized order"} - }); +static plugins::TypedEnumPlugin _atomic_ts_order_enum_plugin( + 
{{"reverse_level", "the variable order of Fast Downward"}, + {"level", "opposite of reverse_level"}, + {"random", "a randomized order"}}); + +static plugins::TypedEnumPlugin _product_ts_order_enum_plugin( + {{"old_to_new", + "consider composite transition systems from oldest to most recent"}, + {"new_to_old", "opposite of old_to_new"}, + {"random", "a randomized order"}}); } diff --git a/src/search/merge_and_shrink/merge_scoring_function_total_order.h b/src/search/merge_and_shrink/merge_scoring_function_total_order.h index 3d2e974da8..ea31c7f68f 100644 --- a/src/search/merge_and_shrink/merge_scoring_function_total_order.h +++ b/src/search/merge_and_shrink/merge_scoring_function_total_order.h @@ -35,7 +35,8 @@ class MergeScoringFunctionTotalOrder : public MergeScoringFunction { std::vector> merge_candidate_order; virtual std::string name() const override; - virtual void dump_function_specific_options(utils::LogProxy &log) const override; + virtual void dump_function_specific_options( + utils::LogProxy &log) const override; public: explicit MergeScoringFunctionTotalOrder( AtomicTSOrder atomic_ts_order, ProductTSOrder product_ts_order, diff --git a/src/search/merge_and_shrink/merge_selector.cc b/src/search/merge_and_shrink/merge_selector.cc index 4bdf01d859..32455cfc5a 100644 --- a/src/search/merge_and_shrink/merge_selector.cc +++ b/src/search/merge_and_shrink/merge_selector.cc @@ -49,7 +49,8 @@ void MergeSelector::dump_options(utils::LogProxy &log) const { } } -static class MergeSelectorCategoryPlugin : public plugins::TypedCategoryPlugin { +static class MergeSelectorCategoryPlugin + : public plugins::TypedCategoryPlugin { public: MergeSelectorCategoryPlugin() : TypedCategoryPlugin("MergeSelector") { document_synopsis( @@ -59,6 +60,5 @@ static class MergeSelectorCategoryPlugin : public plugins::TypedCategoryPlugin> compute_merge_candidates( const FactoredTransitionSystem &fts, const std::vector &indices_subset) const; diff --git 
a/src/search/merge_and_shrink/merge_selector_score_based_filtering.cc b/src/search/merge_and_shrink/merge_selector_score_based_filtering.cc index 663abf2343..22e6b5a5ec 100644 --- a/src/search/merge_and_shrink/merge_selector_score_based_filtering.cc +++ b/src/search/merge_and_shrink/merge_selector_score_based_filtering.cc @@ -43,8 +43,8 @@ pair MergeSelectorScoreBasedFiltering::select_merge( for (const shared_ptr &scoring_function : merge_scoring_functions) { - vector scores = scoring_function->compute_scores( - fts, merge_candidates); + vector scores = + scoring_function->compute_scores(fts, merge_candidates); merge_candidates = get_remaining_candidates(merge_candidates, scores); if (merge_candidates.size() == 1) { break; @@ -53,8 +53,9 @@ pair MergeSelectorScoreBasedFiltering::select_merge( if (merge_candidates.size() > 1) { cerr << "More than one merge candidate remained after computing all " - "scores! Did you forget to include a uniquely tie-breaking " - "scoring function, e.g. total_order or single_random?" << endl; + "scores! Did you forget to include a uniquely tie-breaking " + "scoring function, e.g. total_order or single_random?" 
+ << endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } @@ -62,8 +63,8 @@ pair MergeSelectorScoreBasedFiltering::select_merge( } void MergeSelectorScoreBasedFiltering::initialize(const TaskProxy &task_proxy) { - for (shared_ptr &scoring_function - : merge_scoring_functions) { + for (shared_ptr &scoring_function : + merge_scoring_functions) { scoring_function->initialize(task_proxy); } } @@ -75,16 +76,16 @@ string MergeSelectorScoreBasedFiltering::name() const { void MergeSelectorScoreBasedFiltering::dump_selector_specific_options( utils::LogProxy &log) const { if (log.is_at_least_normal()) { - for (const shared_ptr &scoring_function - : merge_scoring_functions) { + for (const shared_ptr &scoring_function : + merge_scoring_functions) { scoring_function->dump_options(log); } } } bool MergeSelectorScoreBasedFiltering::requires_init_distances() const { - for (const shared_ptr &scoring_function - : merge_scoring_functions) { + for (const shared_ptr &scoring_function : + merge_scoring_functions) { if (scoring_function->requires_init_distances()) { return true; } @@ -93,8 +94,8 @@ bool MergeSelectorScoreBasedFiltering::requires_init_distances() const { } bool MergeSelectorScoreBasedFiltering::requires_goal_distances() const { - for (const shared_ptr &scoring_function - : merge_scoring_functions) { + for (const shared_ptr &scoring_function : + merge_scoring_functions) { if (scoring_function->requires_goal_distances()) { return true; } @@ -103,9 +104,11 @@ bool MergeSelectorScoreBasedFiltering::requires_goal_distances() const { } class MergeSelectorScoreBasedFilteringFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeSelector, MergeSelectorScoreBasedFiltering> { public: - MergeSelectorScoreBasedFilteringFeature() : TypedFeature("score_based_filtering") { + MergeSelectorScoreBasedFilteringFeature() + : TypedFeature("score_based_filtering") { document_title("Score based filtering merge selector"); document_synopsis( "This merge 
selector has a list of scoring functions, which are used " @@ -117,12 +120,11 @@ class MergeSelectorScoreBasedFilteringFeature "The list of scoring functions used to compute scores for candidates."); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( opts.get_list>( - "scoring_functions") - ); + "scoring_functions")); } }; diff --git a/src/search/merge_and_shrink/merge_selector_score_based_filtering.h b/src/search/merge_and_shrink/merge_selector_score_based_filtering.h index 58009e3c8e..b314a338f9 100644 --- a/src/search/merge_and_shrink/merge_selector_score_based_filtering.h +++ b/src/search/merge_and_shrink/merge_selector_score_based_filtering.h @@ -16,13 +16,16 @@ class MergeSelectorScoreBasedFiltering : public MergeSelector { std::vector> merge_scoring_functions; protected: virtual std::string name() const override; - virtual void dump_selector_specific_options(utils::LogProxy &log) const override; + virtual void dump_selector_specific_options( + utils::LogProxy &log) const override; public: explicit MergeSelectorScoreBasedFiltering( - const std::vector> &scoring_functions); + const std::vector> + &scoring_functions); virtual std::pair select_merge( const FactoredTransitionSystem &fts, - const std::vector &indices_subset = std::vector()) const override; + const std::vector &indices_subset = + std::vector()) const override; virtual void initialize(const TaskProxy &task_proxy) override; virtual bool requires_init_distances() const override; virtual bool requires_goal_distances() const override; diff --git a/src/search/merge_and_shrink/merge_strategy.cc b/src/search/merge_and_shrink/merge_strategy.cc index c27019df24..dc24d49cee 100644 --- a/src/search/merge_and_shrink/merge_strategy.cc +++ b/src/search/merge_and_shrink/merge_strategy.cc @@ -3,8 +3,6 @@ using namespace std; namespace merge_and_shrink { 
-MergeStrategy::MergeStrategy( - const FactoredTransitionSystem &fts) - : fts(fts) { +MergeStrategy::MergeStrategy(const FactoredTransitionSystem &fts) : fts(fts) { } } diff --git a/src/search/merge_and_shrink/merge_strategy_factory.cc b/src/search/merge_and_shrink/merge_strategy_factory.cc index e3bf5083e8..9a7da72132 100644 --- a/src/search/merge_and_shrink/merge_strategy_factory.cc +++ b/src/search/merge_and_shrink/merge_strategy_factory.cc @@ -28,14 +28,14 @@ tuple get_merge_strategy_arguments_from_options( return utils::get_log_arguments_from_options(opts); } - -static class MergeStrategyFactoryCategoryPlugin : public plugins::TypedCategoryPlugin { +static class MergeStrategyFactoryCategoryPlugin + : public plugins::TypedCategoryPlugin { public: - MergeStrategyFactoryCategoryPlugin() : TypedCategoryPlugin("MergeStrategy") { + MergeStrategyFactoryCategoryPlugin() + : TypedCategoryPlugin("MergeStrategy") { document_synopsis( "This page describes the various merge strategies supported " "by the planner."); } -} -_category_plugin; +} _category_plugin; } diff --git a/src/search/merge_and_shrink/merge_strategy_factory.h b/src/search/merge_and_shrink/merge_strategy_factory.h index 9f1f5a93a2..906c661e8c 100644 --- a/src/search/merge_and_shrink/merge_strategy_factory.h +++ b/src/search/merge_and_shrink/merge_strategy_factory.h @@ -28,15 +28,13 @@ class MergeStrategyFactory { virtual ~MergeStrategyFactory() = default; void dump_options() const; virtual std::unique_ptr compute_merge_strategy( - const TaskProxy &task_proxy, - const FactoredTransitionSystem &fts) = 0; + const TaskProxy &task_proxy, const FactoredTransitionSystem &fts) = 0; virtual bool requires_init_distances() const = 0; virtual bool requires_goal_distances() const = 0; }; extern void add_merge_strategy_options_to_feature(plugins::Feature &feature); -extern std::tuple -get_merge_strategy_arguments_from_options( +extern std::tuple get_merge_strategy_arguments_from_options( const plugins::Options &opts); } 
diff --git a/src/search/merge_and_shrink/merge_strategy_factory_precomputed.cc b/src/search/merge_and_shrink/merge_strategy_factory_precomputed.cc index e142e4f874..3e62a67d9f 100644 --- a/src/search/merge_and_shrink/merge_strategy_factory_precomputed.cc +++ b/src/search/merge_and_shrink/merge_strategy_factory_precomputed.cc @@ -1,8 +1,8 @@ #include "merge_strategy_factory_precomputed.h" #include "merge_strategy_precomputed.h" -#include "merge_tree_factory.h" #include "merge_tree.h" +#include "merge_tree_factory.h" #include "../plugins/plugin.h" @@ -10,13 +10,12 @@ using namespace std; namespace merge_and_shrink { MergeStrategyFactoryPrecomputed::MergeStrategyFactoryPrecomputed( - const shared_ptr &merge_tree, - utils::Verbosity verbosity) - : MergeStrategyFactory(verbosity), - merge_tree_factory(merge_tree) { + const shared_ptr &merge_tree, utils::Verbosity verbosity) + : MergeStrategyFactory(verbosity), merge_tree_factory(merge_tree) { } -unique_ptr MergeStrategyFactoryPrecomputed::compute_merge_strategy( +unique_ptr +MergeStrategyFactoryPrecomputed::compute_merge_strategy( const TaskProxy &task_proxy, const FactoredTransitionSystem &fts) { unique_ptr merge_tree = merge_tree_factory->compute_merge_tree(task_proxy); @@ -42,9 +41,11 @@ void MergeStrategyFactoryPrecomputed::dump_strategy_specific_options() const { } class MergeStrategyFactoryPrecomputedFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeStrategyFactory, MergeStrategyFactoryPrecomputed> { public: - MergeStrategyFactoryPrecomputedFeature() : TypedFeature("merge_precomputed") { + MergeStrategyFactoryPrecomputedFeature() + : TypedFeature("merge_precomputed") { document_title("Precomputed merge strategy"); document_synopsis( "This merge strategy has a precomputed merge tree. 
Note that this " @@ -55,8 +56,7 @@ class MergeStrategyFactoryPrecomputedFeature "by the merge tree."); add_option>( - "merge_tree", - "The precomputed merge tree."); + "merge_tree", "The precomputed merge tree."); add_merge_strategy_options_to_feature(*this); document_note( @@ -67,12 +67,12 @@ class MergeStrategyFactoryPrecomputedFeature "merge_strategy=merge_precomputed(merge_tree=linear())" "\n}}}"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + MergeStrategyFactoryPrecomputed>( opts.get>("merge_tree"), - get_merge_strategy_arguments_from_options(opts) - ); + get_merge_strategy_arguments_from_options(opts)); } }; diff --git a/src/search/merge_and_shrink/merge_strategy_factory_sccs.cc b/src/search/merge_and_shrink/merge_strategy_factory_sccs.cc index f61bbf6294..f8ccc76734 100644 --- a/src/search/merge_and_shrink/merge_strategy_factory_sccs.cc +++ b/src/search/merge_and_shrink/merge_strategy_factory_sccs.cc @@ -1,7 +1,7 @@ #include "merge_strategy_factory_sccs.h" -#include "merge_strategy_sccs.h" #include "merge_selector.h" +#include "merge_strategy_sccs.h" #include "merge_tree_factory.h" #include "transition_system.h" @@ -21,18 +21,19 @@ using namespace std; namespace merge_and_shrink { -static bool compare_sccs_increasing(const vector &lhs, const vector &rhs) { +static bool compare_sccs_increasing( + const vector &lhs, const vector &rhs) { return lhs.size() < rhs.size(); } -static bool compare_sccs_decreasing(const vector &lhs, const vector &rhs) { +static bool compare_sccs_decreasing( + const vector &lhs, const vector &rhs) { return lhs.size() > rhs.size(); } MergeStrategyFactorySCCs::MergeStrategyFactorySCCs( const OrderOfSCCs &order_of_sccs, - const shared_ptr &merge_selector, - utils::Verbosity verbosity) + const shared_ptr 
&merge_selector, utils::Verbosity verbosity) : MergeStrategyFactory(verbosity), order_of_sccs(order_of_sccs), merge_selector(merge_selector) { @@ -96,9 +97,7 @@ unique_ptr MergeStrategyFactorySCCs::compute_merge_strategy( } return make_unique( - fts, - merge_selector, - move(non_singleton_cg_sccs)); + fts, merge_selector, move(non_singleton_cg_sccs)); } bool MergeStrategyFactorySCCs::requires_init_distances() const { @@ -138,21 +137,20 @@ string MergeStrategyFactorySCCs::name() const { } class MergeStrategyFactorySCCsFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeStrategyFactory, MergeStrategyFactorySCCs> { public: MergeStrategyFactorySCCsFeature() : TypedFeature("merge_sccs") { document_title("Merge strategy SCCs"); document_synopsis( - "This merge strategy implements the algorithm described in the paper " - + utils::format_conference_reference( + "This merge strategy implements the algorithm described in the paper " + + utils::format_conference_reference( {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"}, "An Analysis of Merge Strategies for Merge-and-Shrink Heuristics", "https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf", "Proceedings of the 26th International Conference on Planning and " "Scheduling (ICAPS 2016)", - "2358-2366", - "AAAI Press", - "2016") + + "2358-2366", "AAAI Press", "2016") + "In a nutshell, it computes the maximal strongly connected " "components (SCCs) of the causal graph, " "obtaining a partitioning of the task's variables. 
Every such " @@ -163,37 +161,31 @@ class MergeStrategyFactorySCCsFeature "strategy and the configurable order of the SCCs."); add_option( - "order_of_sccs", - "how the SCCs should be ordered", - "topological"); + "order_of_sccs", "how the SCCs should be ordered", "topological"); add_option>( - "merge_selector", - "the fallback merge strategy to use"); + "merge_selector", "the fallback merge strategy to use"); add_merge_strategy_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("order_of_sccs"), - opts.get> ("merge_selector"), - get_merge_strategy_arguments_from_options(opts) - ); + opts.get>("merge_selector"), + get_merge_strategy_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"topological", - "according to the topological ordering of the directed graph " - "where each obtained SCC is a 'supervertex'"}, - {"reverse_topological", - "according to the reverse topological ordering of the directed " - "graph where each obtained SCC is a 'supervertex'"}, - {"decreasing", - "biggest SCCs first, using 'topological' as tie-breaker"}, - {"increasing", - "smallest SCCs first, using 'topological' as tie-breaker"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"topological", + "according to the topological ordering of the directed graph " + "where each obtained SCC is a 'supervertex'"}, + {"reverse_topological", + "according to the reverse topological ordering of the directed " + "graph where each obtained SCC is a 'supervertex'"}, + {"decreasing", "biggest SCCs first, using 'topological' as tie-breaker"}, + {"increasing", + "smallest SCCs first, using 'topological' as tie-breaker"}}); } diff --git a/src/search/merge_and_shrink/merge_strategy_factory_stateless.cc 
b/src/search/merge_and_shrink/merge_strategy_factory_stateless.cc index dbc86cfd18..9b91b1ecd8 100644 --- a/src/search/merge_and_shrink/merge_strategy_factory_stateless.cc +++ b/src/search/merge_and_shrink/merge_strategy_factory_stateless.cc @@ -9,15 +9,12 @@ using namespace std; namespace merge_and_shrink { MergeStrategyFactoryStateless::MergeStrategyFactoryStateless( - const shared_ptr &merge_selector, - utils::Verbosity verbosity) - : MergeStrategyFactory(verbosity), - merge_selector(merge_selector) { + const shared_ptr &merge_selector, utils::Verbosity verbosity) + : MergeStrategyFactory(verbosity), merge_selector(merge_selector) { } unique_ptr MergeStrategyFactoryStateless::compute_merge_strategy( - const TaskProxy &task_proxy, - const FactoredTransitionSystem &fts) { + const TaskProxy &task_proxy, const FactoredTransitionSystem &fts) { merge_selector->initialize(task_proxy); return make_unique(fts, merge_selector); } @@ -41,7 +38,8 @@ bool MergeStrategyFactoryStateless::requires_goal_distances() const { } class MergeStrategyFactoryStatelessFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + MergeStrategyFactory, MergeStrategyFactoryStateless> { public: MergeStrategyFactoryStatelessFeature() : TypedFeature("merge_stateless") { document_title("Stateless merge strategy"); @@ -51,8 +49,7 @@ class MergeStrategyFactoryStatelessFeature "system, not requiring any additional information."); add_option>( - "merge_selector", - "The merge selector to be used."); + "merge_selector", "The merge selector to be used."); add_merge_strategy_options_to_feature(*this); document_note( @@ -70,12 +67,12 @@ class MergeStrategyFactoryStatelessFeature "\n}}}"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + 
MergeStrategyFactoryStateless>( opts.get>("merge_selector"), - get_merge_strategy_arguments_from_options(opts) - ); + get_merge_strategy_arguments_from_options(opts)); } }; diff --git a/src/search/merge_and_shrink/merge_strategy_sccs.cc b/src/search/merge_and_shrink/merge_strategy_sccs.cc index ee926119c6..9a39d61d1c 100644 --- a/src/search/merge_and_shrink/merge_strategy_sccs.cc +++ b/src/search/merge_and_shrink/merge_strategy_sccs.cc @@ -35,7 +35,7 @@ pair MergeStrategySCCs::get_next() { if (non_singleton_cg_sccs.empty()) { // We are done dealing with all SCCs, allow merging any factors. current_ts_indices.reserve(fts.get_num_active_entries()); - for (int ts_index: fts) { + for (int ts_index : fts) { current_ts_indices.push_back(ts_index); } } else { @@ -54,7 +54,8 @@ pair MergeStrategySCCs::get_next() { } // Select the next merge for the current set of indices. - pair next_pair = merge_selector->select_merge(fts, current_ts_indices); + pair next_pair = + merge_selector->select_merge(fts, current_ts_indices); // Remove the two merged indices from the current index set. 
for (vector::iterator it = current_ts_indices.begin(); diff --git a/src/search/merge_and_shrink/merge_strategy_stateless.cc b/src/search/merge_and_shrink/merge_strategy_stateless.cc index fe182c35c7..59bd10caba 100644 --- a/src/search/merge_and_shrink/merge_strategy_stateless.cc +++ b/src/search/merge_and_shrink/merge_strategy_stateless.cc @@ -8,8 +8,7 @@ namespace merge_and_shrink { MergeStrategyStateless::MergeStrategyStateless( const FactoredTransitionSystem &fts, const shared_ptr &merge_selector) - : MergeStrategy(fts), - merge_selector(merge_selector) { + : MergeStrategy(fts), merge_selector(merge_selector) { } pair MergeStrategyStateless::get_next() { diff --git a/src/search/merge_and_shrink/merge_tree.cc b/src/search/merge_and_shrink/merge_tree.cc index 42b6ee084a..6713e669bc 100644 --- a/src/search/merge_and_shrink/merge_tree.cc +++ b/src/search/merge_and_shrink/merge_tree.cc @@ -33,8 +33,7 @@ MergeTreeNode::MergeTreeNode(int ts_index) } MergeTreeNode::MergeTreeNode( - MergeTreeNode *left_child, - MergeTreeNode *right_child) + MergeTreeNode *left_child, MergeTreeNode *right_child) : parent(nullptr), left_child(left_child), right_child(right_child), @@ -84,7 +83,8 @@ MergeTreeNode *MergeTreeNode::get_parent_of_ts_index(int index) { return this; } - if (right_child && right_child->is_leaf() && right_child->ts_index == index) { + if (right_child && right_child->is_leaf() && + right_child->ts_index == index) { return this; } @@ -108,10 +108,12 @@ int MergeTreeNode::compute_num_internal_nodes() const { } else { int number_of_internal_nodes = 1; // count the node itself if (left_child) { - number_of_internal_nodes += left_child->compute_num_internal_nodes(); + number_of_internal_nodes += + left_child->compute_num_internal_nodes(); } if (right_child) { - number_of_internal_nodes += right_child->compute_num_internal_nodes(); + number_of_internal_nodes += + right_child->compute_num_internal_nodes(); } return number_of_internal_nodes; } @@ -132,8 +134,7 @@ void 
MergeTreeNode::inorder( } MergeTree::MergeTree( - MergeTreeNode *root, - const shared_ptr &rng, + MergeTreeNode *root, const shared_ptr &rng, UpdateOption update_option) : root(root), rng(rng), update_option(update_option) { } @@ -159,7 +160,8 @@ pair MergeTree::get_parents_of_ts_indices( int found_indices = 0; while (!copy->is_leaf()) { MergeTreeNode *next_merge = copy->get_left_most_sibling(); - pair merge = next_merge->erase_children_and_set_index(new_index); + pair merge = + next_merge->erase_children_and_set_index(new_index); if (merge.first == ts_index1 || merge.second == ts_index1) { ++found_indices; } @@ -221,8 +223,9 @@ void MergeTree::update(pair merge, int new_index) { surviving_node->left_child->ts_index == ts_index2) { surviving_leaf = surviving_node->left_child; } else { - assert(surviving_node->right_child->ts_index == ts_index1 || - surviving_node->right_child->ts_index == ts_index2); + assert( + surviving_node->right_child->ts_index == ts_index1 || + surviving_node->right_child->ts_index == ts_index2); surviving_leaf = surviving_node->right_child; } surviving_leaf->ts_index = new_index; @@ -253,8 +256,9 @@ void MergeTree::update(pair merge, int new_index) { removed_node->right_child = nullptr; } else { - assert(removed_node->right_child->ts_index == ts_index1 || - removed_node->right_child->ts_index == ts_index2); + assert( + removed_node->right_child->ts_index == ts_index1 || + removed_node->right_child->ts_index == ts_index2); surviving_child_of_removed_node = removed_node->left_child; removed_node->left_child = nullptr; } @@ -264,7 +268,7 @@ void MergeTree::update(pair merge, int new_index) { } // Finally delete removed_node (this also deletes its child - //corresponding to one of the merged indices, but not the other one). + // corresponding to one of the merged indices, but not the other one). 
delete removed_node; removed_node = nullptr; @@ -275,10 +279,12 @@ void MergeTree::update(pair merge, int new_index) { // parent_of_removed_node can be nullptr if removed_node // was the root node if (!parent_of_removed_node->left_child) { - parent_of_removed_node->left_child = surviving_child_of_removed_node; + parent_of_removed_node->left_child = + surviving_child_of_removed_node; } else { assert(!parent_of_removed_node->right_child); - parent_of_removed_node->right_child = surviving_child_of_removed_node; + parent_of_removed_node->right_child = + surviving_child_of_removed_node; } } } @@ -286,8 +292,7 @@ void MergeTree::update(pair merge, int new_index) { void MergeTree::inorder_traversal( int indentation_offset, utils::LogProxy &log) const { - log << "Merge tree, read from left to right (90° rotated tree): " - << endl; + log << "Merge tree, read from left to right (90° rotated tree): " << endl; return root->inorder(indentation_offset, 0, log); } } diff --git a/src/search/merge_and_shrink/merge_tree.h b/src/search/merge_and_shrink/merge_tree.h index 1b6db5d2a7..25cd5cabb5 100644 --- a/src/search/merge_and_shrink/merge_tree.h +++ b/src/search/merge_and_shrink/merge_tree.h @@ -35,15 +35,16 @@ struct MergeTreeNode { // Find the parent node for the given index. 
MergeTreeNode *get_parent_of_ts_index(int index); int compute_num_internal_nodes() const; - void inorder(int offset, int current_indentation, utils::LogProxy &log) const; + void inorder( + int offset, int current_indentation, utils::LogProxy &log) const; bool is_leaf() const { return !left_child && !right_child; } bool has_two_leaf_children() const { - return left_child && right_child && - left_child->is_leaf() && right_child->is_leaf(); + return left_child && right_child && left_child->is_leaf() && + right_child->is_leaf(); } }; diff --git a/src/search/merge_and_shrink/merge_tree_factory.cc b/src/search/merge_and_shrink/merge_tree_factory.cc index 93acfc06ff..acb4e8967c 100644 --- a/src/search/merge_and_shrink/merge_tree_factory.cc +++ b/src/search/merge_and_shrink/merge_tree_factory.cc @@ -12,10 +12,8 @@ using namespace std; namespace merge_and_shrink { -MergeTreeFactory::MergeTreeFactory( - int random_seed, UpdateOption update_option) - : rng(utils::get_rng(random_seed)), - update_option(update_option) { +MergeTreeFactory::MergeTreeFactory(int random_seed, UpdateOption update_option) + : rng(utils::get_rng(random_seed)), update_option(update_option) { } void MergeTreeFactory::dump_options(utils::LogProxy &log) const { @@ -38,11 +36,10 @@ void MergeTreeFactory::dump_options(utils::LogProxy &log) const { } unique_ptr MergeTreeFactory::compute_merge_tree( - const TaskProxy &, - const FactoredTransitionSystem &, - const vector &) { + const TaskProxy &, const FactoredTransitionSystem &, const vector &) { cerr << "This merge tree does not support being computed on a subset " - "of indices for a given factored transition system!" << endl; + "of indices for a given factored transition system!" 
+ << endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } @@ -60,11 +57,11 @@ tuple get_merge_tree_arguments_from_options( const plugins::Options &opts) { return tuple_cat( utils::get_rng_arguments_from_options(opts), - make_tuple(opts.get("update_option")) - ); + make_tuple(opts.get("update_option"))); } -static class MergeTreeFactoryCategoryPlugin : public plugins::TypedCategoryPlugin { +static class MergeTreeFactoryCategoryPlugin + : public plugins::TypedCategoryPlugin { public: MergeTreeFactoryCategoryPlugin() : TypedCategoryPlugin("MergeTree") { document_synopsis( @@ -75,15 +72,12 @@ static class MergeTreeFactoryCategoryPlugin : public plugins::TypedCategoryPlugi "'precomputed', but they can also be used as fallback merge strategies in " "'combined' merge strategies."); } -} -_category_plugin; +} _category_plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"use_first", - "the node representing the index that would have been merged earlier survives"}, - {"use_second", - "the node representing the index that would have been merged later survives"}, - {"use_random", - "a random node (of the above two) survives"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"use_first", + "the node representing the index that would have been merged earlier survives"}, + {"use_second", + "the node representing the index that would have been merged later survives"}, + {"use_random", "a random node (of the above two) survives"}}); } diff --git a/src/search/merge_and_shrink/merge_tree_factory.h b/src/search/merge_and_shrink/merge_tree_factory.h index cf6c3b896f..c061b680a7 100644 --- a/src/search/merge_and_shrink/merge_tree_factory.h +++ b/src/search/merge_and_shrink/merge_tree_factory.h @@ -27,7 +27,8 @@ class MergeTreeFactory { std::shared_ptr rng; UpdateOption update_option; virtual std::string name() const = 0; - virtual void dump_tree_specific_options(utils::LogProxy &) const {} + virtual void dump_tree_specific_options(utils::LogProxy &) const { 
+ } public: MergeTreeFactory(int random_seed, UpdateOption update_option); virtual ~MergeTreeFactory() = default; @@ -38,18 +39,16 @@ class MergeTreeFactory { /* Compute a merge tree for the given current factored transition, system, possibly for a subset of indices. */ virtual std::unique_ptr compute_merge_tree( - const TaskProxy &task_proxy, - const FactoredTransitionSystem &fts, + const TaskProxy &task_proxy, const FactoredTransitionSystem &fts, const std::vector &indices_subset); virtual bool requires_init_distances() const = 0; virtual bool requires_goal_distances() const = 0; }; // Derived classes must call this method in their parsing methods. -extern void add_merge_tree_options_to_feature( - plugins::Feature &feature); -extern std::tuple -get_merge_tree_arguments_from_options(const plugins::Options &opts); +extern void add_merge_tree_options_to_feature(plugins::Feature &feature); +extern std::tuple get_merge_tree_arguments_from_options( + const plugins::Options &opts); } #endif diff --git a/src/search/merge_and_shrink/merge_tree_factory_linear.cc b/src/search/merge_and_shrink/merge_tree_factory_linear.cc index b136099e2c..5d3f171288 100644 --- a/src/search/merge_and_shrink/merge_tree_factory_linear.cc +++ b/src/search/merge_and_shrink/merge_tree_factory_linear.cc @@ -18,15 +18,16 @@ using namespace std; namespace merge_and_shrink { MergeTreeFactoryLinear::MergeTreeFactoryLinear( - variable_order_finder::VariableOrderType variable_order, - int random_seed, UpdateOption update_option) + variable_order_finder::VariableOrderType variable_order, int random_seed, + UpdateOption update_option) : MergeTreeFactory(random_seed, update_option), variable_order_type(variable_order) { } unique_ptr MergeTreeFactoryLinear::compute_merge_tree( const TaskProxy &task_proxy) { - variable_order_finder::VariableOrderFinder vof(task_proxy, variable_order_type, rng); + variable_order_finder::VariableOrderFinder vof( + task_proxy, variable_order_type, rng); MergeTreeNode *root = 
new MergeTreeNode(vof.next()); while (!vof.done()) { MergeTreeNode *right_child = new MergeTreeNode(vof.next()); @@ -36,8 +37,7 @@ unique_ptr MergeTreeFactoryLinear::compute_merge_tree( } unique_ptr MergeTreeFactoryLinear::compute_merge_tree( - const TaskProxy &task_proxy, - const FactoredTransitionSystem &fts, + const TaskProxy &task_proxy, const FactoredTransitionSystem &fts, const vector &indices_subset) { /* Compute a mapping from state variables to transition system indices @@ -50,8 +50,8 @@ unique_ptr MergeTreeFactoryLinear::compute_merge_tree( vector used_ts_indices(num_ts, true); for (int ts_index : fts) { bool use_ts_index = - find(indices_subset.begin(), indices_subset.end(), - ts_index) != indices_subset.end(); + find(indices_subset.begin(), indices_subset.end(), ts_index) != + indices_subset.end(); if (use_ts_index) { used_ts_indices[ts_index] = false; } @@ -68,7 +68,8 @@ unique_ptr MergeTreeFactoryLinear::compute_merge_tree( skipping all indices not in indices_subset, because these have been set to "used" above. 
*/ - variable_order_finder::VariableOrderFinder vof(task_proxy, variable_order_type, rng); + variable_order_finder::VariableOrderFinder vof( + task_proxy, variable_order_type, rng); int next_var = vof.next(); int ts_index = var_to_ts_index[next_var]; @@ -100,7 +101,8 @@ string MergeTreeFactoryLinear::name() const { return "linear"; } -void MergeTreeFactoryLinear::dump_tree_specific_options(utils::LogProxy &log) const { +void MergeTreeFactoryLinear::dump_tree_specific_options( + utils::LogProxy &log) const { if (log.is_at_least_normal()) { dump_variable_order_type(variable_order_type, log); } @@ -121,49 +123,46 @@ class MergeTreeFactoryLinearFeature document_title("Linear merge trees"); document_synopsis( "These merge trees implement several linear merge orders, which " - "are described in the paper:" + utils::format_conference_reference( + "are described in the paper:" + + utils::format_conference_reference( {"Malte Helmert", "Patrik Haslum", "Joerg Hoffmann"}, "Flexible Abstraction Heuristics for Optimal Sequential Planning", "https://ai.dmi.unibas.ch/papers/helmert-et-al-icaps2007.pdf", "Proceedings of the Seventeenth International Conference on" " Automated Planning and Scheduling (ICAPS 2007)", - "176-183", - "AAAI Press", - "2007")); + "176-183", "AAAI Press", "2007")); MergeTreeFactoryLinear::add_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get( "variable_order"), - get_merge_tree_arguments_from_options(opts) - ); + get_merge_tree_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"cg_goal_level", - "variables are prioritized first if they have an arc to a previously " - "added variable, second if their goal value is defined " - "and third according to their level in the causal 
graph"}, - {"cg_goal_random", - "variables are prioritized first if they have an arc to a previously " - "added variable, second if their goal value is defined " - "and third randomly"}, - {"goal_cg_level", - "variables are prioritized first if their goal value is defined, " - "second if they have an arc to a previously added variable, " - "and third according to their level in the causal graph"}, - {"random", - "variables are ordered randomly"}, - {"level", - "variables are ordered according to their level in the causal graph"}, - {"reverse_level", - "variables are ordered reverse to their level in the causal graph"} - }); +static plugins::TypedEnumPlugin + _enum_plugin( + {{"cg_goal_level", + "variables are prioritized first if they have an arc to a previously " + "added variable, second if their goal value is defined " + "and third according to their level in the causal graph"}, + {"cg_goal_random", + "variables are prioritized first if they have an arc to a previously " + "added variable, second if their goal value is defined " + "and third randomly"}, + {"goal_cg_level", + "variables are prioritized first if their goal value is defined, " + "second if they have an arc to a previously added variable, " + "and third according to their level in the causal graph"}, + {"random", "variables are ordered randomly"}, + {"level", + "variables are ordered according to their level in the causal graph"}, + {"reverse_level", + "variables are ordered reverse to their level in the causal graph"}}); } diff --git a/src/search/merge_and_shrink/merge_tree_factory_linear.h b/src/search/merge_and_shrink/merge_tree_factory_linear.h index 516ba2e2d2..2389ab000f 100644 --- a/src/search/merge_and_shrink/merge_tree_factory_linear.h +++ b/src/search/merge_and_shrink/merge_tree_factory_linear.h @@ -15,7 +15,8 @@ class MergeTreeFactoryLinear : public MergeTreeFactory { std::shared_ptr rng; protected: virtual std::string name() const override; - virtual void 
dump_tree_specific_options(utils::LogProxy &log) const override; + virtual void dump_tree_specific_options( + utils::LogProxy &log) const override; public: MergeTreeFactoryLinear( variable_order_finder::VariableOrderType variable_order, @@ -23,8 +24,7 @@ class MergeTreeFactoryLinear : public MergeTreeFactory { virtual std::unique_ptr compute_merge_tree( const TaskProxy &task_proxy) override; virtual std::unique_ptr compute_merge_tree( - const TaskProxy &task_proxy, - const FactoredTransitionSystem &fts, + const TaskProxy &task_proxy, const FactoredTransitionSystem &fts, const std::vector &indices_subset) override; virtual bool requires_init_distances() const override { diff --git a/src/search/merge_and_shrink/shrink_bisimulation.cc b/src/search/merge_and_shrink/shrink_bisimulation.cc index ba7ad4ab9a..ef41f8d832 100644 --- a/src/search/merge_and_shrink/shrink_bisimulation.cc +++ b/src/search/merge_and_shrink/shrink_bisimulation.cc @@ -12,8 +12,8 @@ #include #include -#include #include +#include #include #include @@ -52,9 +52,9 @@ struct Signature { SuccessorSignature succ_signature; int state; - Signature(int h, bool is_goal, int group_, - const SuccessorSignature &succ_signature_, - int state_) + Signature( + int h, bool is_goal, int group_, + const SuccessorSignature &succ_signature_, int state_) : group(group_), succ_signature(succ_signature_), state(state_) { if (is_goal) { assert(h == 0); @@ -77,30 +77,25 @@ struct Signature { void dump(utils::LogProxy &log) const { if (log.is_at_least_debug()) { log << "Signature(h_and_goal = " << h_and_goal - << ", group = " << group - << ", state = " << state + << ", group = " << group << ", state = " << state << ", succ_sig = ["; for (size_t i = 0; i < succ_signature.size(); ++i) { if (i) log << ", "; - log << "(" << succ_signature[i].first - << "," << succ_signature[i].second - << ")"; + log << "(" << succ_signature[i].first << "," + << succ_signature[i].second << ")"; } log << "])" << endl; } } }; - 
ShrinkBisimulation::ShrinkBisimulation(bool greedy, AtLimit at_limit) - : greedy(greedy), - at_limit(at_limit) { + : greedy(greedy), at_limit(at_limit) { } int ShrinkBisimulation::initialize_groups( - const TransitionSystem &ts, - const Distances &distances, + const TransitionSystem &ts, const Distances &distances, vector &state_to_group) const { /* Group 0 holds all goal states. @@ -124,8 +119,8 @@ int ShrinkBisimulation::initialize_groups( assert(h == 0); state_to_group[state] = 0; } else { - pair result = h_to_group.insert( - make_pair(h, num_groups)); + pair result = + h_to_group.insert(make_pair(h, num_groups)); state_to_group[state] = result.first->second; if (result.second) { // We inserted a new element => a new group was started. @@ -137,10 +132,8 @@ int ShrinkBisimulation::initialize_groups( } void ShrinkBisimulation::compute_signatures( - const TransitionSystem &ts, - const Distances &distances, - vector &signatures, - const vector &state_to_group) const { + const TransitionSystem &ts, const Distances &distances, + vector &signatures, const vector &state_to_group) const { assert(signatures.empty()); // Step 1: Compute bare state signatures (without transition information). @@ -150,12 +143,13 @@ void ShrinkBisimulation::compute_signatures( if (h == INF) { h = IRRELEVANT; } - Signature signature(h, ts.is_goal_state(state), - state_to_group[state], SuccessorSignature(), - state); + Signature signature( + h, ts.is_goal_state(state), state_to_group[state], + SuccessorSignature(), state); signatures.push_back(signature); } - signatures.push_back(Signature(SENTINEL, false, -1, SuccessorSignature(), -1)); + signatures.push_back( + Signature(SENTINEL, false, -1, SuccessorSignature(), -1)); // Step 2: Add transition information. 
int label_group_counter = 0; @@ -184,7 +178,8 @@ void ShrinkBisimulation::compute_signatures( label_reduction=exact(before_shrinking=true,before_merging=false))) */ for (const LocalLabelInfo &local_label_info : ts) { - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); for (const Transition &transition : transitions) { assert(signatures[transition.src + 1].state == transition.src); bool skip_transition = false; @@ -228,17 +223,15 @@ void ShrinkBisimulation::compute_signatures( for (size_t i = 0; i < signatures.size(); ++i) { SuccessorSignature &succ_sig = signatures[i].succ_signature; ::sort(succ_sig.begin(), succ_sig.end()); - succ_sig.erase(::unique(succ_sig.begin(), succ_sig.end()), - succ_sig.end()); + succ_sig.erase( + ::unique(succ_sig.begin(), succ_sig.end()), succ_sig.end()); } ::sort(signatures.begin(), signatures.end()); } StateEquivalenceRelation ShrinkBisimulation::compute_equivalence_relation( - const TransitionSystem &ts, - const Distances &distances, - int target_size, + const TransitionSystem &ts, const Distances &distances, int target_size, utils::LogProxy &) const { assert(distances.are_goal_distances_computed()); int num_states = ts.get_size(); @@ -320,8 +313,8 @@ StateEquivalenceRelation ShrinkBisimulation::compute_equivalence_relation( if (prev_sig.group != curr_sig.group) { // Start first group of a block; keep old group no. 
new_group_no = curr_sig.group; - } else if (prev_sig.succ_signature - != curr_sig.succ_signature) { + } else if ( + prev_sig.succ_signature != curr_sig.succ_signature) { new_group_no = num_groups++; assert(num_groups <= target_size); } @@ -361,7 +354,8 @@ string ShrinkBisimulation::name() const { return "bisimulation"; } -void ShrinkBisimulation::dump_strategy_specific_options(utils::LogProxy &log) const { +void ShrinkBisimulation::dump_strategy_specific_options( + utils::LogProxy &log) const { if (log.is_at_least_normal()) { log << "Bisimulation type: " << (greedy ? "greedy" : "exact") << endl; log << "At limit: "; @@ -383,21 +377,19 @@ class ShrinkBisimulationFeature document_title("Bismulation based shrink strategy"); document_synopsis( "This shrink strategy implements the algorithm described in" - " the paper:" + utils::format_conference_reference( + " the paper:" + + utils::format_conference_reference( {"Raz Nissim", "Joerg Hoffmann", "Malte Helmert"}, "Computing Perfect Heuristics in Polynomial Time: On Bisimulation" " and Merge-and-Shrink Abstractions in Optimal Planning.", "https://ai.dmi.unibas.ch/papers/nissim-et-al-ijcai2011.pdf", "Proceedings of the Twenty-Second International Joint Conference" " on Artificial Intelligence (IJCAI 2011)", - "1983-1990", - "AAAI Press", - "2011")); + "1983-1990", "AAAI Press", "2011")); add_option("greedy", "use greedy bisimulation", "false"); add_option( - "at_limit", - "what to do when the size limit is hit", "return"); + "at_limit", "what to do when the size limit is hit", "return"); document_note( "shrink_bisimulation(greedy=true)", @@ -422,22 +414,17 @@ class ShrinkBisimulationFeature "merging)."); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( - opts.get("greedy"), - opts.get("at_limit") - ); + opts.get("greedy"), opts.get("at_limit")); } }; static 
plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"return", - "stop without refining the equivalence class further"}, - {"use_up", - "continue refining the equivalence class until " - "the size limit is hit"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"return", "stop without refining the equivalence class further"}, + {"use_up", "continue refining the equivalence class until " + "the size limit is hit"}}); } diff --git a/src/search/merge_and_shrink/shrink_bisimulation.h b/src/search/merge_and_shrink/shrink_bisimulation.h index 58a023ec44..e69c478d32 100644 --- a/src/search/merge_and_shrink/shrink_bisimulation.h +++ b/src/search/merge_and_shrink/shrink_bisimulation.h @@ -16,30 +16,25 @@ class ShrinkBisimulation : public ShrinkStrategy { const AtLimit at_limit; void compute_abstraction( - const TransitionSystem &ts, - const Distances &distances, - int target_size, + const TransitionSystem &ts, const Distances &distances, int target_size, StateEquivalenceRelation &equivalence_relation) const; int initialize_groups( - const TransitionSystem &ts, - const Distances &distances, + const TransitionSystem &ts, const Distances &distances, std::vector &state_to_group) const; void compute_signatures( - const TransitionSystem &ts, - const Distances &distances, + const TransitionSystem &ts, const Distances &distances, std::vector &signatures, const std::vector &state_to_group) const; protected: - virtual void dump_strategy_specific_options(utils::LogProxy &log) const override; + virtual void dump_strategy_specific_options( + utils::LogProxy &log) const override; virtual std::string name() const override; public: explicit ShrinkBisimulation(bool greedy, AtLimit at_limit); virtual StateEquivalenceRelation compute_equivalence_relation( - const TransitionSystem &ts, - const Distances &distances, - int target_size, + const TransitionSystem &ts, const Distances &distances, int target_size, utils::LogProxy &log) const override; virtual bool 
requires_init_distances() const override { diff --git a/src/search/merge_and_shrink/shrink_bucket_based.cc b/src/search/merge_and_shrink/shrink_bucket_based.cc index 0465f23c53..ab21e8de0d 100644 --- a/src/search/merge_and_shrink/shrink_bucket_based.cc +++ b/src/search/merge_and_shrink/shrink_bucket_based.cc @@ -17,7 +17,8 @@ ShrinkBucketBased::ShrinkBucketBased(int random_seed) } StateEquivalenceRelation ShrinkBucketBased::compute_abstraction( - const vector &buckets, int target_size, utils::LogProxy &log) const { + const vector &buckets, int target_size, + utils::LogProxy &log) const { bool show_combine_buckets_warning = true; StateEquivalenceRelation equiv_relation; equiv_relation.reserve(target_size); @@ -55,7 +56,8 @@ StateEquivalenceRelation ShrinkBucketBased::compute_abstraction( } } StateEquivalenceClass &group = equiv_relation.back(); - group.insert_after(group.before_begin(), bucket.begin(), bucket.end()); + group.insert_after( + group.before_begin(), bucket.begin(), bucket.end()); } else { // Complicated case: must combine until bucket budget is met. // First create singleton groups. @@ -64,8 +66,9 @@ StateEquivalenceRelation ShrinkBucketBased::compute_abstraction( groups[i].push_front(bucket[i]); // Then combine groups until required size is reached. 
- assert(budget_for_this_bucket >= 2 && - budget_for_this_bucket < static_cast(groups.size())); + assert( + budget_for_this_bucket >= 2 && + budget_for_this_bucket < static_cast(groups.size())); while (static_cast(groups.size()) > budget_for_this_bucket) { auto it1 = rng->choose(groups); auto it2 = it1; @@ -89,9 +92,7 @@ StateEquivalenceRelation ShrinkBucketBased::compute_abstraction( } StateEquivalenceRelation ShrinkBucketBased::compute_equivalence_relation( - const TransitionSystem &ts, - const Distances &distances, - int target_size, + const TransitionSystem &ts, const Distances &distances, int target_size, utils::LogProxy &log) const { vector buckets = partition_into_buckets(ts, distances); return compute_abstraction(buckets, target_size, log); diff --git a/src/search/merge_and_shrink/shrink_bucket_based.h b/src/search/merge_and_shrink/shrink_bucket_based.h index 46caaf74ae..827bebcf7b 100644 --- a/src/search/merge_and_shrink/shrink_bucket_based.h +++ b/src/search/merge_and_shrink/shrink_bucket_based.h @@ -42,25 +42,20 @@ class ShrinkBucketBased : public ShrinkStrategy { private: StateEquivalenceRelation compute_abstraction( - const std::vector &buckets, - int target_size, + const std::vector &buckets, int target_size, utils::LogProxy &log) const; protected: virtual std::vector partition_into_buckets( - const TransitionSystem &ts, - const Distances &Distances) const = 0; + const TransitionSystem &ts, const Distances &Distances) const = 0; public: explicit ShrinkBucketBased(int random_seed); virtual StateEquivalenceRelation compute_equivalence_relation( - const TransitionSystem &ts, - const Distances &distances, - int target_size, + const TransitionSystem &ts, const Distances &distances, int target_size, utils::LogProxy &log) const override; }; -extern void add_shrink_bucket_options_to_feature( - plugins::Feature &feature); +extern void add_shrink_bucket_options_to_feature(plugins::Feature &feature); extern std::tuple get_shrink_bucket_arguments_from_options( 
const plugins::Options &opts); } diff --git a/src/search/merge_and_shrink/shrink_fh.cc b/src/search/merge_and_shrink/shrink_fh.cc index 89a5683abb..9ed32209a5 100644 --- a/src/search/merge_and_shrink/shrink_fh.cc +++ b/src/search/merge_and_shrink/shrink_fh.cc @@ -20,14 +20,11 @@ using namespace std; namespace merge_and_shrink { ShrinkFH::ShrinkFH(HighLow shrink_f, HighLow shrink_h, int random_seed) - : ShrinkBucketBased(random_seed), - f_start(shrink_f), - h_start(shrink_h) { + : ShrinkBucketBased(random_seed), f_start(shrink_f), h_start(shrink_h) { } vector ShrinkFH::partition_into_buckets( - const TransitionSystem &ts, - const Distances &distances) const { + const TransitionSystem &ts, const Distances &distances) const { assert(distances.are_init_distances_computed()); assert(distances.are_goal_distances_computed()); int max_h = 0; @@ -66,9 +63,8 @@ vector ShrinkFH::partition_into_buckets( // Helper function for ordered_buckets_use_map. template -static void collect_h_buckets( - HIterator begin, HIterator end, - vector &buckets) { +static void +collect_h_buckets(HIterator begin, HIterator end, vector &buckets) { for (HIterator iter = begin; iter != end; ++iter) { Bucket &bucket = iter->second; assert(!bucket.empty()); @@ -80,23 +76,21 @@ static void collect_h_buckets( // Helper function for ordered_buckets_use_map. 
template static void collect_f_h_buckets( - FHIterator begin, FHIterator end, - ShrinkFH::HighLow h_start, + FHIterator begin, FHIterator end, ShrinkFH::HighLow h_start, vector &buckets) { for (FHIterator iter = begin; iter != end; ++iter) { if (h_start == ShrinkFH::HighLow::HIGH) { - collect_h_buckets(iter->second.rbegin(), iter->second.rend(), - buckets); + collect_h_buckets( + iter->second.rbegin(), iter->second.rend(), buckets); } else { - collect_h_buckets(iter->second.begin(), iter->second.end(), - buckets); + collect_h_buckets( + iter->second.begin(), iter->second.end(), buckets); } } } vector ShrinkFH::ordered_buckets_use_map( - const TransitionSystem &ts, - const Distances &distances) const { + const TransitionSystem &ts, const Distances &distances) const { map> states_by_f_and_h; int bucket_count = 0; int num_states = ts.get_size(); @@ -119,21 +113,19 @@ vector ShrinkFH::ordered_buckets_use_map( buckets.reserve(bucket_count); if (f_start == HighLow::HIGH) { collect_f_h_buckets( - states_by_f_and_h.rbegin(), states_by_f_and_h.rend(), - h_start, buckets); + states_by_f_and_h.rbegin(), states_by_f_and_h.rend(), h_start, + buckets); } else { collect_f_h_buckets( - states_by_f_and_h.begin(), states_by_f_and_h.end(), - h_start, buckets); + states_by_f_and_h.begin(), states_by_f_and_h.end(), h_start, + buckets); } assert(static_cast(buckets.size()) == bucket_count); return buckets; } vector ShrinkFH::ordered_buckets_use_vector( - const TransitionSystem &ts, - const Distances &distances, - int max_f, + const TransitionSystem &ts, const Distances &distances, int max_f, int max_h) const { vector> states_by_f_and_h; states_by_f_and_h.resize(max_f + 1); @@ -161,8 +153,10 @@ vector ShrinkFH::ordered_buckets_use_vector( int f_end = (f_start == HighLow::HIGH ? 0 : max_f); int f_incr = (f_init > f_end ? -1 : 1); for (int f = f_init; f != f_end + f_incr; f += f_incr) { - int h_init = (h_start == HighLow::HIGH ? 
states_by_f_and_h[f].size() - 1 : 0); - int h_end = (h_start == HighLow::HIGH ? 0 : states_by_f_and_h[f].size() - 1); + int h_init = + (h_start == HighLow::HIGH ? states_by_f_and_h[f].size() - 1 : 0); + int h_end = + (h_start == HighLow::HIGH ? 0 : states_by_f_and_h[f].size() - 1); int h_incr = (h_init > h_end ? -1 : 1); for (int h = h_init; h != h_end + h_incr; h += h_incr) { Bucket &bucket = states_by_f_and_h[f][h]; @@ -189,22 +183,20 @@ void ShrinkFH::dump_strategy_specific_options(utils::LogProxy &log) const { } } -class ShrinkFHFeature - : public plugins::TypedFeature { +class ShrinkFHFeature : public plugins::TypedFeature { public: ShrinkFHFeature() : TypedFeature("shrink_fh") { document_title("f-preserving shrink strategy"); document_synopsis( "This shrink strategy implements the algorithm described in" - " the paper:" + utils::format_conference_reference( + " the paper:" + + utils::format_conference_reference( {"Malte Helmert", "Patrik Haslum", "Joerg Hoffmann"}, "Flexible Abstraction Heuristics for Optimal Sequential Planning", "https://ai.dmi.unibas.ch/papers/helmert-et-al-icaps2007.pdf", "Proceedings of the Seventeenth International Conference on" " Automated Planning and Scheduling (ICAPS 2007)", - "176-183", - "AAAI Press", - "2007")); + "176-183", "AAAI Press", "2007")); add_option( "shrink_f", @@ -212,8 +204,7 @@ class ShrinkFHFeature "high"); add_option( "shrink_h", - "in which direction the h based shrink priority is ordered", - "low"); + "in which direction the h based shrink priority is ordered", "low"); add_shrink_bucket_options_to_feature(*this); document_note( @@ -244,22 +235,18 @@ class ShrinkFHFeature "vector-based approach."); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("shrink_f"), opts.get("shrink_h"), - get_shrink_bucket_arguments_from_options(opts) - ); 
+ get_shrink_bucket_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"high", - "prefer shrinking states with high value"}, - {"low", - "prefer shrinking states with low value"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"high", "prefer shrinking states with high value"}, + {"low", "prefer shrinking states with low value"}}); } diff --git a/src/search/merge_and_shrink/shrink_fh.h b/src/search/merge_and_shrink/shrink_fh.h index 5ebaac8e1b..d93d2ba9e6 100644 --- a/src/search/merge_and_shrink/shrink_fh.h +++ b/src/search/merge_and_shrink/shrink_fh.h @@ -23,27 +23,27 @@ namespace merge_and_shrink { */ class ShrinkFH : public ShrinkBucketBased { public: - enum class HighLow {HIGH, LOW}; + enum class HighLow { + HIGH, + LOW + }; private: const HighLow f_start; const HighLow h_start; std::vector ordered_buckets_use_vector( - const TransitionSystem &ts, - const Distances &distances, - int max_f, + const TransitionSystem &ts, const Distances &distances, int max_f, int max_h) const; std::vector ordered_buckets_use_map( - const TransitionSystem &ts, - const Distances &distances) const; + const TransitionSystem &ts, const Distances &distances) const; protected: virtual std::string name() const override; - virtual void dump_strategy_specific_options(utils::LogProxy &log) const override; + virtual void dump_strategy_specific_options( + utils::LogProxy &log) const override; virtual std::vector partition_into_buckets( - const TransitionSystem &ts, - const Distances &distances) const override; + const TransitionSystem &ts, const Distances &distances) const override; public: ShrinkFH(HighLow shrink_f, HighLow shrink_h, int random_seed); diff --git a/src/search/merge_and_shrink/shrink_random.cc b/src/search/merge_and_shrink/shrink_random.cc index 911be792cf..3f751daf60 100644 --- a/src/search/merge_and_shrink/shrink_random.cc +++ b/src/search/merge_and_shrink/shrink_random.cc @@ -11,13 +11,11 @@ 
using namespace std; namespace merge_and_shrink { -ShrinkRandom::ShrinkRandom(int random_seed) - : ShrinkBucketBased(random_seed) { +ShrinkRandom::ShrinkRandom(int random_seed) : ShrinkBucketBased(random_seed) { } vector ShrinkRandom::partition_into_buckets( - const TransitionSystem &ts, - const Distances &) const { + const TransitionSystem &ts, const Distances &) const { vector buckets; buckets.resize(1); Bucket &big_bucket = buckets.back(); @@ -43,11 +41,10 @@ class ShrinkRandomFeature add_shrink_bucket_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - get_shrink_bucket_arguments_from_options(opts) - ); + get_shrink_bucket_arguments_from_options(opts)); } }; diff --git a/src/search/merge_and_shrink/shrink_random.h b/src/search/merge_and_shrink/shrink_random.h index 92592ce544..1dffdc662e 100644 --- a/src/search/merge_and_shrink/shrink_random.h +++ b/src/search/merge_and_shrink/shrink_random.h @@ -7,11 +7,11 @@ namespace merge_and_shrink { class ShrinkRandom : public ShrinkBucketBased { protected: virtual std::vector partition_into_buckets( - const TransitionSystem &ts, - const Distances &distances) const override; + const TransitionSystem &ts, const Distances &distances) const override; virtual std::string name() const override; - void dump_strategy_specific_options(utils::LogProxy &) const override {} + void dump_strategy_specific_options(utils::LogProxy &) const override { + } public: explicit ShrinkRandom(int random_seed); diff --git a/src/search/merge_and_shrink/shrink_strategy.cc b/src/search/merge_and_shrink/shrink_strategy.cc index 4e34edcc6c..2c6c7d4241 100644 --- a/src/search/merge_and_shrink/shrink_strategy.cc +++ b/src/search/merge_and_shrink/shrink_strategy.cc @@ -4,7 +4,6 @@ #include "transition_system.h" #include "../plugins/plugin.h" - #include 
"../utils/logging.h" #include @@ -24,14 +23,13 @@ string ShrinkStrategy::get_name() const { return name(); } -static class ShrinkStrategyCategoryPlugin : public plugins::TypedCategoryPlugin { +static class ShrinkStrategyCategoryPlugin + : public plugins::TypedCategoryPlugin { public: ShrinkStrategyCategoryPlugin() : TypedCategoryPlugin("ShrinkStrategy") { document_synopsis( "This page describes the various shrink strategies supported " - "by the planner." - ); + "by the planner."); } -} -_category_plugin; +} _category_plugin; } diff --git a/src/search/merge_and_shrink/shrink_strategy.h b/src/search/merge_and_shrink/shrink_strategy.h index 2b1f3ab42a..9881218d36 100644 --- a/src/search/merge_and_shrink/shrink_strategy.h +++ b/src/search/merge_and_shrink/shrink_strategy.h @@ -37,9 +37,7 @@ class ShrinkStrategy { way. */ virtual StateEquivalenceRelation compute_equivalence_relation( - const TransitionSystem &ts, - const Distances &distances, - int target_size, + const TransitionSystem &ts, const Distances &distances, int target_size, utils::LogProxy &log) const = 0; virtual bool requires_init_distances() const = 0; virtual bool requires_goal_distances() const = 0; diff --git a/src/search/merge_and_shrink/transition_system.cc b/src/search/merge_and_shrink/transition_system.cc index c842a74304..9578f12635 100644 --- a/src/search/merge_and_shrink/transition_system.cc +++ b/src/search/merge_and_shrink/transition_system.cc @@ -42,9 +42,8 @@ void LocalLabelInfo::remove_labels(const vector &old_labels) { assert(is_consistent()); assert(utils::is_sorted_unique(old_labels)); auto it = set_difference( - label_group.begin(), label_group.end(), - old_labels.begin(), old_labels.end(), - label_group.begin()); + label_group.begin(), label_group.end(), old_labels.begin(), + old_labels.end(), label_group.begin()); label_group.erase(it, label_group.end()); assert(is_consistent()); } @@ -87,7 +86,6 @@ bool LocalLabelInfo::is_consistent() const { utils::is_sorted_unique(transitions); } - 
TransitionSystemConstIterator::TransitionSystemConstIterator( vector::const_iterator it, vector::const_iterator end_it) @@ -123,14 +121,10 @@ TransitionSystemConstIterator &TransitionSystemConstIterator::operator++() { */ TransitionSystem::TransitionSystem( - int num_variables, - vector &&incorporated_variables, - const Labels &labels, - vector &&label_to_local_label, - vector &&local_label_infos, - int num_states, - vector &&goal_states, - int init_state) + int num_variables, vector &&incorporated_variables, + const Labels &labels, vector &&label_to_local_label, + vector &&local_label_infos, int num_states, + vector &&goal_states, int init_state) : num_variables(num_variables), incorporated_variables(move(incorporated_variables)), labels(move(labels)), @@ -157,10 +151,8 @@ TransitionSystem::~TransitionSystem() { } unique_ptr TransitionSystem::merge( - const Labels &labels, - const TransitionSystem &ts1, - const TransitionSystem &ts2, - utils::LogProxy &log) { + const Labels &labels, const TransitionSystem &ts1, + const TransitionSystem &ts2, utils::LogProxy &log) { if (log.is_at_least_verbose()) { log << "Merging " << ts1.get_description() << " and " << ts2.get_description() << endl; @@ -208,7 +200,8 @@ unique_ptr TransitionSystem::merge( LabelGroup dead_labels; for (const LocalLabelInfo &local_label_info : ts1) { const LabelGroup &group1 = local_label_info.get_label_group(); - const vector &transitions1 = local_label_info.get_transitions(); + const vector &transitions1 = + local_label_info.get_transitions(); // Distribute the labels of this group among the "buckets" // corresponding to the groups of ts2. 
@@ -227,8 +220,9 @@ unique_ptr TransitionSystem::merge( // Create the new transitions for this bucket vector new_transitions; - if (!transitions1.empty() && !transitions2.empty() - && transitions1.size() > new_transitions.max_size() / transitions2.size()) + if (!transitions1.empty() && !transitions2.empty() && + transitions1.size() > + new_transitions.max_size() / transitions2.size()) utils::exit_with(ExitCode::SEARCH_OUT_OF_MEMORY); new_transitions.reserve(transitions1.size() * transitions2.size()); for (const Transition &transition1 : transitions1) { @@ -246,7 +240,8 @@ unique_ptr TransitionSystem::merge( // Create a new group if the transitions are not empty LabelGroup &new_labels = bucket.second; if (new_transitions.empty()) { - dead_labels.insert(dead_labels.end(), new_labels.begin(), new_labels.end()); + dead_labels.insert( + dead_labels.end(), new_labels.begin(), new_labels.end()); } else { sort(new_transitions.begin(), new_transitions.end()); sort(new_labels.begin(), new_labels.end()); @@ -256,7 +251,8 @@ unique_ptr TransitionSystem::merge( cost = min(ts1.labels.get_label_cost(label), cost); label_to_local_label[label] = new_local_label; } - local_label_infos.emplace_back(move(new_labels), move(new_transitions), cost); + local_label_infos.emplace_back( + move(new_labels), move(new_transitions), cost); } } } @@ -277,19 +273,14 @@ unique_ptr TransitionSystem::merge( label_to_local_label[label] = new_local_label; } // Dead labels have empty transitions - local_label_infos.emplace_back(move(dead_labels), vector(), cost); + local_label_infos.emplace_back( + move(dead_labels), vector(), cost); } return make_unique( - num_variables, - move(incorporated_variables), - ts1.labels, - move(label_to_local_label), - move(local_label_infos), - num_states, - move(goal_states), - init_state - ); + num_variables, move(incorporated_variables), ts1.labels, + move(label_to_local_label), move(local_label_infos), num_states, + move(goal_states), init_state); } void 
TransitionSystem::compute_equivalent_local_labels() { @@ -305,14 +296,18 @@ void TransitionSystem::compute_equivalent_local_labels() { for (int local_label1 = 0; local_label1 < num_local_labels; ++local_label1) { if (local_label_infos[local_label1].is_active()) { - const vector &transitions1 = local_label_infos[local_label1].get_transitions(); + const vector &transitions1 = + local_label_infos[local_label1].get_transitions(); for (int local_label2 = local_label1 + 1; local_label2 < num_local_labels; ++local_label2) { if (local_label_infos[local_label2].is_active()) { - const vector &transitions2 = local_label_infos[local_label2].get_transitions(); - // Comparing transitions directly works because they are sorted and unique. + const vector &transitions2 = + local_label_infos[local_label2].get_transitions(); + // Comparing transitions directly works because they are + // sorted and unique. if (transitions1 == transitions2) { - for (int label : local_label_infos[local_label2].get_label_group()) { + for (int label : local_label_infos[local_label2] + .get_label_group()) { label_to_local_label[label] = local_label1; } local_label_infos[local_label1].merge_local_label_info( @@ -328,15 +323,14 @@ void TransitionSystem::compute_equivalent_local_labels() { void TransitionSystem::apply_abstraction( const StateEquivalenceRelation &state_equivalence_relation, - const vector &abstraction_mapping, - utils::LogProxy &log) { + const vector &abstraction_mapping, utils::LogProxy &log) { assert(is_valid()); int new_num_states = state_equivalence_relation.size(); assert(new_num_states < num_states); if (log.is_at_least_verbose()) { - log << tag() << "applying abstraction (" << get_size() - << " to " << new_num_states << " states)" << endl; + log << tag() << "applying abstraction (" << get_size() << " to " + << new_num_states << " states)" << endl; } vector new_goal_states(new_num_states, false); @@ -356,7 +350,8 @@ void TransitionSystem::apply_abstraction( // Update all transitions. 
for (LocalLabelInfo &local_label_info : local_label_infos) { - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); if (!transitions.empty()) { vector new_transitions; /* @@ -441,7 +436,7 @@ void TransitionSystem::apply_label_reduction( as a new local label and update the label_to_local_label mapping. */ unordered_map> local_label_to_old_labels; - for (const pair> &mapping: label_mapping) { + for (const pair> &mapping : label_mapping) { const vector &old_labels = mapping.second; assert(old_labels.size() >= 2); unordered_set seen_local_labels; @@ -449,8 +444,11 @@ void TransitionSystem::apply_label_reduction( for (int old_label : old_labels) { int old_local_label = label_to_local_label[old_label]; if (seen_local_labels.insert(old_local_label).second) { - const vector &transitions = local_label_infos[old_local_label].get_transitions(); - new_label_transitions.insert(new_label_transitions.end(), transitions.begin(), transitions.end()); + const vector &transitions = + local_label_infos[old_local_label].get_transitions(); + new_label_transitions.insert( + new_label_transitions.end(), transitions.begin(), + transitions.end()); } local_label_to_old_labels[old_local_label].push_back(old_label); // Reset (for consistency only, old labels are never accessed). 
@@ -464,7 +462,8 @@ void TransitionSystem::apply_label_reduction( int new_cost = labels.get_label_cost(new_label); LabelGroup new_label_group = {new_label}; - local_label_infos.emplace_back(move(new_label_group), move(new_label_transitions), new_cost); + local_label_infos.emplace_back( + move(new_label_group), move(new_label_transitions), new_cost); } /* @@ -498,34 +497,37 @@ bool TransitionSystem::are_local_labels_consistent() const { } bool TransitionSystem::is_valid() const { - return are_local_labels_consistent() - && is_label_mapping_consistent(); + return are_local_labels_consistent() && is_label_mapping_consistent(); } bool TransitionSystem::is_label_mapping_consistent() const { for (int label : labels) { int local_label = label_to_local_label[label]; - const LabelGroup &label_group = local_label_infos[local_label].get_label_group(); + const LabelGroup &label_group = + local_label_infos[local_label].get_label_group(); assert(!label_group.empty()); - if (find(label_group.begin(), - label_group.end(), - label) - == label_group.end()) { + if (find(label_group.begin(), label_group.end(), label) == + label_group.end()) { dump_label_mapping(); - cerr << "label " << label << " is not part of the " - "local label it is mapped to" << endl; + cerr << "label " << label + << " is not part of the " + "local label it is mapped to" + << endl; return false; } } - for (size_t local_label = 0; local_label < local_label_infos.size(); ++local_label) { + for (size_t local_label = 0; local_label < local_label_infos.size(); + ++local_label) { const LocalLabelInfo &local_label_info = local_label_infos[local_label]; for (int label : local_label_info.get_label_group()) { if (label_to_local_label[label] != static_cast(local_label)) { dump_label_mapping(); - cerr << "label " << label << " is not mapped " - "to the local label it is part of" << endl; + cerr << "label " << label + << " is not mapped " + "to the local label it is part of" + << endl; return false; } } @@ -536,15 +538,15 @@ 
bool TransitionSystem::is_label_mapping_consistent() const { void TransitionSystem::dump_label_mapping() const { utils::g_log << "to local label mapping: "; for (int label : labels) { - utils::g_log << label << " -> " - << label_to_local_label[label] << ", "; + utils::g_log << label << " -> " << label_to_local_label[label] << ", "; } utils::g_log << endl; utils::g_log << "local to label mapping: "; - for (size_t local_label = 0; - local_label < local_label_infos.size(); ++local_label) { + for (size_t local_label = 0; local_label < local_label_infos.size(); + ++local_label) { utils::g_log << local_label << ": " - << local_label_infos[local_label].get_label_group() << ", "; + << local_label_infos[local_label].get_label_group() + << ", "; } utils::g_log << endl; } @@ -553,7 +555,8 @@ bool TransitionSystem::is_solvable(const Distances &distances) const { if (init_state == PRUNED_STATE) { return false; } - if (distances.are_goal_distances_computed() && distances.get_goal_distance(init_state) == INF) { + if (distances.are_goal_distances_computed() && + distances.get_goal_distance(init_state) == INF) { return false; } return true; @@ -596,11 +599,13 @@ void TransitionSystem::dump_dot_graph(utils::LogProxy &log) const { } for (const LocalLabelInfo &local_label_info : *this) { const LabelGroup &label_group = local_label_info.get_label_group(); - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); for (const Transition &transition : transitions) { int src = transition.src; int target = transition.target; - log << " node" << src << " -> node" << target << " [label = "; + log << " node" << src << " -> node" << target + << " [label = "; for (size_t i = 0; i < label_group.size(); ++i) { if (i != 0) log << "_"; @@ -620,7 +625,8 @@ void TransitionSystem::dump_labels_and_transitions(utils::LogProxy &log) const { const LabelGroup &label_group = local_label_info.get_label_group(); log << "labels: " << 
label_group << endl; log << "transitions: "; - const vector &transitions = local_label_info.get_transitions(); + const vector &transitions = + local_label_info.get_transitions(); for (size_t i = 0; i < transitions.size(); ++i) { int src = transitions[i].src; int target = transitions[i].target; @@ -635,8 +641,8 @@ void TransitionSystem::dump_labels_and_transitions(utils::LogProxy &log) const { void TransitionSystem::statistics(utils::LogProxy &log) const { if (log.is_at_least_verbose()) { - log << tag() << get_size() << " states, " - << compute_total_transitions() << " arcs " << endl; + log << tag() << get_size() << " states, " << compute_total_transitions() + << " arcs " << endl; } } } diff --git a/src/search/merge_and_shrink/transition_system.h b/src/search/merge_and_shrink/transition_system.h index eea23a0254..4575d66f61 100644 --- a/src/search/merge_and_shrink/transition_system.h +++ b/src/search/merge_and_shrink/transition_system.h @@ -24,8 +24,7 @@ struct Transition { int src; int target; - Transition(int src, int target) - : src(src), target(target) { + Transition(int src, int target) : src(src), target(target) { } bool operator==(const Transition &other) const { @@ -54,15 +53,15 @@ using LabelGroup = std::vector; are sorted and unique. */ class LocalLabelInfo { - // The sorted set of labels with identical transitions in a transition system. + // The sorted set of labels with identical transitions in a transition + // system. LabelGroup label_group; std::vector transitions; // The cost is the minimum cost over all labels in label_group. int cost; public: LocalLabelInfo( - LabelGroup &&label_group, - std::vector &&transitions, + LabelGroup &&label_group, std::vector &&transitions, int cost) : label_group(move(label_group)), transitions(move(transitions)), @@ -115,7 +114,6 @@ class LocalLabelInfo { bool is_consistent() const; }; - /* Iterator class for TransitionSystem which provides access to the active entries of into local_label_infos. 
@@ -144,7 +142,6 @@ class TransitionSystemConstIterator { } }; - class TransitionSystem { private: /* @@ -197,14 +194,10 @@ class TransitionSystem { void dump_label_mapping() const; public: TransitionSystem( - int num_variables, - std::vector &&incorporated_variables, - const Labels &labels, - std::vector &&label_to_local_label, - std::vector &&local_label_infos, - int num_states, - std::vector &&goal_states, - int init_state); + int num_variables, std::vector &&incorporated_variables, + const Labels &labels, std::vector &&label_to_local_label, + std::vector &&local_label_infos, int num_states, + std::vector &&goal_states, int init_state); TransitionSystem(const TransitionSystem &other); ~TransitionSystem(); /* @@ -214,10 +207,8 @@ class TransitionSystem { (It is a bug to merge an unsolvable transition system.) */ static std::unique_ptr merge( - const Labels &labels, - const TransitionSystem &ts1, - const TransitionSystem &ts2, - utils::LogProxy &log); + const Labels &labels, const TransitionSystem &ts1, + const TransitionSystem &ts2, utils::LogProxy &log); /* Applies the given state equivalence relation to the transition system. @@ -228,8 +219,7 @@ class TransitionSystem { */ void apply_abstraction( const StateEquivalenceRelation &state_equivalence_relation, - const std::vector &abstraction_mapping, - utils::LogProxy &log); + const std::vector &abstraction_mapping, utils::LogProxy &log); /* Applies the given label mapping, mapping old to new label numbers. 
This @@ -240,11 +230,13 @@ class TransitionSystem { bool only_equivalent_labels); TransitionSystemConstIterator begin() const { - return TransitionSystemConstIterator(local_label_infos.begin(), local_label_infos.end()); + return TransitionSystemConstIterator( + local_label_infos.begin(), local_label_infos.end()); } TransitionSystemConstIterator end() const { - return TransitionSystemConstIterator(local_label_infos.end(), local_label_infos.end()); + return TransitionSystemConstIterator( + local_label_infos.end(), local_label_infos.end()); } /* diff --git a/src/search/merge_and_shrink/utils.cc b/src/search/merge_and_shrink/utils.cc index 4f5e935e7b..96e583aae1 100644 --- a/src/search/merge_and_shrink/utils.cc +++ b/src/search/merge_and_shrink/utils.cc @@ -16,9 +16,7 @@ using namespace std; namespace merge_and_shrink { pair compute_shrink_sizes( - int size1, - int size2, - int max_states_before_merge, + int size1, int size2, int max_states_before_merge, int max_states_after_merge) { // Bound both sizes by max allowed size before merge. int new_size1 = min(size1, max_states_before_merge); @@ -62,11 +60,8 @@ pair compute_shrink_sizes( Return true iff the factor was actually shrunk. 
*/ static bool shrink_factor( - FactoredTransitionSystem &fts, - int index, - int new_size, - int shrink_threshold_before_merge, - const ShrinkStrategy &shrink_strategy, + FactoredTransitionSystem &fts, int index, int new_size, + int shrink_threshold_before_merge, const ShrinkStrategy &shrink_strategy, utils::LogProxy &log) { /* TODO: think about factoring out common logic of this function and the @@ -86,31 +81,26 @@ static bool shrink_factor( const Distances &distances = fts.get_distances(index); StateEquivalenceRelation equivalence_relation = - shrink_strategy.compute_equivalence_relation(ts, distances, new_size, log); + shrink_strategy.compute_equivalence_relation( + ts, distances, new_size, log); // TODO: We currently violate this; see issue250 - //assert(equivalence_relation.size() <= target_size); + // assert(equivalence_relation.size() <= target_size); return fts.apply_abstraction(index, equivalence_relation, log); } return false; } bool shrink_before_merge_step( - FactoredTransitionSystem &fts, - int index1, - int index2, - int max_states, - int max_states_before_merge, - int shrink_threshold_before_merge, - const ShrinkStrategy &shrink_strategy, - utils::LogProxy &log) { + FactoredTransitionSystem &fts, int index1, int index2, int max_states, + int max_states_before_merge, int shrink_threshold_before_merge, + const ShrinkStrategy &shrink_strategy, utils::LogProxy &log) { /* Compute the size limit for both transition systems as imposed by max_states and max_states_before_merge. */ pair new_sizes = compute_shrink_sizes( fts.get_transition_system(index1).get_size(), - fts.get_transition_system(index2).get_size(), - max_states_before_merge, + fts.get_transition_system(index2).get_size(), max_states_before_merge, max_states); /* @@ -121,22 +111,14 @@ bool shrink_before_merge_step( required. 
*/ bool shrunk1 = shrink_factor( - fts, - index1, - new_sizes.first, - shrink_threshold_before_merge, - shrink_strategy, - log); + fts, index1, new_sizes.first, shrink_threshold_before_merge, + shrink_strategy, log); if (shrunk1) { fts.statistics(index1, log); } bool shrunk2 = shrink_factor( - fts, - index2, - new_sizes.second, - shrink_threshold_before_merge, - shrink_strategy, - log); + fts, index2, new_sizes.second, shrink_threshold_before_merge, + shrink_strategy, log); if (shrunk2) { fts.statistics(index2, log); } @@ -144,11 +126,8 @@ bool shrink_before_merge_step( } bool prune_step( - FactoredTransitionSystem &fts, - int index, - bool prune_unreachable_states, - bool prune_irrelevant_states, - utils::LogProxy &log) { + FactoredTransitionSystem &fts, int index, bool prune_unreachable_states, + bool prune_irrelevant_states, utils::LogProxy &log) { assert(prune_unreachable_states || prune_irrelevant_states); const TransitionSystem &ts = fts.get_transition_system(index); const Distances &distances = fts.get_distances(index); @@ -184,10 +163,8 @@ bool prune_step( state_equivalence_relation.push_back(state_equivalence_class); } } - if (log.is_at_least_verbose() && - (unreachable_count || irrelevant_count)) { - log << ts.tag() - << "unreachable: " << unreachable_count << " states, " + if (log.is_at_least_verbose() && (unreachable_count || irrelevant_count)) { + log << ts.tag() << "unreachable: " << unreachable_count << " states, " << "irrelevant: " << irrelevant_count << " states (" << "total dead: " << dead_count << " states)" << endl; } @@ -195,10 +172,10 @@ bool prune_step( } vector compute_abstraction_mapping( - int num_states, - const StateEquivalenceRelation &equivalence_relation) { + int num_states, const StateEquivalenceRelation &equivalence_relation) { vector abstraction_mapping(num_states, PRUNED_STATE); - for (size_t class_no = 0; class_no < equivalence_relation.size(); ++class_no) { + for (size_t class_no = 0; class_no < equivalence_relation.size(); + 
++class_no) { const StateEquivalenceClass &state_equivalence_class = equivalence_relation[class_no]; for (int state : state_equivalence_class) { diff --git a/src/search/merge_and_shrink/utils.h b/src/search/merge_and_shrink/utils.h index b1aee067fd..4fe3f9b903 100644 --- a/src/search/merge_and_shrink/utils.h +++ b/src/search/merge_and_shrink/utils.h @@ -29,9 +29,7 @@ class TransitionSystem; are preferred over less balanced ones. */ extern std::pair compute_shrink_sizes( - int size1, - int size2, - int max_states_before_merge, + int size1, int size2, int max_states_before_merge, int max_states_after_merge); /* @@ -47,14 +45,9 @@ extern std::pair compute_shrink_sizes( factors was shrunk. */ extern bool shrink_before_merge_step( - FactoredTransitionSystem &fts, - int index1, - int index2, - int max_states, - int max_states_before_merge, - int shrink_threshold_before_merge, - const ShrinkStrategy &shrink_strategy, - utils::LogProxy &log); + FactoredTransitionSystem &fts, int index1, int index2, int max_states, + int max_states_before_merge, int shrink_threshold_before_merge, + const ShrinkStrategy &shrink_strategy, utils::LogProxy &log); /* Prune unreachable and/or irrelevant states of the factor at index. This @@ -64,19 +57,15 @@ extern bool shrink_before_merge_step( TODO: maybe this functionality belongs to a new class PruneStrategy. */ extern bool prune_step( - FactoredTransitionSystem &fts, - int index, - bool prune_unreachable_states, - bool prune_irrelevant_states, - utils::LogProxy &log); + FactoredTransitionSystem &fts, int index, bool prune_unreachable_states, + bool prune_irrelevant_states, utils::LogProxy &log); /* Compute the abstraction mapping based on the given state equivalence relation. 
*/ extern std::vector compute_abstraction_mapping( - int num_states, - const StateEquivalenceRelation &equivalence_relation); + int num_states, const StateEquivalenceRelation &equivalence_relation); extern bool is_goal_relevant(const TransitionSystem &ts); } diff --git a/src/search/open_list.h b/src/search/open_list.h index 683e5b81e3..dbedc7ba05 100644 --- a/src/search/open_list.h +++ b/src/search/open_list.h @@ -1,13 +1,12 @@ #ifndef OPEN_LIST_H #define OPEN_LIST_H -#include - #include "evaluation_context.h" #include "operator_id.h" -class StateID; +#include +class StateID; template class OpenList { @@ -21,8 +20,8 @@ class OpenList { to be inserted is not preferred. Hence, these conditions need not be checked by the implementation. */ - virtual void do_insertion(EvaluationContext &eval_context, - const Entry &entry) = 0; + virtual void do_insertion( + EvaluationContext &eval_context, const Entry &entry) = 0; public: explicit OpenList(bool preferred_only = false); @@ -125,14 +124,12 @@ class OpenList { EvaluationContext &eval_context) const = 0; }; - using StateOpenListEntry = StateID; using EdgeOpenListEntry = std::pair; using StateOpenList = OpenList; using EdgeOpenList = OpenList; - template OpenList::OpenList(bool only_preferred) : only_preferred(only_preferred) { diff --git a/src/search/open_list_factory.cc b/src/search/open_list_factory.cc index 7da73c3c9d..a7669bfcd1 100644 --- a/src/search/open_list_factory.cc +++ b/src/search/open_list_factory.cc @@ -5,7 +5,6 @@ using namespace std; - template<> unique_ptr OpenListFactory::create_open_list() { return create_state_open_list(); @@ -16,23 +15,20 @@ unique_ptr OpenListFactory::create_open_list() { return create_edge_open_list(); } -void add_open_list_options_to_feature( - plugins::Feature &feature) { +void add_open_list_options_to_feature(plugins::Feature &feature) { feature.add_option( - "pref_only", - "insert only nodes generated by preferred operators", + "pref_only", "insert only nodes generated by 
preferred operators", "false"); } -tuple get_open_list_arguments_from_options( - const plugins::Options &opts) { +tuple get_open_list_arguments_from_options(const plugins::Options &opts) { return make_tuple(opts.get("pref_only")); } -static class OpenListFactoryCategoryPlugin : public plugins::TypedCategoryPlugin { +static class OpenListFactoryCategoryPlugin + : public plugins::TypedCategoryPlugin { public: OpenListFactoryCategoryPlugin() : TypedCategoryPlugin("OpenList") { // TODO: use document_synopsis() for the wiki page. } -} -_category_plugin; +} _category_plugin; diff --git a/src/search/open_list_factory.h b/src/search/open_list_factory.h index c8d436550a..60ba126709 100644 --- a/src/search/open_list_factory.h +++ b/src/search/open_list_factory.h @@ -7,7 +7,6 @@ #include - class OpenListFactory { public: OpenListFactory() = default; @@ -28,9 +27,7 @@ class OpenListFactory { std::unique_ptr> create_open_list(); }; - -extern void add_open_list_options_to_feature( - plugins::Feature &feature); -extern std::tuple -get_open_list_arguments_from_options(const plugins::Options &opts); +extern void add_open_list_options_to_feature(plugins::Feature &feature); +extern std::tuple get_open_list_arguments_from_options( + const plugins::Options &opts); #endif diff --git a/src/search/open_lists/alternation_open_list.cc b/src/search/open_lists/alternation_open_list.cc index fb49ce037e..8f8574ed92 100644 --- a/src/search/open_lists/alternation_open_list.cc +++ b/src/search/open_lists/alternation_open_list.cc @@ -21,8 +21,8 @@ class AlternationOpenList : public OpenList { const int boost_amount; protected: - virtual void do_insertion(EvaluationContext &eval_context, - const Entry &entry) override; + virtual void do_insertion( + EvaluationContext &eval_context, const Entry &entry) override; public: AlternationOpenList( @@ -34,13 +34,11 @@ class AlternationOpenList : public OpenList { virtual void boost_preferred() override; virtual void get_path_dependent_evaluators( set &evals) 
override; - virtual bool is_dead_end( - EvaluationContext &eval_context) const override; + virtual bool is_dead_end(EvaluationContext &eval_context) const override; virtual bool is_reliable_dead_end( EvaluationContext &eval_context) const override; }; - template AlternationOpenList::AlternationOpenList( const vector> &sublists, int boost) @@ -126,37 +124,31 @@ bool AlternationOpenList::is_reliable_dead_end( return false; } - AlternationOpenListFactory::AlternationOpenListFactory( const vector> &sublists, int boost) - : sublists(sublists), - boost(boost) { + : sublists(sublists), boost(boost) { utils::verify_list_not_empty(sublists, "sublists"); } -unique_ptr -AlternationOpenListFactory::create_state_open_list() { +unique_ptr AlternationOpenListFactory::create_state_open_list() { return make_unique>( sublists, boost); } -unique_ptr -AlternationOpenListFactory::create_edge_open_list() { - return make_unique>( - sublists, boost); +unique_ptr AlternationOpenListFactory::create_edge_open_list() { + return make_unique>(sublists, boost); } class AlternationOpenListFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + OpenListFactory, AlternationOpenListFactory> { public: AlternationOpenListFeature() : TypedFeature("alt") { document_title("Alternation open list"); - document_synopsis( - "alternates between several open lists."); + document_synopsis("alternates between several open lists."); add_list_option>( - "sublists", - "open lists between which this one alternates"); + "sublists", "open lists between which this one alternates"); add_option( "boost", "boost value for contained open lists that are restricted " @@ -164,12 +156,11 @@ class AlternationOpenListFeature "0"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list>("sublists"), - opts.get("boost") - ); + 
opts.get("boost")); } }; diff --git a/src/search/open_lists/best_first_open_list.cc b/src/search/open_lists/best_first_open_list.cc index 52397b93c6..95a979bd36 100644 --- a/src/search/open_lists/best_first_open_list.cc +++ b/src/search/open_lists/best_first_open_list.cc @@ -22,8 +22,8 @@ class BestFirstOpenList : public OpenList { shared_ptr evaluator; protected: - virtual void do_insertion(EvaluationContext &eval_context, - const Entry &entry) override; + virtual void do_insertion( + EvaluationContext &eval_context, const Entry &entry) override; public: BestFirstOpenList(const shared_ptr &eval, bool preferred_only); @@ -31,9 +31,9 @@ class BestFirstOpenList : public OpenList { virtual Entry remove_min() override; virtual bool empty() const override; virtual void clear() override; - virtual void get_path_dependent_evaluators(set &evals) override; - virtual bool is_dead_end( - EvaluationContext &eval_context) const override; + virtual void get_path_dependent_evaluators( + set &evals) override; + virtual bool is_dead_end(EvaluationContext &eval_context) const override; virtual bool is_reliable_dead_end( EvaluationContext &eval_context) const override; }; @@ -41,9 +41,7 @@ class BestFirstOpenList : public OpenList { template BestFirstOpenList::BestFirstOpenList( const shared_ptr &evaluator, bool preferred_only) - : OpenList(preferred_only), - size(0), - evaluator(evaluator) { + : OpenList(preferred_only), size(0), evaluator(evaluator) { } template @@ -100,20 +98,15 @@ bool BestFirstOpenList::is_reliable_dead_end( BestFirstOpenListFactory::BestFirstOpenListFactory( const shared_ptr &eval, bool pref_only) - : eval(eval), - pref_only(pref_only) { + : eval(eval), pref_only(pref_only) { } -unique_ptr -BestFirstOpenListFactory::create_state_open_list() { - return make_unique>( - eval, pref_only); +unique_ptr BestFirstOpenListFactory::create_state_open_list() { + return make_unique>(eval, pref_only); } -unique_ptr -BestFirstOpenListFactory::create_edge_open_list() { - 
return make_unique>( - eval, pref_only); +unique_ptr BestFirstOpenListFactory::create_edge_open_list() { + return make_unique>(eval, pref_only); } class BestFirstOpenListFeature @@ -136,9 +129,8 @@ class BestFirstOpenListFeature "takes time O(log(n)), where n is the number of buckets."); } - - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("eval"), get_open_list_arguments_from_options(opts)); diff --git a/src/search/open_lists/epsilon_greedy_open_list.cc b/src/search/open_lists/epsilon_greedy_open_list.cc index 88ce8b6db2..9774f8b4cd 100644 --- a/src/search/open_lists/epsilon_greedy_open_list.cc +++ b/src/search/open_lists/epsilon_greedy_open_list.cc @@ -40,20 +40,20 @@ class EpsilonGreedyOpenList : public OpenList { int next_id; protected: - virtual void do_insertion(EvaluationContext &eval_context, - const Entry &entry) override; + virtual void do_insertion( + EvaluationContext &eval_context, const Entry &entry) override; public: EpsilonGreedyOpenList( - const shared_ptr &eval, double epsilon, - int random_seed, bool pref_only); + const shared_ptr &eval, double epsilon, int random_seed, + bool pref_only); virtual Entry remove_min() override; - virtual bool is_dead_end( - EvaluationContext &eval_context) const override; + virtual bool is_dead_end(EvaluationContext &eval_context) const override; virtual bool is_reliable_dead_end( EvaluationContext &eval_context) const override; - virtual void get_path_dependent_evaluators(set &evals) override; + virtual void get_path_dependent_evaluators( + set &evals) override; virtual bool empty() const override; virtual void clear() override; }; @@ -82,8 +82,8 @@ void EpsilonGreedyOpenList::do_insertion( template EpsilonGreedyOpenList::EpsilonGreedyOpenList( - const shared_ptr &eval, double epsilon, - int random_seed, bool pref_only) + const 
shared_ptr &eval, double epsilon, int random_seed, + bool pref_only) : OpenList(pref_only), rng(utils::get_rng(random_seed)), evaluator(eval), @@ -138,8 +138,8 @@ void EpsilonGreedyOpenList::clear() { } EpsilonGreedyOpenListFactory::EpsilonGreedyOpenListFactory( - const shared_ptr &eval, double epsilon, - int random_seed, bool pref_only) + const shared_ptr &eval, double epsilon, int random_seed, + bool pref_only) : eval(eval), epsilon(epsilon), random_seed(random_seed), @@ -152,21 +152,22 @@ EpsilonGreedyOpenListFactory::create_state_open_list() { eval, epsilon, random_seed, pref_only); } -unique_ptr -EpsilonGreedyOpenListFactory::create_edge_open_list() { +unique_ptr EpsilonGreedyOpenListFactory::create_edge_open_list() { return make_unique>( eval, epsilon, random_seed, pref_only); } class EpsilonGreedyOpenListFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + OpenListFactory, EpsilonGreedyOpenListFactory> { public: EpsilonGreedyOpenListFeature() : TypedFeature("epsilon_greedy") { document_title("Epsilon-greedy open list"); document_synopsis( "Chooses an entry uniformly randomly with probability " "'epsilon', otherwise it returns the minimum entry. " - "The algorithm is based on" + utils::format_conference_reference( + "The algorithm is based on" + + utils::format_conference_reference( {"Richard Valenzano", "Nathan R. 
Sturtevant", "Jonathan Schaeffer", "Fan Xie"}, "A Comparison of Knowledge-Based GBFS Enhancements and" @@ -174,28 +175,24 @@ class EpsilonGreedyOpenListFeature "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7943/8066", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", - "375-379", - "AAAI Press", - "2014")); + "375-379", "AAAI Press", "2014")); add_option>("eval", "evaluator"); add_option( - "epsilon", - "probability for choosing the next entry randomly", - "0.2", - plugins::Bounds("0.0", "1.0")); + "epsilon", "probability for choosing the next entry randomly", + "0.2", plugins::Bounds("0.0", "1.0")); utils::add_rng_options_to_feature(*this); add_open_list_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + EpsilonGreedyOpenListFactory>( opts.get>("eval"), opts.get("epsilon"), utils::get_rng_arguments_from_options(opts), - get_open_list_arguments_from_options(opts) - ); + get_open_list_arguments_from_options(opts)); } }; diff --git a/src/search/open_lists/epsilon_greedy_open_list.h b/src/search/open_lists/epsilon_greedy_open_list.h index a72c34684f..24c58d3b0b 100644 --- a/src/search/open_lists/epsilon_greedy_open_list.h +++ b/src/search/open_lists/epsilon_greedy_open_list.h @@ -48,8 +48,8 @@ class EpsilonGreedyOpenListFactory : public OpenListFactory { bool pref_only; public: EpsilonGreedyOpenListFactory( - const std::shared_ptr &eval, double epsilon, - int random_seed, bool pref_only); + const std::shared_ptr &eval, double epsilon, int random_seed, + bool pref_only); virtual std::unique_ptr create_state_open_list() override; virtual std::unique_ptr create_edge_open_list() override; diff --git a/src/search/open_lists/pareto_open_list.cc 
b/src/search/open_lists/pareto_open_list.cc index 096eacf940..1018489389 100644 --- a/src/search/open_lists/pareto_open_list.cc +++ b/src/search/open_lists/pareto_open_list.cc @@ -38,8 +38,8 @@ class ParetoOpenList : public OpenList { void remove_key(const KeyType &key); protected: - virtual void do_insertion(EvaluationContext &eval_context, - const Entry &entry) override; + virtual void do_insertion( + EvaluationContext &eval_context, const Entry &entry) override; public: ParetoOpenList( @@ -49,17 +49,17 @@ class ParetoOpenList : public OpenList { virtual Entry remove_min() override; virtual bool empty() const override; virtual void clear() override; - virtual void get_path_dependent_evaluators(set &evals) override; - virtual bool is_dead_end( - EvaluationContext &eval_context) const override; + virtual void get_path_dependent_evaluators( + set &evals) override; + virtual bool is_dead_end(EvaluationContext &eval_context) const override; virtual bool is_reliable_dead_end( EvaluationContext &eval_context) const override; }; template ParetoOpenList::ParetoOpenList( - const vector> &evals, - bool state_uniform_selection, int random_seed, bool pref_only) + const vector> &evals, bool state_uniform_selection, + int random_seed, bool pref_only) : OpenList(pref_only), rng(utils::get_rng(random_seed)), state_uniform_selection(state_uniform_selection), @@ -125,7 +125,8 @@ void ParetoOpenList::do_insertion( vector key; key.reserve(evaluators.size()); for (const shared_ptr &evaluator : evaluators) - key.push_back(eval_context.get_evaluator_value_or_infinity(evaluator.get())); + key.push_back( + eval_context.get_evaluator_value_or_infinity(evaluator.get())); Bucket &bucket = buckets[key]; bool newkey = bucket.empty(); @@ -197,8 +198,7 @@ void ParetoOpenList::get_path_dependent_evaluators( } template -bool ParetoOpenList::is_dead_end( - EvaluationContext &eval_context) const { +bool ParetoOpenList::is_dead_end(EvaluationContext &eval_context) const { // TODO: Document this 
behaviour. // If one safe heuristic detects a dead end, return true. if (is_reliable_dead_end(eval_context)) @@ -221,22 +221,20 @@ bool ParetoOpenList::is_reliable_dead_end( } ParetoOpenListFactory::ParetoOpenListFactory( - const vector> &evals, - bool state_uniform_selection, int random_seed, bool pref_only) + const vector> &evals, bool state_uniform_selection, + int random_seed, bool pref_only) : evals(evals), state_uniform_selection(state_uniform_selection), random_seed(random_seed), pref_only(pref_only) { } -unique_ptr -ParetoOpenListFactory::create_state_open_list() { +unique_ptr ParetoOpenListFactory::create_state_open_list() { return make_unique>( evals, state_uniform_selection, random_seed, pref_only); } -unique_ptr -ParetoOpenListFactory::create_edge_open_list() { +unique_ptr ParetoOpenListFactory::create_edge_open_list() { return make_unique>( evals, state_uniform_selection, random_seed, pref_only); } @@ -262,14 +260,13 @@ class ParetoOpenListFeature add_open_list_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list>("evals"), opts.get("state_uniform_selection"), utils::get_rng_arguments_from_options(opts), - get_open_list_arguments_from_options(opts) - ); + get_open_list_arguments_from_options(opts)); } }; diff --git a/src/search/open_lists/tiebreaking_open_list.cc b/src/search/open_lists/tiebreaking_open_list.cc index c50bf5c577..8a50ad6e27 100644 --- a/src/search/open_lists/tiebreaking_open_list.cc +++ b/src/search/open_lists/tiebreaking_open_list.cc @@ -32,31 +32,31 @@ class TieBreakingOpenList : public OpenList { int dimension() const; protected: - virtual void do_insertion(EvaluationContext &eval_context, - const Entry &entry) override; + virtual void do_insertion( + EvaluationContext &eval_context, const Entry &entry) override; public: 
TieBreakingOpenList( - const vector> &evals, - bool unsafe_pruning, bool pref_only); + const vector> &evals, bool unsafe_pruning, + bool pref_only); virtual Entry remove_min() override; virtual bool empty() const override; virtual void clear() override; - virtual void get_path_dependent_evaluators(set &evals) override; - virtual bool is_dead_end( - EvaluationContext &eval_context) const override; + virtual void get_path_dependent_evaluators( + set &evals) override; + virtual bool is_dead_end(EvaluationContext &eval_context) const override; virtual bool is_reliable_dead_end( EvaluationContext &eval_context) const override; }; - template TieBreakingOpenList::TieBreakingOpenList( - const vector> &evals, - bool unsafe_pruning, bool pref_only) + const vector> &evals, bool unsafe_pruning, + bool pref_only) : OpenList(pref_only), - size(0), evaluators(evals), + size(0), + evaluators(evals), allow_unsafe_pruning(unsafe_pruning) { } @@ -66,7 +66,8 @@ void TieBreakingOpenList::do_insertion( vector key; key.reserve(evaluators.size()); for (const shared_ptr &evaluator : evaluators) - key.push_back(eval_context.get_evaluator_value_or_infinity(evaluator.get())); + key.push_back( + eval_context.get_evaluator_value_or_infinity(evaluator.get())); buckets[key].push_back(entry); ++size; @@ -140,28 +141,25 @@ bool TieBreakingOpenList::is_reliable_dead_end( } TieBreakingOpenListFactory::TieBreakingOpenListFactory( - const vector> &evals, - bool unsafe_pruning, bool pref_only) - : evals(evals), - unsafe_pruning(unsafe_pruning), - pref_only(pref_only) { + const vector> &evals, bool unsafe_pruning, + bool pref_only) + : evals(evals), unsafe_pruning(unsafe_pruning), pref_only(pref_only) { utils::verify_list_not_empty(evals, "evals"); } -unique_ptr -TieBreakingOpenListFactory::create_state_open_list() { +unique_ptr TieBreakingOpenListFactory::create_state_open_list() { return make_unique>( evals, unsafe_pruning, pref_only); } -unique_ptr -TieBreakingOpenListFactory::create_edge_open_list() 
{ +unique_ptr TieBreakingOpenListFactory::create_edge_open_list() { return make_unique>( evals, unsafe_pruning, pref_only); } class TieBreakingOpenListFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + OpenListFactory, TieBreakingOpenListFactory> { public: TieBreakingOpenListFeature() : TypedFeature("tiebreaking") { document_title("Tie-breaking open list"); @@ -175,13 +173,12 @@ class TieBreakingOpenListFeature add_open_list_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list>("evals"), opts.get("unsafe_pruning"), - get_open_list_arguments_from_options(opts) - ); + get_open_list_arguments_from_options(opts)); } }; diff --git a/src/search/open_lists/type_based_open_list.cc b/src/search/open_lists/type_based_open_list.cc index f37aa43a3c..9b93d98013 100644 --- a/src/search/open_lists/type_based_open_list.cc +++ b/src/search/open_lists/type_based_open_list.cc @@ -33,8 +33,7 @@ class TypeBasedOpenList : public OpenList { public: explicit TypeBasedOpenList( - const vector> &evaluators, - int random_seed); + const vector> &evaluators, int random_seed); virtual Entry remove_min() override; virtual bool empty() const override; @@ -42,7 +41,8 @@ class TypeBasedOpenList : public OpenList { virtual bool is_dead_end(EvaluationContext &eval_context) const override; virtual bool is_reliable_dead_end( EvaluationContext &eval_context) const override; - virtual void get_path_dependent_evaluators(set &evals) override; + virtual void get_path_dependent_evaluators( + set &evals) override; }; template @@ -69,8 +69,7 @@ void TypeBasedOpenList::do_insertion( template TypeBasedOpenList::TypeBasedOpenList( const vector> &evaluators, int random_seed) - : evaluators(evaluators), - rng(utils::get_rng(random_seed)) { + : evaluators(evaluators), 
rng(utils::get_rng(random_seed)) { } template @@ -137,19 +136,16 @@ void TypeBasedOpenList::get_path_dependent_evaluators( TypeBasedOpenListFactory::TypeBasedOpenListFactory( const vector> &evaluators, int random_seed) - : evaluators(evaluators), - random_seed(random_seed) { + : evaluators(evaluators), random_seed(random_seed) { utils::verify_list_not_empty(evaluators, "evaluators"); } -unique_ptr -TypeBasedOpenListFactory::create_state_open_list() { +unique_ptr TypeBasedOpenListFactory::create_state_open_list() { return make_unique>( evaluators, random_seed); } -unique_ptr -TypeBasedOpenListFactory::create_edge_open_list() { +unique_ptr TypeBasedOpenListFactory::create_edge_open_list() { return make_unique>( evaluators, random_seed); } @@ -165,16 +161,15 @@ class TypeBasedOpenListFeature "When retrieving an entry, a bucket is chosen uniformly at " "random and one of the contained entries is selected " "uniformly randomly. " - "The algorithm is based on" + utils::format_conference_reference( + "The algorithm is based on" + + utils::format_conference_reference( {"Fan Xie", "Martin Mueller", "Robert Holte", "Tatsuya Imai"}, "Type-Based Exploration with Multiple Search Queues for" " Satisficing Planning", "http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/view/8472/8705", "Proceedings of the Twenty-Eigth AAAI Conference Conference" " on Artificial Intelligence (AAAI 2014)", - "2395-2401", - "AAAI Press", - "2014")); + "2395-2401", "AAAI Press", "2014")); add_list_option>( "evaluators", @@ -182,12 +177,11 @@ class TypeBasedOpenListFeature utils::add_rng_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list>("evaluators"), - utils::get_rng_arguments_from_options(opts) - ); + utils::get_rng_arguments_from_options(opts)); } }; diff --git 
a/src/search/operator_cost.cc b/src/search/operator_cost.cc index b2e45be109..b49dd9c039 100644 --- a/src/search/operator_cost.cc +++ b/src/search/operator_cost.cc @@ -1,15 +1,16 @@ #include "operator_cost.h" -#include "plugins/plugin.h" #include "task_proxy.h" +#include "plugins/plugin.h" #include "utils/system.h" #include using namespace std; -static int get_adjusted_action_cost(int cost, OperatorCost cost_type, bool is_unit_cost) { +static int get_adjusted_action_cost( + int cost, OperatorCost cost_type, bool is_unit_cost) { switch (cost_type) { case NORMAL: return cost; @@ -25,7 +26,8 @@ static int get_adjusted_action_cost(int cost, OperatorCost cost_type, bool is_un } } -int get_adjusted_action_cost(const OperatorProxy &op, OperatorCost cost_type, bool is_unit_cost) { +int get_adjusted_action_cost( + const OperatorProxy &op, OperatorCost cost_type, bool is_unit_cost) { if (op.is_axiom()) return 0; else @@ -46,13 +48,13 @@ tuple get_cost_type_arguments_from_options( return make_tuple(opts.get("cost_type")); } -static plugins::TypedEnumPlugin _enum_plugin({ - {"normal", "all actions are accounted for with their real cost"}, - {"one", "all actions are accounted for as unit cost"}, - {"plusone", "all actions are accounted for as their real cost + 1 " - "(except if all actions have original cost 1, " - "in which case cost 1 is used). " - "This is the behaviour known for the heuristics of the LAMA planner. " - "This is intended to be used by the heuristics, not search algorithms, " - "but is supported for both."} -}); +static plugins::TypedEnumPlugin _enum_plugin( + {{"normal", "all actions are accounted for with their real cost"}, + {"one", "all actions are accounted for as unit cost"}, + {"plusone", + "all actions are accounted for as their real cost + 1 " + "(except if all actions have original cost 1, " + "in which case cost 1 is used). " + "This is the behaviour known for the heuristics of the LAMA planner. 
" + "This is intended to be used by the heuristics, not search algorithms, " + "but is supported for both."}}); diff --git a/src/search/operator_cost.h b/src/search/operator_cost.h index be6d0942cd..96522ac05e 100644 --- a/src/search/operator_cost.h +++ b/src/search/operator_cost.h @@ -10,9 +10,15 @@ class Feature; class Options; } -enum OperatorCost {NORMAL = 0, ONE = 1, PLUSONE = 2, MAX_OPERATOR_COST}; +enum OperatorCost { + NORMAL = 0, + ONE = 1, + PLUSONE = 2, + MAX_OPERATOR_COST +}; -int get_adjusted_action_cost(const OperatorProxy &op, OperatorCost cost_type, bool is_unit_cost); +int get_adjusted_action_cost( + const OperatorProxy &op, OperatorCost cost_type, bool is_unit_cost); extern void add_cost_type_options_to_feature(plugins::Feature &feature); extern std::tuple get_cost_type_arguments_from_options( const plugins::Options &opts); diff --git a/src/search/operator_counting/constraint_generator.cc b/src/search/operator_counting/constraint_generator.cc index 9f5fe89d6b..eef2b0bca8 100644 --- a/src/search/operator_counting/constraint_generator.cc +++ b/src/search/operator_counting/constraint_generator.cc @@ -9,12 +9,13 @@ void ConstraintGenerator::initialize_constraints( const shared_ptr &, lp::LinearProgram &) { } -static class ConstraintGeneratorCategoryPlugin : public plugins::TypedCategoryPlugin { +static class ConstraintGeneratorCategoryPlugin + : public plugins::TypedCategoryPlugin { public: - ConstraintGeneratorCategoryPlugin() : TypedCategoryPlugin("ConstraintGenerator") { + ConstraintGeneratorCategoryPlugin() + : TypedCategoryPlugin("ConstraintGenerator") { // TODO: Replace empty string by synopsis for the wiki page. 
- //document_synopsis(""); + // document_synopsis(""); } -} -_category_plugin; +} _category_plugin; } diff --git a/src/search/operator_counting/constraint_generator.h b/src/search/operator_counting/constraint_generator.h index b218e9b0bb..5f1881dc95 100644 --- a/src/search/operator_counting/constraint_generator.h +++ b/src/search/operator_counting/constraint_generator.h @@ -1,11 +1,11 @@ #ifndef OPERATOR_COUNTING_CONSTRAINT_GENERATOR_H #define OPERATOR_COUNTING_CONSTRAINT_GENERATOR_H +#include "../algorithms/named_vector.h" + #include #include -#include "../algorithms/named_vector.h" - class AbstractTask; class State; diff --git a/src/search/operator_counting/delete_relaxation_if_constraints.cc b/src/search/operator_counting/delete_relaxation_if_constraints.cc index cc22cba743..7fc0ea8f5f 100644 --- a/src/search/operator_counting/delete_relaxation_if_constraints.cc +++ b/src/search/operator_counting/delete_relaxation_if_constraints.cc @@ -11,20 +11,18 @@ using namespace std; namespace operator_counting { -static void add_lp_variables(int count, LPVariables &variables, vector &indices, - double lower, double upper, double objective, - bool is_integer) { +static void add_lp_variables( + int count, LPVariables &variables, vector &indices, double lower, + double upper, double objective, bool is_integer) { for (int i = 0; i < count; ++i) { indices.push_back(variables.size()); variables.emplace_back(lower, upper, objective, is_integer); } } - DeleteRelaxationIFConstraints::DeleteRelaxationIFConstraints( bool use_time_vars, bool use_integer_vars) - : use_time_vars(use_time_vars), - use_integer_vars(use_integer_vars) { + : use_time_vars(use_time_vars), use_integer_vars(use_integer_vars) { } int DeleteRelaxationIFConstraints::get_var_op_used(const OperatorProxy &op) { @@ -60,14 +58,15 @@ void DeleteRelaxationIFConstraints::create_auxiliary_variables( int num_vars = vars.size(); // op_used - add_lp_variables(num_ops, variables, lp_var_id_op_used, 0, 1, 0, use_integer_vars); 
+ add_lp_variables( + num_ops, variables, lp_var_id_op_used, 0, 1, 0, use_integer_vars); // fact_reached lp_var_id_fact_reached.resize(num_vars); for (VariableProxy var : vars) { - add_lp_variables(var.get_domain_size(), variables, - lp_var_id_fact_reached[var.get_id()], - 0, 1, 0, use_integer_vars); + add_lp_variables( + var.get_domain_size(), variables, + lp_var_id_fact_reached[var.get_id()], 0, 1, 0, use_integer_vars); } // first_achiever @@ -75,28 +74,32 @@ void DeleteRelaxationIFConstraints::create_auxiliary_variables( for (OperatorProxy op : ops) { lp_var_id_first_achiever[op.get_id()].resize(num_vars); for (VariableProxy var : vars) { - add_lp_variables(var.get_domain_size(), variables, - lp_var_id_first_achiever[op.get_id()][var.get_id()], - 0, 1, 0, use_integer_vars); + add_lp_variables( + var.get_domain_size(), variables, + lp_var_id_first_achiever[op.get_id()][var.get_id()], 0, 1, 0, + use_integer_vars); } } if (use_time_vars) { // op_time - add_lp_variables(num_ops, variables, lp_var_id_op_time, 0, num_ops, 0, use_integer_vars); + add_lp_variables( + num_ops, variables, lp_var_id_op_time, 0, num_ops, 0, + use_integer_vars); // fact_time lp_var_id_fact_time.resize(num_vars); for (VariableProxy var : vars) { - add_lp_variables(var.get_domain_size(), variables, - lp_var_id_fact_time[var.get_id()], - 0, num_ops, 0, use_integer_vars); + add_lp_variables( + var.get_domain_size(), variables, + lp_var_id_fact_time[var.get_id()], 0, num_ops, 0, + use_integer_vars); } } } -void DeleteRelaxationIFConstraints::create_constraints(const TaskProxy &task_proxy, - lp::LinearProgram &lp) { +void DeleteRelaxationIFConstraints::create_constraints( + const TaskProxy &task_proxy, lp::LinearProgram &lp) { LPVariables &variables = lp.get_variables(); LPConstraints &constraints = lp.get_constraints(); double infinity = lp.get_infinity(); @@ -213,7 +216,6 @@ void DeleteRelaxationIFConstraints::create_constraints(const TaskProxy &task_pro } } - void 
DeleteRelaxationIFConstraints::initialize_constraints( const shared_ptr &task, lp::LinearProgram &lp) { TaskProxy task_proxy(*task); @@ -221,7 +223,6 @@ void DeleteRelaxationIFConstraints::initialize_constraints( create_constraints(task_proxy, lp); } - bool DeleteRelaxationIFConstraints::update_constraints( const State &state, lp::LPSolver &lp_solver) { // Unset old bounds. @@ -231,30 +232,32 @@ bool DeleteRelaxationIFConstraints::update_constraints( last_state.clear(); // Set new bounds. for (FactProxy f : state) { - lp_solver.set_constraint_lower_bound(get_constraint_id(f.get_pair()), -1); + lp_solver.set_constraint_lower_bound( + get_constraint_id(f.get_pair()), -1); last_state.push_back(f.get_pair()); } return false; } class DeleteRelaxationIFConstraintsFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + ConstraintGenerator, DeleteRelaxationIFConstraints> { public: - DeleteRelaxationIFConstraintsFeature() : TypedFeature("delete_relaxation_if_constraints") { + DeleteRelaxationIFConstraintsFeature() + : TypedFeature("delete_relaxation_if_constraints") { document_title("Delete relaxation constraints from Imai and Fukunaga"); document_synopsis( "Operator-counting constraints based on the delete relaxation. By " "default the constraints encode an easy-to-compute relaxation of h^+^. " "With the right settings, these constraints can be used to compute the " "optimal delete-relaxation heuristic h^+^ (see example below). 
" - "For details, see" + utils::format_journal_reference( + "For details, see" + + utils::format_journal_reference( {"Tatsuya Imai", "Alex Fukunaga"}, "On a practical, integer-linear programming model for delete-free" "tasks and its use as a heuristic for cost-optimal planning", "https://www.jair.org/index.php/jair/article/download/10972/26119/", - "Journal of Artificial Intelligence Research", - "54", - "631-677", + "Journal of Artificial Intelligence Research", "54", "631-677", "2015")); add_option( @@ -288,8 +291,8 @@ class DeleteRelaxationIFConstraintsFeature "option {{{delete_relaxation_rr_constraints}}}.\n"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( opts.get("use_time_vars"), opts.get("use_integer_vars")); diff --git a/src/search/operator_counting/delete_relaxation_if_constraints.h b/src/search/operator_counting/delete_relaxation_if_constraints.h index dab21c8b29..28ae825bd0 100644 --- a/src/search/operator_counting/delete_relaxation_if_constraints.h +++ b/src/search/operator_counting/delete_relaxation_if_constraints.h @@ -1,7 +1,7 @@ #ifndef OPERATOR_COUNTING_DELETE_RELAXATION_IF_CONSTRAINTS_H #define OPERATOR_COUNTING_DELETE_RELAXATION_IF_CONSTRAINTS_H -#include "constraint_generator.h" +#include "constraint_generator.h" #include "../task_proxy.h" diff --git a/src/search/operator_counting/delete_relaxation_rr_constraints.cc b/src/search/operator_counting/delete_relaxation_rr_constraints.cc index 0e305279ec..1160cd3cea 100644 --- a/src/search/operator_counting/delete_relaxation_rr_constraints.cc +++ b/src/search/operator_counting/delete_relaxation_rr_constraints.cc @@ -1,10 +1,10 @@ #include "delete_relaxation_rr_constraints.h" +#include "../task_proxy.h" + #include "../algorithms/priority_queues.h" #include "../lp/lp_solver.h" #include "../plugins/plugin.h" -#include "../algorithms/priority_queues.h" 
-#include "../task_proxy.h" #include "../utils/markup.h" #include @@ -91,7 +91,8 @@ class VEGraph { continue; } if (predecessor != successor) { - new_shortcuts.push_back(make_tuple(predecessor, fact, successor)); + new_shortcuts.push_back( + make_tuple(predecessor, fact, successor)); } } } @@ -152,7 +153,8 @@ class VEGraph { } } - const utils::HashSet> &get_delta() const { + const utils::HashSet> & + get_delta() const { return delta; } @@ -210,9 +212,9 @@ DeleteRelaxationRRConstraints::create_auxiliary_variables( for (int value = 0; value < num_values; ++value) { variables.emplace_back(0, 1, 0, use_integer_vars); #ifndef NDEBUG - variables.set_name(variables.size() - 1, - "f_" + var.get_name() + "_" - + var.get_fact(value).get_name()); + variables.set_name( + variables.size() - 1, + "f_" + var.get_name() + "_" + var.get_fact(value).get_name()); #endif } } @@ -225,9 +227,9 @@ DeleteRelaxationRRConstraints::create_auxiliary_variables( lp_var_ids.fpa_ids[op.get_id()][eff] = variables.size(); variables.emplace_back(0, 1, 0, use_integer_vars); #ifndef NDEBUG - variables.set_name(variables.size() - 1, - "f_" + eff_proxy.get_fact().get_name() - + "_achieved_by_" + op.get_name()); + variables.set_name( + variables.size() - 1, "f_" + eff_proxy.get_fact().get_name() + + "_achieved_by_" + op.get_name()); #endif } } @@ -235,7 +237,8 @@ DeleteRelaxationRRConstraints::create_auxiliary_variables( } void DeleteRelaxationRRConstraints::create_auxiliary_variables_ve( - const TaskProxy &task_proxy, const VEGraph &ve_graph, LPVariables &variables, + const TaskProxy &task_proxy, const VEGraph &ve_graph, + LPVariables &variables, DeleteRelaxationRRConstraints::LPVariableIDs &lp_var_ids) const { utils::unused_variable(task_proxy); // Add e_{i,j} variables. 
@@ -244,11 +247,13 @@ void DeleteRelaxationRRConstraints::create_auxiliary_variables_ve( variables.emplace_back(0, 1, 0, use_integer_vars); #ifndef NDEBUG auto [f1, f2] = edge; - FactProxy f1_proxy = task_proxy.get_variables()[f1.var].get_fact(f1.value); - FactProxy f2_proxy = task_proxy.get_variables()[f2.var].get_fact(f2.value); - variables.set_name(variables.size() - 1, - "e_" + f1_proxy.get_name() - + "_before_" + f2_proxy.get_name()); + FactProxy f1_proxy = + task_proxy.get_variables()[f1.var].get_fact(f1.value); + FactProxy f2_proxy = + task_proxy.get_variables()[f2.var].get_fact(f2.value); + variables.set_name( + variables.size() - 1, + "e_" + f1_proxy.get_name() + "_before_" + f2_proxy.get_name()); #endif } } @@ -268,8 +273,8 @@ void DeleteRelaxationRRConstraints::create_auxiliary_variables_tl( for (int value = 0; value < num_values; ++value) { variables.emplace_back(1, num_facts, 0, use_integer_vars); #ifndef NDEBUG - variables.set_name(variables.size() - 1, - "t_" + var.get_fact(value).get_name()); + variables.set_name( + variables.size() - 1, "t_" + var.get_fact(value).get_name()); #endif } } @@ -431,7 +436,8 @@ void DeleteRelaxationRRConstraints::create_constraints_ve( constraint only makes sense if the reverse edge is in the graph. 
*/ for (const pair &edge : ve_graph.get_edges()) { - pair reverse_edge = make_pair(edge.second, edge.first); + pair reverse_edge = + make_pair(edge.second, edge.first); if (lp_var_ids.has_e(reverse_edge)) { lp::LPConstraint constraint(-infinity, 1); constraint.insert(lp_var_ids.id_of_e(edge), 1); @@ -503,28 +509,25 @@ void DeleteRelaxationRRConstraints::create_constraints_tl( void DeleteRelaxationRRConstraints::initialize_constraints( const shared_ptr &task, lp::LinearProgram &lp) { TaskProxy task_proxy(*task); - LPVariableIDs lp_var_ids = create_auxiliary_variables( - task_proxy, lp.get_variables()); + LPVariableIDs lp_var_ids = + create_auxiliary_variables(task_proxy, lp.get_variables()); create_constraints(task_proxy, lp_var_ids, lp); switch (acyclicity_type) { - case AcyclicityType::VERTEX_ELIMINATION: - { + case AcyclicityType::VERTEX_ELIMINATION: { VEGraph ve_graph(task_proxy); create_auxiliary_variables_ve( task_proxy, ve_graph, lp.get_variables(), lp_var_ids); create_constraints_ve(task_proxy, ve_graph, lp_var_ids, lp); break; } - case AcyclicityType::TIME_LABELS: - { + case AcyclicityType::TIME_LABELS: { create_auxiliary_variables_tl( task_proxy, lp.get_variables(), lp_var_ids); create_constraints_tl(task_proxy, lp_var_ids, lp); break; } - case AcyclicityType::NONE: - { + case AcyclicityType::NONE: { break; } default: @@ -553,8 +556,8 @@ bool DeleteRelaxationRRConstraints::update_constraints( } class DeleteRelaxationRRConstraintsFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + ConstraintGenerator, DeleteRelaxationRRConstraints> { public: DeleteRelaxationRRConstraintsFeature() : TypedFeature("delete_relaxation_rr_constraints") { @@ -618,20 +621,19 @@ class DeleteRelaxationRRConstraintsFeature static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"time_labels", - "introduces MIP variables that encode the time at which each fact is " - "reached. 
Acyclicity is enforced with constraints that ensure that " - "preconditions of actions are reached before their effects."}, - {"vertex_elimination", - "introduces binary variables based on vertex elimination. These " - "variables encode that one fact has to be reached before another " - "fact. Instead of adding such variables for every pair of states, " - "they are only added for a subset sufficient to ensure acyclicity. " - "Constraints enforce that preconditions of actions are reached before " - "their effects and that the assignment encodes a valid order."}, - {"none", - "No acyclicity is enforced. The resulting heuristic is a relaxation " - "of the delete-relaxation heuristic."} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"time_labels", + "introduces MIP variables that encode the time at which each fact is " + "reached. Acyclicity is enforced with constraints that ensure that " + "preconditions of actions are reached before their effects."}, + {"vertex_elimination", + "introduces binary variables based on vertex elimination. These " + "variables encode that one fact has to be reached before another " + "fact. Instead of adding such variables for every pair of states, " + "they are only added for a subset sufficient to ensure acyclicity. " + "Constraints enforce that preconditions of actions are reached before " + "their effects and that the assignment encodes a valid order."}, + {"none", + "No acyclicity is enforced. 
The resulting heuristic is a relaxation " + "of the delete-relaxation heuristic."}}); } diff --git a/src/search/operator_counting/delete_relaxation_rr_constraints.h b/src/search/operator_counting/delete_relaxation_rr_constraints.h index cb43921cd9..bd913661d6 100644 --- a/src/search/operator_counting/delete_relaxation_rr_constraints.h +++ b/src/search/operator_counting/delete_relaxation_rr_constraints.h @@ -1,9 +1,10 @@ #ifndef OPERATOR_COUNTING_DELETE_RELAXATION_RR_CONSTRAINTS_H #define OPERATOR_COUNTING_DELETE_RELAXATION_RR_CONSTRAINTS_H -#include "constraint_generator.h" +#include "constraint_generator.h" #include "../task_proxy.h" + #include "../utils/hash.h" #include @@ -24,7 +25,9 @@ using LPConstraints = named_vector::NamedVector; using LPVariables = named_vector::NamedVector; enum class AcyclicityType { - TIME_LABELS, VERTEX_ELIMINATION, NONE + TIME_LABELS, + VERTEX_ELIMINATION, + NONE }; class DeleteRelaxationRRConstraints : public ConstraintGenerator { @@ -82,7 +85,6 @@ class DeleteRelaxationRRConstraints : public ConstraintGenerator { this makes it faster to unset the bounds when the state changes. 
*/ std::vector last_state; - int get_constraint_id(FactPair f) const; LPVariableIDs create_auxiliary_variables( diff --git a/src/search/operator_counting/lm_cut_constraints.cc b/src/search/operator_counting/lm_cut_constraints.cc index 5813662d98..0f0a9ad9cb 100644 --- a/src/search/operator_counting/lm_cut_constraints.cc +++ b/src/search/operator_counting/lm_cut_constraints.cc @@ -18,16 +18,14 @@ void LMCutConstraints::initialize_constraints( make_unique(task_proxy); } - -bool LMCutConstraints::update_constraints(const State &state, - lp::LPSolver &lp_solver) { +bool LMCutConstraints::update_constraints( + const State &state, lp::LPSolver &lp_solver) { assert(landmark_generator); named_vector::NamedVector constraints; double infinity = lp_solver.get_infinity(); bool dead_end = landmark_generator->compute_landmarks( - state, nullptr, - [&](const vector &op_ids, int /*cost*/) { + state, nullptr, [&](const vector &op_ids, int /*cost*/) { constraints.emplace_back(1.0, infinity); lp::LPConstraint &landmark_constraint = constraints.back(); for (int op_id : op_ids) { @@ -53,29 +51,27 @@ class LMCutConstraintsFeature "For each landmark L the constraint sum_{o in L} Count_o >= 1 is added " "to the operator-counting LP temporarily. After the heuristic value " "for the state is computed, all temporary constraints are removed " - "again. For details, see" + utils::format_conference_reference( + "again. 
For details, see" + + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert", "Blai Bonet"}, "LP-based Heuristics for Cost-optimal Planning", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7892/8031", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", - "226-234", - "AAAI Press", - "2014") + utils::format_conference_reference( + "226-234", "AAAI Press", "2014") + + utils::format_conference_reference( {"Blai Bonet"}, "An admissible heuristic for SAS+ planning obtained from the" " state equation", "http://ijcai.org/papers13/Papers/IJCAI13-335.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", - "2268-2274", - "AAAI Press", - "2013")); + "2268-2274", "AAAI Press", "2013")); } - virtual shared_ptr - create_component(const plugins::Options &) const override { + virtual shared_ptr create_component( + const plugins::Options &) const override { return make_shared(); } }; diff --git a/src/search/operator_counting/lm_cut_constraints.h b/src/search/operator_counting/lm_cut_constraints.h index 8634dc6fb1..ae54fe46d1 100644 --- a/src/search/operator_counting/lm_cut_constraints.h +++ b/src/search/operator_counting/lm_cut_constraints.h @@ -14,9 +14,10 @@ class LMCutConstraints : public ConstraintGenerator { std::unique_ptr landmark_generator; public: virtual void initialize_constraints( - const std::shared_ptr &task, lp::LinearProgram &lp) override; - virtual bool update_constraints(const State &state, - lp::LPSolver &lp_solver) override; + const std::shared_ptr &task, + lp::LinearProgram &lp) override; + virtual bool update_constraints( + const State &state, lp::LPSolver &lp_solver) override; }; } diff --git a/src/search/operator_counting/operator_counting_heuristic.cc b/src/search/operator_counting/operator_counting_heuristic.cc index 3c20b6964e..2fb5a27ffe 100644 --- 
a/src/search/operator_counting/operator_counting_heuristic.cc +++ b/src/search/operator_counting/operator_counting_heuristic.cc @@ -20,18 +20,21 @@ OperatorCountingHeuristic::OperatorCountingHeuristic( : Heuristic(transform, cache_estimates, description, verbosity), constraint_generators(constraint_generators), lp_solver(lpsolver) { - utils::verify_list_not_empty(constraint_generators, "constraint_generators"); + utils::verify_list_not_empty( + constraint_generators, "constraint_generators"); lp_solver.set_mip_gap(0); named_vector::NamedVector variables; double infinity = lp_solver.get_infinity(); for (OperatorProxy op : task_proxy.get_operators()) { int op_cost = op.get_cost(); - variables.push_back(lp::LPVariable(0, infinity, op_cost, use_integer_operator_counts)); + variables.push_back( + lp::LPVariable(0, infinity, op_cost, use_integer_operator_counts)); #ifndef NDEBUG variables.set_name(op.get_id(), op.get_name()); #endif } - lp::LinearProgram lp(lp::LPObjectiveSense::MINIMIZE, move(variables), {}, infinity); + lp::LinearProgram lp( + lp::LPObjectiveSense::MINIMIZE, move(variables), {}, infinity); for (const auto &generator : constraint_generators) { generator->initialize_constraints(task, lp); } @@ -74,16 +77,15 @@ class OperatorCountingHeuristicFeature "are guaranteed to have a solution with Count_o = occurrences(o, pi) " "for every plan pi. Minimizing the total cost of operators subject to " "some operator-counting constraints is an admissible heuristic. 
" - "For details, see" + utils::format_conference_reference( + "For details, see" + + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert", "Blai Bonet"}, "LP-based Heuristics for Cost-optimal Planning", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7892/8031", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", - "226-234", - "AAAI Press", - "2014")); + "226-234", "AAAI Press", "2014")); add_list_option>( "constraint_generators", @@ -118,15 +120,14 @@ class OperatorCountingHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list>( "constraint_generators"), opts.get("use_integer_operator_counts"), lp::get_lp_solver_arguments_from_options(opts), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/operator_counting/operator_counting_heuristic.h b/src/search/operator_counting/operator_counting_heuristic.h index b7d8ef2b0d..4d9a5692fd 100644 --- a/src/search/operator_counting/operator_counting_heuristic.h +++ b/src/search/operator_counting/operator_counting_heuristic.h @@ -23,11 +23,10 @@ class OperatorCountingHeuristic : public Heuristic { public: OperatorCountingHeuristic( const std::vector> - &constraint_generators, + &constraint_generators, bool use_integer_operator_counts, lp::LPSolverType lpsolver, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/operator_counting/pho_constraints.cc 
b/src/search/operator_counting/pho_constraints.cc index 4670f05ab1..3e874b4b76 100644 --- a/src/search/operator_counting/pho_constraints.cc +++ b/src/search/operator_counting/pho_constraints.cc @@ -1,10 +1,10 @@ #include "pho_constraints.h" #include "../lp/lp_solver.h" -#include "../plugins/plugin.h" #include "../pdbs/pattern_database.h" #include "../pdbs/pattern_generator.h" #include "../pdbs/utils.h" +#include "../plugins/plugin.h" #include "../utils/markup.h" #include @@ -34,7 +34,8 @@ void PhOConstraints::initialize_constraints( pdbs = pattern_collection_info.get_pdbs(); pattern_generator = nullptr; TaskProxy task_proxy(*task); - named_vector::NamedVector &constraints = lp.get_constraints(); + named_vector::NamedVector &constraints = + lp.get_constraints(); constraint_offset = constraints.size(); for (const shared_ptr &pdb : *pdbs) { constraints.emplace_back(0, lp.get_infinity()); @@ -47,8 +48,8 @@ void PhOConstraints::initialize_constraints( } } -bool PhOConstraints::update_constraints(const State &state, - lp::LPSolver &lp_solver) { +bool PhOConstraints::update_constraints( + const State &state, lp::LPSolver &lp_solver) { state.unpack(); for (size_t i = 0; i < pdbs->size(); ++i) { int constraint_id = constraint_offset + i; @@ -70,27 +71,23 @@ class PhOConstraintsFeature document_synopsis( "The generator will compute a PDB for each pattern and add the" " constraint h(s) <= sum_{o in relevant(h)} Count_o. 
For details," - " see" + utils::format_conference_reference( + " see" + + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert"}, "Getting the Most Out of Pattern Databases for Classical Planning", "http://ijcai.org/papers13/Papers/IJCAI13-347.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", - "2357-2364", - "AAAI Press", - "2013")); + "2357-2364", "AAAI Press", "2013")); add_option>( - "patterns", - "pattern generation method", - "systematic(2)"); + "patterns", "pattern generation method", "systematic(2)"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get>( - "patterns")); + opts.get>("patterns")); } }; diff --git a/src/search/operator_counting/pho_constraints.h b/src/search/operator_counting/pho_constraints.h index 696bb59aba..63e8c2db81 100644 --- a/src/search/operator_counting/pho_constraints.h +++ b/src/search/operator_counting/pho_constraints.h @@ -4,7 +4,6 @@ #include "constraint_generator.h" #include "../algorithms/named_vector.h" - #include "../pdbs/types.h" #include @@ -28,7 +27,8 @@ class PhOConstraints : public ConstraintGenerator { const std::shared_ptr &patterns); virtual void initialize_constraints( - const std::shared_ptr &task, lp::LinearProgram &lp) override; + const std::shared_ptr &task, + lp::LinearProgram &lp) override; virtual bool update_constraints( const State &state, lp::LPSolver &lp_solver) override; }; diff --git a/src/search/operator_counting/state_equation_constraints.cc b/src/search/operator_counting/state_equation_constraints.cc index 877056d8b7..81112fcfbf 100644 --- a/src/search/operator_counting/state_equation_constraints.cc +++ b/src/search/operator_counting/state_equation_constraints.cc @@ -11,14 +11,12 @@ using namespace std; namespace 
operator_counting { -StateEquationConstraints::StateEquationConstraints( - utils::Verbosity verbosity) +StateEquationConstraints::StateEquationConstraints(utils::Verbosity verbosity) : log(utils::get_log_for_verbosity(verbosity)) { } -static void add_indices_to_constraint(lp::LPConstraint &constraint, - const set &indices, - double coefficient) { +static void add_indices_to_constraint( + lp::LPConstraint &constraint, const set &indices, double coefficient) { for (int index : indices) { constraint.insert(index, coefficient); } @@ -62,8 +60,10 @@ void StateEquationConstraints::add_constraints( for (Proposition &prop : var_propositions) { lp::LPConstraint constraint(-infinity, infinity); add_indices_to_constraint(constraint, prop.always_produced_by, 1.0); - add_indices_to_constraint(constraint, prop.sometimes_produced_by, 1.0); - add_indices_to_constraint(constraint, prop.always_consumed_by, -1.0); + add_indices_to_constraint( + constraint, prop.sometimes_produced_by, 1.0); + add_indices_to_constraint( + constraint, prop.always_consumed_by, -1.0); if (!constraint.empty()) { prop.constraint_index = constraints.size(); constraints.push_back(constraint); @@ -91,8 +91,8 @@ void StateEquationConstraints::initialize_constraints( } } -bool StateEquationConstraints::update_constraints(const State &state, - lp::LPSolver &lp_solver) { +bool StateEquationConstraints::update_constraints( + const State &state, lp::LPSolver &lp_solver) { // Compute the bounds for the rows in the LP. 
for (size_t var = 0; var < propositions.size(); ++var) { int num_values = propositions[var].size(); @@ -120,54 +120,52 @@ bool StateEquationConstraints::update_constraints(const State &state, } class StateEquationConstraintsFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + ConstraintGenerator, StateEquationConstraints> { public: - StateEquationConstraintsFeature() : TypedFeature("state_equation_constraints") { + StateEquationConstraintsFeature() + : TypedFeature("state_equation_constraints") { document_title("State equation constraints"); document_synopsis( "For each fact, a permanent constraint is added that considers the net " "change of the fact, i.e., the total number of times the fact is added " "minus the total number of times is removed. The bounds of each " "constraint depend on the current state and the goal state and are " - "updated in each state. For details, see" + utils::format_conference_reference( + "updated in each state. For details, see" + + utils::format_conference_reference( {"Menkes van den Briel", "J. 
Benton", "Subbarao Kambhampati", "Thomas Vossen"}, "An LP-based heuristic for optimal planning", "http://link.springer.com/chapter/10.1007/978-3-540-74970-7_46", "Proceedings of the Thirteenth International Conference on" " Principles and Practice of Constraint Programming (CP 2007)", - "651-665", - "Springer-Verlag", - "2007") + utils::format_conference_reference( + "651-665", "Springer-Verlag", "2007") + + utils::format_conference_reference( {"Blai Bonet"}, "An admissible heuristic for SAS+ planning obtained from the" " state equation", "http://ijcai.org/papers13/Papers/IJCAI13-335.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", - "2268-2274", - "AAAI Press", - "2013") + utils::format_conference_reference( + "2268-2274", "AAAI Press", "2013") + + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert", "Blai Bonet"}, "LP-based Heuristics for Cost-optimal Planning", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7892/8031", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", - "226-234", - "AAAI Press", - "2014")); + "226-234", "AAAI Press", "2014")); utils::add_log_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( utils::get_log_arguments_from_options(opts)); } }; - static plugins::FeaturePlugin _plugin; } diff --git a/src/search/operator_counting/state_equation_constraints.h b/src/search/operator_counting/state_equation_constraints.h index 7d2c42e72b..f7e8024e04 100644 --- a/src/search/operator_counting/state_equation_constraints.h +++ b/src/search/operator_counting/state_equation_constraints.h @@ -38,12 +38,16 @@ class StateEquationConstraints : public ConstraintGenerator { std::vector 
goal_state; void build_propositions(const TaskProxy &task_proxy); - void add_constraints(named_vector::NamedVector &constraints, double infinity); + void add_constraints( + named_vector::NamedVector &constraints, + double infinity); public: explicit StateEquationConstraints(utils::Verbosity verbosity); virtual void initialize_constraints( - const std::shared_ptr &task, lp::LinearProgram &lp) override; - virtual bool update_constraints(const State &state, lp::LPSolver &lp_solver) override; + const std::shared_ptr &task, + lp::LinearProgram &lp) override; + virtual bool update_constraints( + const State &state, lp::LPSolver &lp_solver) override; }; } diff --git a/src/search/operator_id.h b/src/search/operator_id.h index beb520ac5a..9cf6c12501 100644 --- a/src/search/operator_id.h +++ b/src/search/operator_id.h @@ -27,8 +27,7 @@ class OperatorID { int index; public: - explicit OperatorID(int index) - : index(index) { + explicit OperatorID(int index) : index(index) { } static const OperatorID no_operator; diff --git a/src/search/parser/abstract_syntax_tree.cc b/src/search/parser/abstract_syntax_tree.cc index dbdfaee4d4..9c30ccebfa 100644 --- a/src/search/parser/abstract_syntax_tree.cc +++ b/src/search/parser/abstract_syntax_tree.cc @@ -27,8 +27,10 @@ class DecorateContext : public utils::Context { void add_variable(const string &name, const plugins::Type &type) { if (has_variable(name)) - error("Variable '" + name + "' is already defined in the " - "current scope. Shadowing variables is not supported."); + error( + "Variable '" + name + + "' is already defined in the " + "current scope. 
Shadowing variables is not supported."); variables.insert({name, &type}); } @@ -45,7 +47,9 @@ class DecorateContext : public utils::Context { return *variables[name]; } - const plugins::Registry &get_registry() const {return registry;} + const plugins::Registry &get_registry() const { + return registry; + } }; template @@ -69,9 +73,11 @@ DecoratedASTNodePtr ASTNode::decorate() const { return decorate(context); } -LetNode::LetNode(const string &variable_name, ASTNodePtr variable_definition, - ASTNodePtr nested_value) - : variable_name(variable_name), variable_definition(move(variable_definition)), +LetNode::LetNode( + const string &variable_name, ASTNodePtr variable_definition, + ASTNodePtr nested_value) + : variable_name(variable_name), + variable_definition(move(variable_definition)), nested_value(move(nested_value)) { } @@ -96,7 +102,8 @@ DecoratedASTNodePtr LetNode::decorate(DecorateContext &context) const { context.remove_variable(variable_name); } return make_unique( - variable_name, move(decorated_definition), move(decorated_nested_value)); + variable_name, move(decorated_definition), + move(decorated_nested_value)); } void LetNode::dump(string indent) const { @@ -115,16 +122,19 @@ const plugins::Type &LetNode::get_type(DecorateContext &context) const { return nested_type; } -FunctionCallNode::FunctionCallNode(const string &name, - vector &&positional_arguments, - unordered_map &&keyword_arguments, - const string &unparsed_config) - : name(name), positional_arguments(move(positional_arguments)), - keyword_arguments(move(keyword_arguments)), unparsed_config(unparsed_config) { +FunctionCallNode::FunctionCallNode( + const string &name, vector &&positional_arguments, + unordered_map &&keyword_arguments, + const string &unparsed_config) + : name(name), + positional_arguments(move(positional_arguments)), + keyword_arguments(move(keyword_arguments)), + unparsed_config(unparsed_config) { } static DecoratedASTNodePtr decorate_and_convert( - const ASTNode &node, const 
plugins::Type &target_type, DecorateContext &context) { + const ASTNode &node, const plugins::Type &target_type, + DecorateContext &context) { const plugins::Type &node_type = node.get_type(context); DecoratedASTNodePtr decorated_node = node.decorate(context); @@ -135,9 +145,8 @@ static DecoratedASTNodePtr decorate_and_convert( move(decorated_node), node_type, target_type); } else { ostringstream message; - message << "Cannot convert from type '" - << node_type.name() << "' to type '" << target_type.name() - << "'" << endl; + message << "Cannot convert from type '" << node_type.name() + << "' to type '" << target_type.name() << "'" << endl; context.error(message.str()); } } @@ -152,28 +161,30 @@ bool FunctionCallNode::collect_argument( return false; } - DecoratedASTNodePtr decorated_arg = decorate_and_convert( - arg, arg_info.type, context); + DecoratedASTNodePtr decorated_arg = + decorate_and_convert(arg, arg_info.type, context); if (arg_info.bounds.has_bound()) { DecoratedASTNodePtr decorated_min_node; { utils::TraceBlock block(context, "Handling lower bound"); ASTNodePtr min_node = parse_ast_node(arg_info.bounds.min, context); - decorated_min_node = decorate_and_convert( - *min_node, arg_info.type, context); + decorated_min_node = + decorate_and_convert(*min_node, arg_info.type, context); } DecoratedASTNodePtr decorated_max_node; { utils::TraceBlock block(context, "Handling upper bound"); ASTNodePtr max_node = parse_ast_node(arg_info.bounds.max, context); - decorated_max_node = decorate_and_convert( - *max_node, arg_info.type, context); + decorated_max_node = + decorate_and_convert(*max_node, arg_info.type, context); } decorated_arg = make_unique( - move(decorated_arg), move(decorated_min_node), move(decorated_max_node)); + move(decorated_arg), move(decorated_min_node), + move(decorated_max_node)); } - FunctionArgument function_arg(key, move(decorated_arg), arg_info.lazy_construction); + FunctionArgument function_arg( + key, move(decorated_arg), 
arg_info.lazy_construction); arguments.insert({key, move(function_arg)}); return true; } @@ -190,7 +201,8 @@ void FunctionCallNode::collect_keyword_arguments( for (const auto &key_and_arg : keyword_arguments) { const string &key = key_and_arg.first; const ASTNode &arg = *key_and_arg.second; - utils::TraceBlock block(context, "Checking the keyword argument '" + key + "'."); + utils::TraceBlock block( + context, "Checking the keyword argument '" + key + "'."); if (!argument_infos_by_key.count(key)) { vector valid_keys = get_keys(argument_infos_by_key); ostringstream message; @@ -202,14 +214,14 @@ void FunctionCallNode::collect_keyword_arguments( const plugins::ArgumentInfo &arg_info = argument_infos_by_key.at(key); bool success = collect_argument(arg, arg_info, context, arguments); if (!success) { - ABORT("Multiple keyword definitions using the same key '" - + key + "'. This should be impossible here because we " - + "sort by key earlier."); + ABORT( + "Multiple keyword definitions using the same key '" + key + + "'. This should be impossible here because we " + + "sort by key earlier."); } } } - /* This function has to be called *AFTER* collect_keyword_arguments. */ void FunctionCallNode::collect_positional_arguments( const vector &argument_infos, @@ -220,7 +232,7 @@ void FunctionCallNode::collect_positional_arguments( if (num_pos_args + num_kwargs > static_cast(argument_infos.size())) { vector allowed_keys; allowed_keys.reserve(argument_infos.size()); - for (const auto &arg_info: argument_infos) { + for (const auto &arg_info : argument_infos) { allowed_keys.push_back(arg_info.key); } vector given_positional_keys; @@ -248,9 +260,8 @@ void FunctionCallNode::collect_positional_arguments( const ASTNode &arg = *positional_arguments[i]; const plugins::ArgumentInfo &arg_info = argument_infos[i]; utils::TraceBlock block( - context, - "Checking the " + to_string(i + 1) + - ". positional argument (" + arg_info.key + ")"); + context, "Checking the " + to_string(i + 1) + + ". 
positional argument (" + arg_info.key + ")"); bool success = collect_argument(arg, arg_info, context, arguments); if (!success) { ostringstream message; @@ -270,17 +281,20 @@ void FunctionCallNode::collect_default_values( for (const plugins::ArgumentInfo &arg_info : argument_infos) { const string &key = arg_info.key; if (!arguments.count(key)) { - utils::TraceBlock block(context, "Checking the default for argument '" + key + "'."); + utils::TraceBlock block( + context, "Checking the default for argument '" + key + "'."); if (arg_info.has_default()) { ASTNodePtr arg; { utils::TraceBlock block(context, "Parsing default value"); arg = parse_ast_node(arg_info.default_value, context); } - bool success = collect_argument(*arg, arg_info, context, arguments); + bool success = + collect_argument(*arg, arg_info, context, arguments); if (!success) { - ABORT("Default argument for '" + key + "' set although " - + "value for keyword exists. This should be impossible."); + ABORT( + "Default argument for '" + key + "' set although " + + "value for keyword exists. 
This should be impossible."); } } else if (!arg_info.is_optional()) { context.error("Missing argument is mandatory!"); @@ -296,7 +310,8 @@ DecoratedASTNodePtr FunctionCallNode::decorate(DecorateContext &context) const { context.error("Plugin '" + name + "' is not defined."); } shared_ptr feature = registry.get_feature(name); - const vector &argument_infos = feature->get_arguments(); + const vector &argument_infos = + feature->get_arguments(); CollectedArguments arguments_by_key; collect_keyword_arguments(argument_infos, context, arguments_by_key); @@ -307,8 +322,8 @@ DecoratedASTNodePtr FunctionCallNode::decorate(DecorateContext &context) const { for (auto &key_and_arg : arguments_by_key) { arguments.push_back(move(key_and_arg.second)); } - return make_unique(feature, move(arguments), - unparsed_config); + return make_unique( + feature, move(arguments), unparsed_config); } void FunctionCallNode::dump(string indent) const { @@ -325,24 +340,27 @@ void FunctionCallNode::dump(string indent) const { } } -const plugins::Type &FunctionCallNode::get_type(DecorateContext &context) const { +const plugins::Type &FunctionCallNode::get_type( + DecorateContext &context) const { const plugins::Registry ®istry = context.get_registry(); if (!registry.has_feature(name)) { - context.error("No feature defined for FunctionCallNode '" + name + "'."); + context.error( + "No feature defined for FunctionCallNode '" + name + "'."); } - const shared_ptr &feature = registry.get_feature(name); + const shared_ptr &feature = + registry.get_feature(name); return feature->get_type(); } -ListNode::ListNode(vector &&elements) - : elements(move(elements)) { +ListNode::ListNode(vector &&elements) : elements(move(elements)) { } DecoratedASTNodePtr ListNode::decorate(DecorateContext &context) const { utils::TraceBlock block(context, "Checking list"); vector decorated_elements; if (!elements.empty()) { - const plugins::Type *common_element_type = get_common_element_type(context); + const plugins::Type 
*common_element_type = + get_common_element_type(context); if (!common_element_type) { vector element_type_names; element_type_names.reserve(elements.size()); @@ -350,17 +368,21 @@ DecoratedASTNodePtr ListNode::decorate(DecorateContext &context) const { const plugins::Type &element_type = element->get_type(context); element_type_names.push_back(element_type.name()); } - context.error("List contains elements of different types: [" - + utils::join(element_type_names, ", ") + "]."); + context.error( + "List contains elements of different types: [" + + utils::join(element_type_names, ", ") + "]."); } for (size_t i = 0; i < elements.size(); i++) { - utils::TraceBlock block(context, "Checking " + to_string(i) + ". element"); + utils::TraceBlock block( + context, "Checking " + to_string(i) + ". element"); const plugins::Type &element_type = elements[i]->get_type(context); - DecoratedASTNodePtr decorated_element_node = elements[i]->decorate(context); + DecoratedASTNodePtr decorated_element_node = + elements[i]->decorate(context); if (element_type != *common_element_type) { assert(element_type.can_convert_into(*common_element_type)); decorated_element_node = make_unique( - move(decorated_element_node), element_type, *common_element_type); + move(decorated_element_node), element_type, + *common_element_type); } decorated_elements.push_back(move(decorated_element_node)); } @@ -381,9 +403,9 @@ const plugins::Type *ListNode::get_common_element_type( const plugins::Type *common_element_type = nullptr; for (const ASTNodePtr &element : elements) { const plugins::Type *element_type = &element->get_type(context); - if ((!common_element_type) - || (!element_type->can_convert_into(*common_element_type) && - common_element_type->can_convert_into(*element_type))) { + if ((!common_element_type) || + (!element_type->can_convert_into(*common_element_type) && + common_element_type->can_convert_into(*element_type))) { common_element_type = element_type; } else if 
(!element_type->can_convert_into(*common_element_type)) { return nullptr; @@ -399,12 +421,12 @@ const plugins::Type &ListNode::get_type(DecorateContext &context) const { const plugins::Type *element_type = get_common_element_type(context); if (!element_type) context.error("List elements cannot be converted to common type."); - return plugins::TypeRegistry::instance()->create_list_type(*element_type); + return plugins::TypeRegistry::instance()->create_list_type( + *element_type); } } -LiteralNode::LiteralNode(const Token &value) - : value(value) { +LiteralNode::LiteralNode(const Token &value) : value(value) { } DecoratedASTNodePtr LiteralNode::decorate(DecorateContext &context) const { @@ -429,14 +451,15 @@ DecoratedASTNodePtr LiteralNode::decorate(DecorateContext &context) const { case TokenType::IDENTIFIER: return make_unique(value.content); default: - ABORT("LiteralNode has unexpected token type '" + - token_type_name(value.type) + "'."); + ABORT( + "LiteralNode has unexpected token type '" + + token_type_name(value.type) + "'."); } } void LiteralNode::dump(string indent) const { - cout << indent << token_type_name(value.type) << ": " - << value.content << endl; + cout << indent << token_type_name(value.type) << ": " << value.content + << endl; } const plugins::Type &LiteralNode::get_type(DecorateContext &context) const { @@ -455,8 +478,9 @@ const plugins::Type &LiteralNode::get_type(DecorateContext &context) const { } return plugins::TypeRegistry::SYMBOL_TYPE; default: - ABORT("LiteralNode has unexpected token type '" + - token_type_name(value.type) + "'."); + ABORT( + "LiteralNode has unexpected token type '" + + token_type_name(value.type) + "'."); } } } diff --git a/src/search/parser/abstract_syntax_tree.h b/src/search/parser/abstract_syntax_tree.h index 97b35c90f0..7bdf6a8fb2 100644 --- a/src/search/parser/abstract_syntax_tree.h +++ b/src/search/parser/abstract_syntax_tree.h @@ -35,8 +35,9 @@ class LetNode : public ASTNode { ASTNodePtr variable_definition; 
ASTNodePtr nested_value; public: - LetNode(const std::string &variable_name, ASTNodePtr variable_definition, - ASTNodePtr nested_value); + LetNode( + const std::string &variable_name, ASTNodePtr variable_definition, + ASTNodePtr nested_value); DecoratedASTNodePtr decorate(DecorateContext &context) const override; void dump(std::string indent) const override; const plugins::Type &get_type(DecorateContext &context) const override; @@ -48,29 +49,25 @@ class FunctionCallNode : public ASTNode { std::unordered_map keyword_arguments; std::string unparsed_config; - using CollectedArguments = std::unordered_map; + using CollectedArguments = + std::unordered_map; bool collect_argument( - const ASTNode &arg, - const plugins::ArgumentInfo &arg_info, - DecorateContext &context, - CollectedArguments &arguments) const; + const ASTNode &arg, const plugins::ArgumentInfo &arg_info, + DecorateContext &context, CollectedArguments &arguments) const; void collect_keyword_arguments( const std::vector &argument_infos, - DecorateContext &context, - CollectedArguments &arguments) const; + DecorateContext &context, CollectedArguments &arguments) const; void collect_positional_arguments( const std::vector &argument_infos, - DecorateContext &context, - CollectedArguments &arguments) const; + DecorateContext &context, CollectedArguments &arguments) const; void collect_default_values( const std::vector &argument_infos, - DecorateContext &context, - CollectedArguments &arguments) const; + DecorateContext &context, CollectedArguments &arguments) const; public: - FunctionCallNode(const std::string &name, - std::vector &&positional_arguments, - std::unordered_map &&keyword_arguments, - const std::string &unparsed_config); + FunctionCallNode( + const std::string &name, std::vector &&positional_arguments, + std::unordered_map &&keyword_arguments, + const std::string &unparsed_config); DecoratedASTNodePtr decorate(DecorateContext &context) const override; void dump(std::string indent) const override; 
const plugins::Type &get_type(DecorateContext &context) const override; @@ -82,7 +79,8 @@ class ListNode : public ASTNode { explicit ListNode(std::vector &&elements); DecoratedASTNodePtr decorate(DecorateContext &context) const override; void dump(std::string indent) const override; - const plugins::Type *get_common_element_type(DecorateContext &context) const; + const plugins::Type *get_common_element_type( + DecorateContext &context) const; const plugins::Type &get_type(DecorateContext &context) const override; }; diff --git a/src/search/parser/decorated_abstract_syntax_tree.cc b/src/search/parser/decorated_abstract_syntax_tree.cc index 552091f631..9a014c38de 100644 --- a/src/search/parser/decorated_abstract_syntax_tree.cc +++ b/src/search/parser/decorated_abstract_syntax_tree.cc @@ -10,7 +10,8 @@ using namespace std; namespace parser { -void ConstructContext::set_variable(const string &name, const plugins::Any &value) { +void ConstructContext::set_variable( + const string &name, const plugins::Any &value) { variables[name] = value; } @@ -27,7 +28,8 @@ plugins::Any ConstructContext::get_variable(const string &name) const { return variable; } -LazyValue::LazyValue(const DecoratedASTNode &node, const ConstructContext &context) +LazyValue::LazyValue( + const DecoratedASTNode &node, const ConstructContext &context) : context(context), node(node.clone()) { } @@ -55,9 +57,9 @@ vector LazyValue::construct_lazy_list() { elements.reserve(list_node->get_elements().size()); int elem = 1; for (const DecoratedASTNodePtr &element : list_node->get_elements()) { - utils::TraceBlock(context, - "Create LazyValue for " + to_string(elem) + - ". list element"); + utils::TraceBlock( + context, + "Create LazyValue for " + to_string(elem) + ". 
list element"); elements.emplace_back(LazyValue(*element, context)); elem++; } @@ -70,8 +72,8 @@ plugins::Any DecoratedASTNode::construct() const { return construct(context); } -FunctionArgument::FunctionArgument(const string &key, DecoratedASTNodePtr value, - bool lazy_construction) +FunctionArgument::FunctionArgument( + const string &key, DecoratedASTNodePtr value, bool lazy_construction) : key(key), value(move(value)), lazy_construction(lazy_construction) { } @@ -93,8 +95,7 @@ bool FunctionArgument::is_lazily_constructed() const { } DecoratedLetNode::DecoratedLetNode( - const string &variable_name, - DecoratedASTNodePtr variable_definition, + const string &variable_name, DecoratedASTNodePtr variable_definition, DecoratedASTNodePtr nested_value) : variable_name(variable_name), variable_definition(move(variable_definition)), @@ -105,7 +106,8 @@ plugins::Any DecoratedLetNode::construct(ConstructContext &context) const { utils::TraceBlock block(context, "Constructing let-expression"); plugins::Any variable_value; { - utils::TraceBlock block(context, "Constructing variable '" + variable_name + "'"); + utils::TraceBlock block( + context, "Constructing variable '" + variable_name + "'"); variable_value = variable_definition->construct(context); } plugins::Any result; @@ -127,18 +129,23 @@ void DecoratedLetNode::dump(string indent) const { } DecoratedFunctionCallNode::DecoratedFunctionCallNode( - const shared_ptr &feature, vector &&arguments, - const string &unparsed_config) - : feature(feature), arguments(move(arguments)), unparsed_config(unparsed_config) { -} - -plugins::Any DecoratedFunctionCallNode::construct(ConstructContext &context) const { - utils::TraceBlock block(context, "Constructing feature '" + feature->get_key() + "': " + - unparsed_config); + const shared_ptr &feature, + vector &&arguments, const string &unparsed_config) + : feature(feature), + arguments(move(arguments)), + unparsed_config(unparsed_config) { +} + +plugins::Any 
DecoratedFunctionCallNode::construct( + ConstructContext &context) const { + utils::TraceBlock block( + context, "Constructing feature '" + feature->get_key() + + "': " + unparsed_config); plugins::Options opts; opts.set_unparsed_config(unparsed_config); for (const FunctionArgument &arg : arguments) { - utils::TraceBlock block(context, "Constructing argument '" + arg.get_key() + "'"); + utils::TraceBlock block( + context, "Constructing argument '" + arg.get_key() + "'"); if (arg.is_lazily_constructed()) { opts.set(arg.get_key(), LazyValue(arg.get_value(), context)); } else { @@ -149,8 +156,8 @@ plugins::Any DecoratedFunctionCallNode::construct(ConstructContext &context) con } void DecoratedFunctionCallNode::dump(string indent) const { - cout << indent << "FUNC:" << feature->get_title() - << " (returns " << feature->get_type().name() << ")" << endl; + cout << indent << "FUNC:" << feature->get_title() << " (returns " + << feature->get_type().name() << ")" << endl; indent = "| " + indent; cout << indent << "ARGUMENTS:" << endl; for (const FunctionArgument &arg : arguments) { @@ -167,7 +174,8 @@ plugins::Any DecoratedListNode::construct(ConstructContext &context) const { vector result; int i = 0; for (const DecoratedASTNodePtr &element : elements) { - utils::TraceBlock block(context, "Constructing element " + to_string(i)); + utils::TraceBlock block( + context, "Constructing element " + to_string(i)); result.push_back(element->construct(context)); ++i; } @@ -182,8 +190,7 @@ void DecoratedListNode::dump(string indent) const { } } -VariableNode::VariableNode(const string &name) - : name(name) { +VariableNode::VariableNode(const string &name) : name(name) { } plugins::Any VariableNode::construct(ConstructContext &context) const { @@ -198,17 +205,19 @@ void VariableNode::dump(string indent) const { cout << indent << "VAR: " << name << endl; } -BoolLiteralNode::BoolLiteralNode(const string &value) - : value(value) { +BoolLiteralNode::BoolLiteralNode(const string &value) : 
value(value) { } plugins::Any BoolLiteralNode::construct(ConstructContext &context) const { - utils::TraceBlock block(context, "Constructing bool value from '" + value + "'"); + utils::TraceBlock block( + context, "Constructing bool value from '" + value + "'"); istringstream stream(value); bool x; if ((stream >> boolalpha >> x).fail()) { - ABORT("Could not parse bool constant '" + value + "'" - " (this should have been caught before constructing this node)."); + ABORT( + "Could not parse bool constant '" + value + + "'" + " (this should have been caught before constructing this node)."); } return x; } @@ -217,12 +226,12 @@ void BoolLiteralNode::dump(string indent) const { cout << indent << "BOOL: " << value << endl; } -StringLiteralNode::StringLiteralNode(const string &value) - : value(value) { +StringLiteralNode::StringLiteralNode(const string &value) : value(value) { } plugins::Any StringLiteralNode::construct(ConstructContext &context) const { - utils::TraceBlock block(context, "Constructing string value from '" + value + "'"); + utils::TraceBlock block( + context, "Constructing string value from '" + value + "'"); if (!(value.starts_with('"') && value.ends_with('"'))) { ABORT("String literal value is not enclosed in quotation marks" " (this should have been caught before constructing this node)."); @@ -257,15 +266,17 @@ void StringLiteralNode::dump(string indent) const { cout << indent << "STRING: " << value << endl; } -IntLiteralNode::IntLiteralNode(const string &value) - : value(value) { +IntLiteralNode::IntLiteralNode(const string &value) : value(value) { } plugins::Any IntLiteralNode::construct(ConstructContext &context) const { - utils::TraceBlock block(context, "Constructing int value from '" + value + "'"); + utils::TraceBlock block( + context, "Constructing int value from '" + value + "'"); if (value.empty()) { - ABORT("Empty value in int constant '" + value + "'" - " (this should have been caught before constructing this node)."); + ABORT( + "Empty 
value in int constant '" + value + + "'" + " (this should have been caught before constructing this node)."); } else if (value == "infinity") { return numeric_limits::max(); } @@ -282,8 +293,10 @@ plugins::Any IntLiteralNode::construct(ConstructContext &context) const { } else if (suffix == 'g') { factor = 1000000000; } else { - ABORT("Invalid suffix in int constant '" + value + "'" - " (this should have been caught before constructing this node)."); + ABORT( + "Invalid suffix in int constant '" + value + + "'" + " (this should have been caught before constructing this node)."); } prefix.pop_back(); } @@ -292,8 +305,10 @@ plugins::Any IntLiteralNode::construct(ConstructContext &context) const { int x; stream >> noskipws >> x; if (stream.fail() || !stream.eof()) { - ABORT("Could not parse int constant '" + value + "'" - " (this should have been caught before constructing this node)."); + ABORT( + "Could not parse int constant '" + value + + "'" + " (this should have been caught before constructing this node)."); } int min_int = numeric_limits::min(); @@ -310,12 +325,12 @@ void IntLiteralNode::dump(string indent) const { cout << indent << "INT: " << value << endl; } -FloatLiteralNode::FloatLiteralNode(const string &value) - : value(value) { +FloatLiteralNode::FloatLiteralNode(const string &value) : value(value) { } plugins::Any FloatLiteralNode::construct(ConstructContext &context) const { - utils::TraceBlock block(context, "Constructing float value from '" + value + "'"); + utils::TraceBlock block( + context, "Constructing float value from '" + value + "'"); if (value == "infinity") { return numeric_limits::infinity(); } else { @@ -323,8 +338,10 @@ plugins::Any FloatLiteralNode::construct(ConstructContext &context) const { double x; stream >> noskipws >> x; if (stream.fail() || !stream.eof()) { - ABORT("Could not parse double constant '" + value + "'" - " (this should have been caught before constructing this node)."); + ABORT( + "Could not parse double constant '" 
+ value + + "'" + " (this should have been caught before constructing this node)."); } return x; } @@ -334,8 +351,7 @@ void FloatLiteralNode::dump(string indent) const { cout << indent << "FLOAT: " << value << endl; } -SymbolNode::SymbolNode(const string &value) - : value(value) { +SymbolNode::SymbolNode(const string &value) : value(value) { } plugins::Any SymbolNode::construct(ConstructContext &) const { @@ -353,7 +369,8 @@ ConvertNode::ConvertNode( } plugins::Any ConvertNode::construct(ConstructContext &context) const { - utils::TraceBlock block(context, "Constructing value that requires conversion"); + utils::TraceBlock block( + context, "Constructing value that requires conversion"); plugins::Any constructed_value; { utils::TraceBlock block( @@ -362,28 +379,33 @@ plugins::Any ConvertNode::construct(ConstructContext &context) const { } plugins::Any converted_value; { - utils::TraceBlock block(context, "Converting constructed value from '" + from_type.name() + - "' to '" + to_type.name() + "'"); - converted_value = plugins::convert(constructed_value, from_type, - to_type, context); + utils::TraceBlock block( + context, "Converting constructed value from '" + from_type.name() + + "' to '" + to_type.name() + "'"); + converted_value = + plugins::convert(constructed_value, from_type, to_type, context); } return converted_value; } void ConvertNode::dump(string indent) const { - cout << indent << "CONVERT: " - << from_type.name() << " to " << to_type.name() << endl; + cout << indent << "CONVERT: " << from_type.name() << " to " + << to_type.name() << endl; value->dump("| " + indent); } CheckBoundsNode::CheckBoundsNode( - DecoratedASTNodePtr value, DecoratedASTNodePtr min_value, DecoratedASTNodePtr max_value) - : value(move(value)), min_value(move(min_value)), max_value(move(max_value)) { + DecoratedASTNodePtr value, DecoratedASTNodePtr min_value, + DecoratedASTNodePtr max_value) + : value(move(value)), + min_value(move(min_value)), + max_value(move(max_value)) { } 
template -static bool satisfies_bounds(const plugins::Any &v_, const plugins::Any &min_, - const plugins::Any &max_) { +static bool satisfies_bounds( + const plugins::Any &v_, const plugins::Any &min_, + const plugins::Any &max_) { T v = plugins::any_cast(v_); T min = plugins::any_cast(min_); T max = plugins::any_cast(max_); @@ -411,10 +433,11 @@ plugins::Any CheckBoundsNode::construct(ConstructContext &context) const { utils::TraceBlock block(context, "Checking bounds"); const type_info &type = v.type(); if (min.type() != type || max.type() != type) { - ABORT("Types of bounds (" + - string(min.type().name()) + ", " + max.type().name() + - ") do not match type of value (" + type.name() + ")" + - " (this should have been caught before constructing this node)."); + ABORT( + "Types of bounds (" + string(min.type().name()) + ", " + + max.type().name() + ") do not match type of value (" + + type.name() + ")" + + " (this should have been caught before constructing this node)."); } bool bounds_satisfied = true; @@ -423,7 +446,8 @@ plugins::Any CheckBoundsNode::construct(ConstructContext &context) const { } else if (type == typeid(double)) { bounds_satisfied = satisfies_bounds(v, min, max); } else { - ABORT("Bounds are only supported for arguments of type int or double."); + ABORT( + "Bounds are only supported for arguments of type int or double."); } if (!bounds_satisfied) { context.error("Value is not in bounds."); @@ -439,9 +463,11 @@ void CheckBoundsNode::dump(string indent) const { max_value->dump("| " + indent); } -// We are keeping all copy functionality together because it should be removed soon. +// We are keeping all copy functionality together because it should be removed +// soon. 
FunctionArgument::FunctionArgument(const FunctionArgument &other) - : key(other.key), value(other.value->clone()), + : key(other.key), + value(other.value->clone()), lazy_construction(other.lazy_construction) { } @@ -461,7 +487,8 @@ unique_ptr DecoratedLetNode::clone() const { DecoratedFunctionCallNode::DecoratedFunctionCallNode( const DecoratedFunctionCallNode &other) - : feature(other.feature), arguments(other.arguments), + : feature(other.feature), + arguments(other.arguments), unparsed_config(other.unparsed_config) { } @@ -488,8 +515,7 @@ shared_ptr DecoratedListNode::clone_shared() const { return make_shared(*this); } -VariableNode::VariableNode(const VariableNode &other) - : name(other.name) { +VariableNode::VariableNode(const VariableNode &other) : name(other.name) { } unique_ptr VariableNode::clone() const { @@ -548,8 +574,7 @@ shared_ptr FloatLiteralNode::clone_shared() const { return make_shared(*this); } -SymbolNode::SymbolNode(const SymbolNode &other) - : value(other.value) { +SymbolNode::SymbolNode(const SymbolNode &other) : value(other.value) { } unique_ptr SymbolNode::clone() const { @@ -561,7 +586,8 @@ shared_ptr SymbolNode::clone_shared() const { } ConvertNode::ConvertNode(const ConvertNode &other) - : value(other.value->clone()), from_type(other.from_type), + : value(other.value->clone()), + from_type(other.from_type), to_type(other.to_type) { } @@ -574,7 +600,8 @@ shared_ptr ConvertNode::clone_shared() const { } CheckBoundsNode::CheckBoundsNode(const CheckBoundsNode &other) - : value(other.value->clone()), min_value(other.min_value->clone()), + : value(other.value->clone()), + min_value(other.min_value->clone()), max_value(other.max_value->clone()) { } diff --git a/src/search/parser/decorated_abstract_syntax_tree.h b/src/search/parser/decorated_abstract_syntax_tree.h index d7304213da..83e5cb5492 100644 --- a/src/search/parser/decorated_abstract_syntax_tree.h +++ b/src/search/parser/decorated_abstract_syntax_tree.h @@ -14,7 +14,8 @@ class Options; 
} namespace parser { -// TODO: if we can get rid of lazy values, this class could be moved to the cc file. +// TODO: if we can get rid of lazy values, this class could be moved to the cc +// file. class ConstructContext : public utils::Context { std::unordered_map variables; public: @@ -31,7 +32,8 @@ class DecoratedASTNode { virtual plugins::Any construct(ConstructContext &context) const = 0; virtual void dump(std::string indent = "+") const = 0; - // TODO: This is here only for the iterated search. Once we switch to builders, we won't need it any more. + // TODO: This is here only for the iterated search. Once we switch to + // builders, we won't need it any more. virtual std::unique_ptr clone() const = 0; virtual std::shared_ptr clone_shared() const = 0; }; @@ -59,17 +61,20 @@ class FunctionArgument { std::string key; DecoratedASTNodePtr value; - // TODO: This is here only for the iterated search. Once we switch to builders, we won't need it any more. + // TODO: This is here only for the iterated search. Once we switch to + // builders, we won't need it any more. bool lazy_construction; public: - FunctionArgument(const std::string &key, DecoratedASTNodePtr value, - bool lazy_construction); + FunctionArgument( + const std::string &key, DecoratedASTNodePtr value, + bool lazy_construction); std::string get_key() const; const DecoratedASTNode &get_value() const; void dump(const std::string &indent) const; - // TODO: This is here only for the iterated search. Once we switch to builders, we won't need it any more. + // TODO: This is here only for the iterated search. Once we switch to + // builders, we won't need it any more. 
bool is_lazily_constructed() const; FunctionArgument(const FunctionArgument &other); }; @@ -87,7 +92,8 @@ class DecoratedLetNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; DecoratedLetNode(const DecoratedLetNode &other); @@ -106,7 +112,8 @@ class DecoratedFunctionCallNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; DecoratedFunctionCallNode(const DecoratedFunctionCallNode &other); @@ -120,7 +127,8 @@ class DecoratedListNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; DecoratedListNode(const DecoratedListNode &other); @@ -137,7 +145,8 @@ class VariableNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. 
virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; VariableNode(const VariableNode &other); @@ -151,7 +160,8 @@ class BoolLiteralNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; BoolLiteralNode(const BoolLiteralNode &other); @@ -165,7 +175,8 @@ class StringLiteralNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; StringLiteralNode(const StringLiteralNode &other); @@ -179,7 +190,8 @@ class IntLiteralNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; IntLiteralNode(const IntLiteralNode &other); @@ -193,7 +205,8 @@ class FloatLiteralNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. 
+ // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; FloatLiteralNode(const FloatLiteralNode &other); @@ -207,7 +220,8 @@ class SymbolNode : public DecoratedASTNode { plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; SymbolNode(const SymbolNode &other); @@ -218,14 +232,15 @@ class ConvertNode : public DecoratedASTNode { const plugins::Type &from_type; const plugins::Type &to_type; public: - ConvertNode(DecoratedASTNodePtr value, - const plugins::Type &from_type, - const plugins::Type &to_type); + ConvertNode( + DecoratedASTNodePtr value, const plugins::Type &from_type, + const plugins::Type &to_type); plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. 
virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; ConvertNode(const ConvertNode &other); @@ -236,13 +251,15 @@ class CheckBoundsNode : public DecoratedASTNode { DecoratedASTNodePtr min_value; DecoratedASTNodePtr max_value; public: - CheckBoundsNode(DecoratedASTNodePtr value, DecoratedASTNodePtr min_value, - DecoratedASTNodePtr max_value); + CheckBoundsNode( + DecoratedASTNodePtr value, DecoratedASTNodePtr min_value, + DecoratedASTNodePtr max_value); plugins::Any construct(ConstructContext &context) const override; void dump(std::string indent) const override; - // TODO: once we get rid of lazy construction, this should no longer be necessary. + // TODO: once we get rid of lazy construction, this should no longer be + // necessary. virtual std::unique_ptr clone() const override; virtual std::shared_ptr clone_shared() const override; CheckBoundsNode(const CheckBoundsNode &other); diff --git a/src/search/parser/lexical_analyzer.cc b/src/search/parser/lexical_analyzer.cc index 9812fd3e77..a1e5498e0c 100644 --- a/src/search/parser/lexical_analyzer.cc +++ b/src/search/parser/lexical_analyzer.cc @@ -12,8 +12,9 @@ using namespace std; namespace parser { static regex build_regex(const string &s) { - return regex("^\\s*(" + s + ")\\s*", - regex_constants::ECMAScript | regex_constants::icase); + return regex( + "^\\s*(" + s + ")\\s*", + regex_constants::ECMAScript | regex_constants::icase); } static vector> construct_token_type_expressions() { @@ -40,19 +41,20 @@ static vector> construct_token_type_expressions() { 'infinity', 'true', 'false', and 'let') from being recognized as identifiers. 
*/ - {TokenType::IDENTIFIER, R"([a-zA-Z_]\w*)"} - }; + {TokenType::IDENTIFIER, R"([a-zA-Z_]\w*)"}}; vector> token_type_expression; token_type_expression.reserve(token_type_str_expression.size()); for (const auto &pair : token_type_str_expression) { - token_type_expression.emplace_back(pair.first, build_regex(pair.second)); + token_type_expression.emplace_back( + pair.first, build_regex(pair.second)); } return token_type_expression; } static const vector> token_type_expressions = construct_token_type_expressions(); -static string highlight_position(const string &text, string::const_iterator pos) { +static string highlight_position( + const string &text, string::const_iterator pos) { ostringstream message_stream; int distance_to_highlight = pos - text.begin(); for (const string &line : utils::split(text, "\n")) { @@ -86,7 +88,9 @@ TokenStream split_tokens(const string &text) { for (const auto &type_and_expression : token_type_expressions) { TokenType token_type = type_and_expression.first; const regex &expression = type_and_expression.second; - if (regex_search(start, end, match, expression, regex_constants::match_continuous)) { + if (regex_search( + start, end, match, expression, + regex_constants::match_continuous)) { tokens.push_back({match[1], token_type}); start += match[0].length(); has_match = true; @@ -94,8 +98,9 @@ TokenStream split_tokens(const string &text) { } } if (!has_match) { - context.error("Unable to recognize next token:\n" + - highlight_position(text, start)); + context.error( + "Unable to recognize next token:\n" + + highlight_position(text, start)); } } return TokenStream(move(tokens)); diff --git a/src/search/parser/syntax_analyzer.cc b/src/search/parser/syntax_analyzer.cc index c4a4eaa133..61bd06a5cb 100644 --- a/src/search/parser/syntax_analyzer.cc +++ b/src/search/parser/syntax_analyzer.cc @@ -16,11 +16,11 @@ class SyntaxAnalyzerContext : public utils::Context { public: SyntaxAnalyzerContext(TokenStream &tokens, int lookahead) - : 
tokens(tokens), - lookahead(lookahead) { + : tokens(tokens), lookahead(lookahead) { } - virtual string decorate_block_name(const string &block_name) const override { + virtual string decorate_block_name( + const string &block_name) const override { ostringstream decorated_block_name; int pos = tokens.get_position(); decorated_block_name << block_name << ": " @@ -33,29 +33,33 @@ class SyntaxAnalyzerContext : public utils::Context { virtual void error(const string &message) const override { ostringstream message_with_tokens; string all_tokens = tokens.str(0, tokens.size()); - string remaining_tokens = tokens.str(tokens.get_position(), tokens.size()); - message_with_tokens << all_tokens << endl - << string(all_tokens.size() - remaining_tokens.size(), ' ') - << "^" << endl - << message; + string remaining_tokens = + tokens.str(tokens.get_position(), tokens.size()); + message_with_tokens + << all_tokens << endl + << string(all_tokens.size() - remaining_tokens.size(), ' ') << "^" + << endl + << message; throw utils::ContextError(str() + "\n\n" + message_with_tokens.str()); } }; static ASTNodePtr parse_node(TokenStream &tokens, SyntaxAnalyzerContext &); -static void parse_argument(TokenStream &tokens, - vector &positional_arguments, - unordered_map &keyword_arguments, - SyntaxAnalyzerContext &context) { - if (tokens.has_tokens(2) - && tokens.peek(context, 0).type == TokenType::IDENTIFIER - && tokens.peek(context, 1).type == TokenType::EQUALS) { +static void parse_argument( + TokenStream &tokens, vector &positional_arguments, + unordered_map &keyword_arguments, + SyntaxAnalyzerContext &context) { + if (tokens.has_tokens(2) && + tokens.peek(context, 0).type == TokenType::IDENTIFIER && + tokens.peek(context, 1).type == TokenType::EQUALS) { string argument_name = tokens.pop(context).content; tokens.pop(context, TokenType::EQUALS); if (keyword_arguments.count(argument_name)) { - context.error("Multiple definitions of the same keyword " - "argument '" + argument_name + "'."); + 
context.error( + "Multiple definitions of the same keyword " + "argument '" + + argument_name + "'."); } keyword_arguments[argument_name] = parse_node(tokens, context); } else { @@ -67,7 +71,8 @@ static void parse_argument(TokenStream &tokens, } } -static ASTNodePtr parse_let(TokenStream &tokens, SyntaxAnalyzerContext &context) { +static ASTNodePtr parse_let( + TokenStream &tokens, SyntaxAnalyzerContext &context) { utils::TraceBlock block(context, "Parsing Let"); tokens.pop(context, TokenType::LET); tokens.pop(context, TokenType::OPENING_PARENTHESIS); @@ -86,7 +91,8 @@ static ASTNodePtr parse_let(TokenStream &tokens, SyntaxAnalyzerContext &context) variable_definition = parse_node(tokens, context); } { - utils::TraceBlock block(context, "Parsing comma after variable definition."); + utils::TraceBlock block( + context, "Parsing comma after variable definition."); tokens.pop(context, TokenType::COMMA); } ASTNodePtr nested_value; @@ -106,11 +112,14 @@ static void parse_sequence( int num_argument = 1; while (tokens.peek(context).type != terminal_token) { { - utils::TraceBlock block(context, "Parsing " + to_string(num_argument) + ". argument"); + utils::TraceBlock block( + context, "Parsing " + to_string(num_argument) + ". argument"); func(); } { - utils::TraceBlock block(context, "Parsing token after " + to_string(num_argument) + ". argument"); + utils::TraceBlock block( + context, "Parsing token after " + to_string(num_argument) + + ". 
argument"); TokenType next_type = tokens.peek(context).type; if (next_type == terminal_token) { return; @@ -131,8 +140,8 @@ static void parse_sequence( } } -static ASTNodePtr parse_function(TokenStream &tokens, - SyntaxAnalyzerContext &context) { +static ASTNodePtr parse_function( + TokenStream &tokens, SyntaxAnalyzerContext &context) { int initial_token_stream_index = tokens.get_position(); utils::TraceBlock block(context, "Parsing plugin"); string plugin_name; @@ -147,25 +156,26 @@ static ASTNodePtr parse_function(TokenStream &tokens, { utils::TraceBlock block(context, "Parsing plugin arguments"); auto callback = [&]() -> void { - parse_argument(tokens, positional_arguments, keyword_arguments, context); - }; - parse_sequence(tokens, context, TokenType::CLOSING_PARENTHESIS, callback); + parse_argument( + tokens, positional_arguments, keyword_arguments, context); + }; + parse_sequence( + tokens, context, TokenType::CLOSING_PARENTHESIS, callback); } tokens.pop(context, TokenType::CLOSING_PARENTHESIS); - string unparsed_config = tokens.str(initial_token_stream_index, tokens.get_position()); + string unparsed_config = + tokens.str(initial_token_stream_index, tokens.get_position()); return make_unique( - plugin_name, move(positional_arguments), move(keyword_arguments), unparsed_config); + plugin_name, move(positional_arguments), move(keyword_arguments), + unparsed_config); } -static unordered_set literal_tokens { - TokenType::BOOLEAN, - TokenType::STRING, - TokenType::INTEGER, - TokenType::FLOAT, - TokenType::IDENTIFIER -}; +static unordered_set literal_tokens{ + TokenType::BOOLEAN, TokenType::STRING, TokenType::INTEGER, TokenType::FLOAT, + TokenType::IDENTIFIER}; -static ASTNodePtr parse_literal(TokenStream &tokens, SyntaxAnalyzerContext &context) { +static ASTNodePtr parse_literal( + TokenStream &tokens, SyntaxAnalyzerContext &context) { utils::TraceBlock block(context, "Parsing Literal"); Token token = tokens.pop(context); if (!literal_tokens.count(token.type)) { @@ 
-176,15 +186,16 @@ static ASTNodePtr parse_literal(TokenStream &tokens, SyntaxAnalyzerContext &cont return make_unique(token); } -static ASTNodePtr parse_list(TokenStream &tokens, SyntaxAnalyzerContext &context) { +static ASTNodePtr parse_list( + TokenStream &tokens, SyntaxAnalyzerContext &context) { utils::TraceBlock block(context, "Parsing List"); tokens.pop(context, TokenType::OPENING_BRACKET); vector elements; { utils::TraceBlock block(context, "Parsing list arguments"); auto callback = [&]() -> void { - elements.push_back(parse_node(tokens, context)); - }; + elements.push_back(parse_node(tokens, context)); + }; parse_sequence(tokens, context, TokenType::CLOSING_BRACKET, callback); } tokens.pop(context, TokenType::CLOSING_BRACKET); @@ -192,17 +203,17 @@ static ASTNodePtr parse_list(TokenStream &tokens, SyntaxAnalyzerContext &context } static vector parse_node_token_types = { - TokenType::OPENING_BRACKET, TokenType::LET, TokenType::BOOLEAN, - TokenType::STRING, TokenType::INTEGER, TokenType::FLOAT, + TokenType::OPENING_BRACKET, TokenType::LET, TokenType::BOOLEAN, + TokenType::STRING, TokenType::INTEGER, TokenType::FLOAT, TokenType::IDENTIFIER}; -static ASTNodePtr parse_node(TokenStream &tokens, - SyntaxAnalyzerContext &context) { +static ASTNodePtr parse_node( + TokenStream &tokens, SyntaxAnalyzerContext &context) { utils::TraceBlock block(context, "Identify node type"); Token token = tokens.peek(context); - if (find(parse_node_token_types.begin(), - parse_node_token_types.end(), - token.type) == parse_node_token_types.end()) { + if (find( + parse_node_token_types.begin(), parse_node_token_types.end(), + token.type) == parse_node_token_types.end()) { ostringstream message; message << "Unexpected token '" << token << "'. 
Expected any of the following token types: " @@ -221,8 +232,8 @@ static ASTNodePtr parse_node(TokenStream &tokens, case TokenType::FLOAT: return parse_literal(tokens, context); case TokenType::IDENTIFIER: - if (tokens.has_tokens(2) - && tokens.peek(context, 1).type == TokenType::OPENING_PARENTHESIS) { + if (tokens.has_tokens(2) && + tokens.peek(context, 1).type == TokenType::OPENING_PARENTHESIS) { return parse_function(tokens, context); } else { return parse_literal(tokens, context); diff --git a/src/search/parser/token_stream.cc b/src/search/parser/token_stream.cc index b2147f5a9b..03e156a399 100644 --- a/src/search/parser/token_stream.cc +++ b/src/search/parser/token_stream.cc @@ -12,10 +12,8 @@ using namespace std; namespace parser { static string case_insensitive_to_lower(const string &content, TokenType type) { - if (type == TokenType::BOOLEAN || - type == TokenType::INTEGER || - type == TokenType::FLOAT || - type == TokenType::IDENTIFIER) { + if (type == TokenType::BOOLEAN || type == TokenType::INTEGER || + type == TokenType::FLOAT || type == TokenType::IDENTIFIER) { return utils::tolower(content); } else { return content; diff --git a/src/search/pdbs/abstract_operator.cc b/src/search/pdbs/abstract_operator.cc index 7a5f47074e..44c231a128 100644 --- a/src/search/pdbs/abstract_operator.cc +++ b/src/search/pdbs/abstract_operator.cc @@ -8,9 +8,7 @@ using namespace std; namespace pdbs { AbstractOperator::AbstractOperator( - int concrete_op_id, - int cost, - vector &®ression_preconditions, + int concrete_op_id, int cost, vector &®ression_preconditions, int hash_effect) : concrete_op_id(concrete_op_id), cost(cost), @@ -18,9 +16,9 @@ AbstractOperator::AbstractOperator( hash_effect(hash_effect) { } -void AbstractOperator::dump(const Pattern &pattern, - const VariablesProxy &variables, - utils::LogProxy &log) const { +void AbstractOperator::dump( + const Pattern &pattern, const VariablesProxy &variables, + utils::LogProxy &log) const { if (log.is_at_least_debug()) { 
log << "AbstractOperator:" << endl; log << "Preconditions:" << endl; diff --git a/src/search/pdbs/abstract_operator.h b/src/search/pdbs/abstract_operator.h index d8eba45c2a..b90773e12f 100644 --- a/src/search/pdbs/abstract_operator.h +++ b/src/search/pdbs/abstract_operator.h @@ -40,10 +40,8 @@ class AbstractOperator { int hash_effect; public: AbstractOperator( - int concrete_op_id, - int cost, - std::vector &®ression_preconditions, - int hash_effect); + int concrete_op_id, int cost, + std::vector &®ression_preconditions, int hash_effect); /* Returns variable value pairs which represent the preconditions of @@ -73,9 +71,9 @@ class AbstractOperator { return cost; } - void dump(const Pattern &pattern, - const VariablesProxy &variables, - utils::LogProxy &log) const; + void dump( + const Pattern &pattern, const VariablesProxy &variables, + utils::LogProxy &log) const; }; } diff --git a/src/search/pdbs/canonical_pdbs_heuristic.cc b/src/search/pdbs/canonical_pdbs_heuristic.cc index 5ba5bcda40..70d3d4d10f 100644 --- a/src/search/pdbs/canonical_pdbs_heuristic.cc +++ b/src/search/pdbs/canonical_pdbs_heuristic.cc @@ -47,12 +47,8 @@ static CanonicalPDBs get_canonical_pdbs( and the pattern cliques. 
*/ prune_dominated_cliques( - *patterns, - *pdbs, - *pattern_cliques, - num_variables, - max_time_dominance_pruning, - log); + *patterns, *pdbs, *pattern_cliques, num_variables, + max_time_dominance_pruning, log); } dump_pattern_collection_generation_statistics( @@ -67,8 +63,7 @@ CanonicalPDBsHeuristic::CanonicalPDBsHeuristic( const string &description, utils::Verbosity verbosity) : Heuristic(transform, cache_estimates, description, verbosity), canonical_pdbs( - get_canonical_pdbs( - task, patterns, max_time_dominance_pruning, log)) { + get_canonical_pdbs(task, patterns, max_time_dominance_pruning, log)) { } int CanonicalPDBsHeuristic::compute_heuristic(const State &ancestor_state) { @@ -88,8 +83,7 @@ void add_canonical_pdbs_options_to_feature(plugins::Feature &feature) { "turns off dominance pruning. Dominance pruning excludes patterns " "and additive subsets that will never contribute to the heuristic " "value because there are dominating subsets in the collection.", - "infinity", - plugins::Bounds("0.0", "infinity")); + "infinity", plugins::Bounds("0.0", "infinity")); } tuple get_canonical_pdbs_arguments_from_options( @@ -112,9 +106,7 @@ class CanonicalPDBsHeuristicFeature "for a given state."); add_option>( - "patterns", - "pattern generation method", - "systematic(1)"); + "patterns", "pattern generation method", "systematic(1)"); add_canonical_pdbs_options_to_feature(*this); add_heuristic_options_to_feature(*this, "cpdbs"); @@ -128,14 +120,12 @@ class CanonicalPDBsHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get>( - "patterns"), + opts.get>("patterns"), get_canonical_pdbs_arguments_from_options(opts), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git 
a/src/search/pdbs/canonical_pdbs_heuristic.h b/src/search/pdbs/canonical_pdbs_heuristic.h index e796b42a32..b56db75272 100644 --- a/src/search/pdbs/canonical_pdbs_heuristic.h +++ b/src/search/pdbs/canonical_pdbs_heuristic.h @@ -22,9 +22,8 @@ class CanonicalPDBsHeuristic : public Heuristic { CanonicalPDBsHeuristic( const std::shared_ptr &patterns, double max_time_dominance_pruning, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; void add_canonical_pdbs_options_to_feature(plugins::Feature &feature); diff --git a/src/search/pdbs/cegar.cc b/src/search/pdbs/cegar.cc index 0c2c5e179c..49c6579ec4 100644 --- a/src/search/pdbs/cegar.cc +++ b/src/search/pdbs/cegar.cc @@ -35,12 +35,12 @@ class PatternInfo { public: PatternInfo( const shared_ptr &&pdb, - const vector> &&plan, - bool unsolvable) + const vector> &&plan, bool unsolvable) : pdb(move(pdb)), plan(move(plan)), unsolvable(unsolvable), - solved(false) {} + solved(false) { + } const shared_ptr &get_pdb() const { return pdb; @@ -72,8 +72,7 @@ struct Flaw { int variable; Flaw(int collection_index, int variable) - : collection_index(collection_index), - variable(variable) { + : collection_index(collection_index), variable(variable) { } }; @@ -111,8 +110,7 @@ class CEGAR { violated preconditions. 
*/ FlawList get_violated_preconditions( - int collection_index, - const OperatorProxy &op, + int collection_index, const OperatorProxy &op, const vector ¤t_state) const; /* Try to apply the plan of the pattern at the given index in the @@ -147,27 +145,19 @@ class CEGAR { void refine(const FlawList &flaws); public: CEGAR( - int max_pdb_size, - int max_collection_size, - double max_time, - bool use_wildcard_plans, - utils::LogProxy &log, + int max_pdb_size, int max_collection_size, double max_time, + bool use_wildcard_plans, utils::LogProxy &log, const shared_ptr &rng, - const shared_ptr &task, - const vector &goals, + const shared_ptr &task, const vector &goals, unordered_set &&blacklisted_variables = unordered_set()); PatternCollectionInformation compute_pattern_collection(); }; CEGAR::CEGAR( - int max_pdb_size, - int max_collection_size, - double max_time, - bool use_wildcard_plans, - utils::LogProxy &log, + int max_pdb_size, int max_collection_size, double max_time, + bool use_wildcard_plans, utils::LogProxy &log, const shared_ptr &rng, - const shared_ptr &task, - const vector &goals, + const shared_ptr &task, const vector &goals, unordered_set &&blacklisted_variables) : max_pdb_size(max_pdb_size), max_collection_size(max_collection_size), @@ -213,8 +203,7 @@ void CEGAR::print_collection() const { } } -bool CEGAR::time_limit_reached( - const utils::CountdownTimer &timer) const { +bool CEGAR::time_limit_reached(const utils::CountdownTimer &timer) const { if (timer.is_expired()) { if (log.is_at_least_normal()) { log << "CEGAR time limit reached" << endl; @@ -232,7 +221,8 @@ unique_ptr CEGAR::compute_pattern_info(Pattern &&pattern) const { bool unsolvable = false; State initial_state = task_proxy.get_initial_state(); initial_state.unpack(); - if (pdb->get_value(initial_state.get_unpacked_values()) == numeric_limits::max()) { + if (pdb->get_value(initial_state.get_unpacked_values()) == + numeric_limits::max()) { unsolvable = true; if (log.is_at_least_verbose()) { log 
<< "projection onto pattern " << pdb->get_pattern() @@ -240,7 +230,8 @@ unique_ptr CEGAR::compute_pattern_info(Pattern &&pattern) const { } } else { if (log.is_at_least_verbose()) { - log << "##### Plan for pattern " << pdb->get_pattern() << " #####" << endl; + log << "##### Plan for pattern " << pdb->get_pattern() << " #####" + << endl; int step = 1; for (const vector &equivalent_ops : plan) { log << "step #" << step << endl; @@ -284,8 +275,7 @@ static void apply_op_to_state(vector &state, const OperatorProxy &op) { } FlawList CEGAR::get_violated_preconditions( - int collection_index, - const OperatorProxy &op, + int collection_index, const OperatorProxy &op, const vector ¤t_state) const { FlawList flaws; for (FactProxy precondition : op.get_preconditions()) { @@ -303,18 +293,20 @@ FlawList CEGAR::get_violated_preconditions( return flaws; } -FlawList CEGAR::apply_plan(int collection_index, vector ¤t_state) const { +FlawList CEGAR::apply_plan( + int collection_index, vector ¤t_state) const { PatternInfo &pattern_info = *pattern_collection[collection_index]; const vector> &plan = pattern_info.get_plan(); if (log.is_at_least_verbose()) { - log << "executing plan for pattern " - << pattern_info.get_pattern() << ": "; + log << "executing plan for pattern " << pattern_info.get_pattern() + << ": "; } for (const vector &equivalent_ops : plan) { FlawList step_flaws; for (OperatorID op_id : equivalent_ops) { OperatorProxy op = task_proxy.get_operators()[op_id]; - FlawList operator_flaws = get_violated_preconditions(collection_index, op, current_state); + FlawList operator_flaws = + get_violated_preconditions(collection_index, op, current_state); /* If the operator is applicable, clear step_flaws, update the state @@ -327,9 +319,10 @@ FlawList CEGAR::apply_plan(int collection_index, vector ¤t_state) con apply_op_to_state(current_state, op); break; } else { - step_flaws.insert(step_flaws.end(), - make_move_iterator(operator_flaws.begin()), - 
make_move_iterator(operator_flaws.end())); + step_flaws.insert( + step_flaws.end(), + make_move_iterator(operator_flaws.begin()), + make_move_iterator(operator_flaws.end())); } } @@ -370,13 +363,15 @@ bool CEGAR::get_flaws_for_pattern( if (blacklisted_variables.empty()) { if (log.is_at_least_verbose()) { log << "there are no blacklisted variables, " - "task solved." << endl; + "task solved." + << endl; } return true; } else { if (log.is_at_least_verbose()) { log << "there are blacklisted variables, " - "marking pattern as solved." << endl; + "marking pattern as solved." + << endl; } pattern_info.mark_as_solved(); } @@ -400,14 +395,16 @@ bool CEGAR::get_flaws_for_pattern( } else { if (log.is_at_least_verbose()) { log << "there are no non-blacklisted goal variables " - "left, marking pattern as solved." << endl; + "left, marking pattern as solved." + << endl; } pattern_info.mark_as_solved(); } } } else { - flaws.insert(flaws.end(), make_move_iterator(new_flaws.begin()), - make_move_iterator(new_flaws.end())); + flaws.insert( + flaws.end(), make_move_iterator(new_flaws.begin()), + make_move_iterator(new_flaws.end())); } return false; } @@ -464,7 +461,8 @@ void CEGAR::merge_patterns(int index1, int index2) { int pdb_size2 = pattern_collection[index2]->get_pdb()->get_size(); // Compute merged_pattern_info pattern. - unique_ptr merged_pattern_info = compute_pattern_info(move(new_pattern)); + unique_ptr merged_pattern_info = + compute_pattern_info(move(new_pattern)); // Update collection size. 
collection_size -= pdb_size1; @@ -493,7 +491,8 @@ void CEGAR::add_variable_to_pattern(int collection_index, int var) { new_pattern.push_back(var); sort(new_pattern.begin(), new_pattern.end()); - unique_ptr new_pattern_info = compute_pattern_info(move(new_pattern)); + unique_ptr new_pattern_info = + compute_pattern_info(move(new_pattern)); collection_size -= pattern_info.get_pdb()->get_size(); collection_size += new_pattern_info->get_pdb()->get_size(); @@ -535,8 +534,7 @@ void CEGAR::refine(const FlawList &flaws) { } else { // Variable is not yet in the collection. if (log.is_at_least_verbose()) { - log << "var" << var - << " is not in the collection yet" << endl; + log << "var" << var << " is not in the collection yet" << endl; } if (can_add_variable_to_pattern(collection_index, var)) { if (log.is_at_least_verbose()) { @@ -595,8 +593,7 @@ PatternCollectionInformation CEGAR::compute_pattern_collection() { if (concrete_solution_index != -1) { if (log.is_at_least_normal()) { - log << "task solved during computation of abstraction" - << endl; + log << "task solved during computation of abstraction" << endl; } break; } @@ -633,7 +630,8 @@ PatternCollectionInformation CEGAR::compute_pattern_collection() { } else { for (const unique_ptr &pattern_info : pattern_collection) { if (pattern_info) { - const shared_ptr &pdb = pattern_info->get_pdb(); + const shared_ptr &pdb = + pattern_info->get_pdb(); patterns->push_back(pdb->get_pattern()); pdbs->push_back(pdb); } @@ -647,9 +645,7 @@ PatternCollectionInformation CEGAR::compute_pattern_collection() { if (log.is_at_least_normal()) { log << "CEGAR number of iterations: " << iteration << endl; dump_pattern_collection_generation_statistics( - "CEGAR", - timer.get_elapsed_time(), - pattern_collection_information, + "CEGAR", timer.get_elapsed_time(), pattern_collection_information, log); } @@ -657,52 +653,32 @@ PatternCollectionInformation CEGAR::compute_pattern_collection() { } PatternCollectionInformation 
generate_pattern_collection_with_cegar( - int max_pdb_size, - int max_collection_size, - double max_time, - bool use_wildcard_plans, - utils::LogProxy &log, + int max_pdb_size, int max_collection_size, double max_time, + bool use_wildcard_plans, utils::LogProxy &log, const shared_ptr &rng, - const shared_ptr &task, - const vector &goals, + const shared_ptr &task, const vector &goals, unordered_set &&blacklisted_variables) { CEGAR cegar( - max_pdb_size, - max_collection_size, - max_time, - use_wildcard_plans, - log, - rng, - task, - goals, - move(blacklisted_variables)); + max_pdb_size, max_collection_size, max_time, use_wildcard_plans, log, + rng, task, goals, move(blacklisted_variables)); return cegar.compute_pattern_collection(); } PatternInformation generate_pattern_with_cegar( - int max_pdb_size, - double max_time, - bool use_wildcard_plans, - utils::LogProxy &log, - const shared_ptr &rng, - const shared_ptr &task, - const FactPair &goal, + int max_pdb_size, double max_time, bool use_wildcard_plans, + utils::LogProxy &log, const shared_ptr &rng, + const shared_ptr &task, const FactPair &goal, unordered_set &&blacklisted_variables) { vector goals = {goal}; CEGAR cegar( - max_pdb_size, - max_pdb_size, - max_time, - use_wildcard_plans, - log, - rng, - task, - goals, - move(blacklisted_variables)); - PatternCollectionInformation collection_info = cegar.compute_pattern_collection(); + max_pdb_size, max_pdb_size, max_time, use_wildcard_plans, log, rng, + task, goals, move(blacklisted_variables)); + PatternCollectionInformation collection_info = + cegar.compute_pattern_collection(); shared_ptr new_patterns = collection_info.get_patterns(); if (new_patterns->size() > 1) { - cerr << "CEGAR limited to one goal computed more than one pattern" << endl; + cerr << "CEGAR limited to one goal computed more than one pattern" + << endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } diff --git a/src/search/pdbs/cegar.h b/src/search/pdbs/cegar.h index 
88178481ac..982afa67f8 100644 --- a/src/search/pdbs/cegar.h +++ b/src/search/pdbs/cegar.h @@ -37,15 +37,13 @@ namespace pdbs { inducing the same abstract transition. */ extern PatternCollectionInformation generate_pattern_collection_with_cegar( - int max_pdb_size, - int max_collection_size, - double max_time, - bool use_wildcard_plans, - utils::LogProxy &log, + int max_pdb_size, int max_collection_size, double max_time, + bool use_wildcard_plans, utils::LogProxy &log, const std::shared_ptr &rng, const std::shared_ptr &task, const std::vector &goals, - std::unordered_set &&blacklisted_variables = std::unordered_set()); + std::unordered_set &&blacklisted_variables = + std::unordered_set()); /* This function implements the CEGAR algorithm as described above, however @@ -53,16 +51,15 @@ extern PatternCollectionInformation generate_pattern_collection_with_cegar( pattern instead of a pattern collection. */ extern PatternInformation generate_pattern_with_cegar( - int max_pdb_size, - double max_time, - bool use_wildcard_plans, + int max_pdb_size, double max_time, bool use_wildcard_plans, utils::LogProxy &log, const std::shared_ptr &rng, - const std::shared_ptr &task, - const FactPair &goal, - std::unordered_set &&blacklisted_variables = std::unordered_set()); + const std::shared_ptr &task, const FactPair &goal, + std::unordered_set &&blacklisted_variables = + std::unordered_set()); -extern void add_cegar_implementation_notes_to_feature(plugins::Feature &feature); +extern void add_cegar_implementation_notes_to_feature( + plugins::Feature &feature); extern void add_cegar_wildcard_option_to_feature(plugins::Feature &feature); std::tuple get_cegar_wildcard_arguments_from_options( const plugins::Options &opts); diff --git a/src/search/pdbs/dominance_pruning.cc b/src/search/pdbs/dominance_pruning.cc index ff61e9458d..213e40bfb3 100644 --- a/src/search/pdbs/dominance_pruning.cc +++ b/src/search/pdbs/dominance_pruning.cc @@ -108,8 +108,7 @@ class Pruner { public: Pruner( const 
PatternCollection &patterns, - const vector &pattern_cliques, - int num_variables) + const vector &pattern_cliques, int num_variables) : patterns(patterns), pattern_cliques(pattern_cliques), num_variables(num_variables) { @@ -141,7 +140,8 @@ class Pruner { information we collected so far. */ if (log.is_at_least_normal()) { - log << "Time limit reached. Abort dominance pruning." << endl; + log << "Time limit reached. Abort dominance pruning." + << endl; } break; } @@ -152,11 +152,8 @@ class Pruner { }; void prune_dominated_cliques( - PatternCollection &patterns, - PDBCollection &pdbs, - vector &pattern_cliques, - int num_variables, - double max_time, + PatternCollection &patterns, PDBCollection &pdbs, + vector &pattern_cliques, int num_variables, double max_time, utils::LogProxy &log) { if (log.is_at_least_normal()) { log << "Running dominance pruning..." << endl; @@ -166,10 +163,8 @@ void prune_dominated_cliques( int num_patterns = patterns.size(); int num_cliques = pattern_cliques.size(); - vector pruned = Pruner( - patterns, - pattern_cliques, - num_variables).get_pruned_cliques(timer, log); + vector pruned = Pruner(patterns, pattern_cliques, num_variables) + .get_pruned_cliques(timer, log); vector remaining_pattern_cliques; vector is_remaining_pattern(num_patterns, false); @@ -192,7 +187,8 @@ void prune_dominated_cliques( remaining_patterns.reserve(num_remaining_patterns); remaining_pdbs.reserve(num_remaining_patterns); vector old_to_new_pattern_id(num_patterns, -1); - for (PatternID old_pattern_id = 0; old_pattern_id < num_patterns; ++old_pattern_id) { + for (PatternID old_pattern_id = 0; old_pattern_id < num_patterns; + ++old_pattern_id) { if (is_remaining_pattern[old_pattern_id]) { PatternID new_pattern_id = remaining_patterns.size(); old_to_new_pattern_id[old_pattern_id] = new_pattern_id; diff --git a/src/search/pdbs/dominance_pruning.h b/src/search/pdbs/dominance_pruning.h index 569a49533b..7bf2fb77a3 100644 --- a/src/search/pdbs/dominance_pruning.h +++ 
b/src/search/pdbs/dominance_pruning.h @@ -14,12 +14,9 @@ namespace pdbs { p_superset is a superset of p_subset. */ extern void prune_dominated_cliques( - PatternCollection &patterns, - PDBCollection &pdbs, - std::vector &pattern_cliques, - int num_variables, - double max_time, - utils::LogProxy &log); + PatternCollection &patterns, PDBCollection &pdbs, + std::vector &pattern_cliques, int num_variables, + double max_time, utils::LogProxy &log); } #endif diff --git a/src/search/pdbs/incremental_canonical_pdbs.cc b/src/search/pdbs/incremental_canonical_pdbs.cc index d6b9f10ef6..2132e8a62a 100644 --- a/src/search/pdbs/incremental_canonical_pdbs.cc +++ b/src/search/pdbs/incremental_canonical_pdbs.cc @@ -12,8 +12,8 @@ namespace pdbs { IncrementalCanonicalPDBs::IncrementalCanonicalPDBs( const TaskProxy &task_proxy, const PatternCollection &intitial_patterns) : task_proxy(task_proxy), - patterns(make_shared(intitial_patterns.begin(), - intitial_patterns.end())), + patterns(make_shared( + intitial_patterns.begin(), intitial_patterns.end())), pattern_databases(make_shared()), pattern_cliques(nullptr), size(0) { @@ -37,8 +37,7 @@ void IncrementalCanonicalPDBs::add_pdb(const shared_ptr &pdb) { } void IncrementalCanonicalPDBs::recompute_pattern_cliques() { - pattern_cliques = compute_pattern_cliques(*patterns, - are_additive); + pattern_cliques = compute_pattern_cliques(*patterns, are_additive); } vector IncrementalCanonicalPDBs::get_pattern_cliques( @@ -55,7 +54,8 @@ int IncrementalCanonicalPDBs::get_value(const State &state) const { bool IncrementalCanonicalPDBs::is_dead_end(const State &state) const { state.unpack(); for (const shared_ptr &pdb : *pattern_databases) - if (pdb->get_value(state.get_unpacked_values()) == numeric_limits::max()) + if (pdb->get_value(state.get_unpacked_values()) == + numeric_limits::max()) return true; return false; } diff --git a/src/search/pdbs/incremental_canonical_pdbs.h b/src/search/pdbs/incremental_canonical_pdbs.h index 
ed4e288c94..e0a6cb9759 100644 --- a/src/search/pdbs/incremental_canonical_pdbs.h +++ b/src/search/pdbs/incremental_canonical_pdbs.h @@ -28,8 +28,9 @@ class IncrementalCanonicalPDBs { void recompute_pattern_cliques(); public: - IncrementalCanonicalPDBs(const TaskProxy &task_proxy, - const PatternCollection &intitial_patterns); + IncrementalCanonicalPDBs( + const TaskProxy &task_proxy, + const PatternCollection &intitial_patterns); virtual ~IncrementalCanonicalPDBs() = default; // Adds a new PDB to the collection and recomputes pattern_cliques. diff --git a/src/search/pdbs/match_tree.cc b/src/search/pdbs/match_tree.cc index 20b3fa6d58..a003575a78 100644 --- a/src/search/pdbs/match_tree.cc +++ b/src/search/pdbs/match_tree.cc @@ -66,9 +66,7 @@ bool MatchTree::Node::is_leaf_node() const { } MatchTree::MatchTree(const TaskProxy &task_proxy, const Projection &projection) - : task_proxy(task_proxy), - projection(projection), - root(nullptr) { + : task_proxy(task_proxy), projection(projection), root(nullptr) { } MatchTree::~MatchTree() { @@ -76,8 +74,8 @@ MatchTree::~MatchTree() { } void MatchTree::insert_recursive( - int op_id, const vector ®ression_preconditions, - int pre_index, Node **edge_from_parent) { + int op_id, const vector ®ression_preconditions, int pre_index, + Node **edge_from_parent) { if (*edge_from_parent == nullptr) { // We don't exist yet: create a new node. 
*edge_from_parent = new Node(); @@ -123,11 +121,13 @@ void MatchTree::insert_recursive( edge_to_child = &node->star_successor; } - insert_recursive(op_id, regression_preconditions, pre_index, edge_to_child); + insert_recursive( + op_id, regression_preconditions, pre_index, edge_to_child); } } -void MatchTree::insert(int op_id, const vector ®ression_preconditions) { +void MatchTree::insert( + int op_id, const vector ®ression_preconditions) { insert_recursive(op_id, regression_preconditions, 0, &root); } @@ -140,9 +140,9 @@ void MatchTree::get_applicable_operator_ids_recursive( some informal experiments. */ - operator_ids.insert(operator_ids.end(), - node->applicable_operator_ids.begin(), - node->applicable_operator_ids.end()); + operator_ids.insert( + operator_ids.end(), node->applicable_operator_ids.begin(), + node->applicable_operator_ids.end()); if (node->is_leaf_node()) return; @@ -188,11 +188,11 @@ void MatchTree::dump_recursive(Node *node, utils::LogProxy &log) const { } else { for (int val = 0; val < node->var_domain_size; ++val) { if (node->successors[val]) { - log << "recursive call for child with value " << val << endl; + log << "recursive call for child with value " << val + << endl; dump_recursive(node->successors[val], log); log << "back from recursive call (for successors[" << val - << "]) to node with var_id = " << node->var_id - << endl; + << "]) to node with var_id = " << node->var_id << endl; } else { log << "no child for value " << val << endl; } diff --git a/src/search/pdbs/match_tree.h b/src/search/pdbs/match_tree.h index 7d92be2ed2..bc8c72f3a3 100644 --- a/src/search/pdbs/match_tree.h +++ b/src/search/pdbs/match_tree.h @@ -1,9 +1,8 @@ #ifndef PDBS_MATCH_TREE_H #define PDBS_MATCH_TREE_H -#include "types.h" - #include "pattern_database.h" +#include "types.h" #include "../task_proxy.h" @@ -30,10 +29,9 @@ class MatchTree { Projection projection; struct Node; Node *root; - void insert_recursive(int op_id, - const std::vector ®ression_preconditions, 
- int pre_index, - Node **edge_from_parent); + void insert_recursive( + int op_id, const std::vector ®ression_preconditions, + int pre_index, Node **edge_from_parent); void get_applicable_operator_ids_recursive( Node *node, int state_index, std::vector &operator_ids) const; void dump_recursive(Node *node, utils::LogProxy &log) const; @@ -42,12 +40,12 @@ class MatchTree { Initialize an empty match tree. We copy projection to ensure that the match tree remains in a valid state independently of projection. */ - MatchTree(const TaskProxy &task_proxy, - const Projection &projection); + MatchTree(const TaskProxy &task_proxy, const Projection &projection); ~MatchTree(); /* Insert an abstract operator into the match tree, creating or enlarging it. */ - void insert(int op_id, const std::vector ®ression_preconditions); + void insert( + int op_id, const std::vector ®ression_preconditions); /* Extracts all IDs of applicable abstract operators for the abstract state diff --git a/src/search/pdbs/pattern_cliques.cc b/src/search/pdbs/pattern_cliques.cc index fe25f5f669..eb4a7cb462 100644 --- a/src/search/pdbs/pattern_cliques.cc +++ b/src/search/pdbs/pattern_cliques.cc @@ -9,9 +9,9 @@ using namespace std; namespace pdbs { -bool are_patterns_additive(const Pattern &pattern1, - const Pattern &pattern2, - const VariableAdditivity &are_additive) { +bool are_patterns_additive( + const Pattern &pattern1, const Pattern &pattern2, + const VariableAdditivity &are_additive) { for (int v1 : pattern1) { for (int v2 : pattern2) { if (!are_additive[v1][v2]) { @@ -55,7 +55,8 @@ shared_ptr> compute_pattern_cliques( } } - shared_ptr> max_cliques = make_shared>(); + shared_ptr> max_cliques = + make_shared>(); max_cliques::compute_max_cliques(cgraph, *max_cliques); return max_cliques; } @@ -63,8 +64,7 @@ shared_ptr> compute_pattern_cliques( vector compute_pattern_cliques_with_pattern( const PatternCollection &patterns, const vector &known_pattern_cliques, - const Pattern &new_pattern, - const 
VariableAdditivity &are_additive) { + const Pattern &new_pattern, const VariableAdditivity &are_additive) { vector cliques_additive_with_pattern; for (const PatternClique &known_clique : known_pattern_cliques) { // Take all patterns which are additive to new_pattern. diff --git a/src/search/pdbs/pattern_cliques.h b/src/search/pdbs/pattern_cliques.h index 6c6b3de4b9..a5c1328276 100644 --- a/src/search/pdbs/pattern_cliques.h +++ b/src/search/pdbs/pattern_cliques.h @@ -15,9 +15,9 @@ extern VariableAdditivity compute_additive_vars(const TaskProxy &task_proxy); /* Returns true iff the two patterns are additive i.e. there is no operator which affects variables in pattern one as well as in pattern two. */ -extern bool are_patterns_additive(const Pattern &pattern1, - const Pattern &pattern2, - const VariableAdditivity &are_additive); +extern bool are_patterns_additive( + const Pattern &pattern1, const Pattern &pattern2, + const VariableAdditivity &are_additive); /* Computes pattern cliques of the given patterns. 
@@ -73,8 +73,7 @@ extern std::shared_ptr> compute_pattern_cliques( extern std::vector compute_pattern_cliques_with_pattern( const PatternCollection &patterns, const std::vector &known_pattern_cliques, - const Pattern &new_pattern, - const VariableAdditivity &are_additive); + const Pattern &new_pattern, const VariableAdditivity &are_additive); } #endif diff --git a/src/search/pdbs/pattern_collection_generator_combo.cc b/src/search/pdbs/pattern_collection_generator_combo.cc index 2140c6e3d9..0bac4287db 100644 --- a/src/search/pdbs/pattern_collection_generator_combo.cc +++ b/src/search/pdbs/pattern_collection_generator_combo.cc @@ -33,9 +33,9 @@ PatternCollectionInformation PatternCollectionGeneratorCombo::compute_patterns( TaskProxy task_proxy(*task); shared_ptr patterns = make_shared(); - PatternGeneratorGreedy large_pattern_generator( - max_states, verbosity); - Pattern large_pattern = large_pattern_generator.generate(task).get_pattern(); + PatternGeneratorGreedy large_pattern_generator(max_states, verbosity); + Pattern large_pattern = + large_pattern_generator.generate(task).get_pattern(); set used_vars(large_pattern.begin(), large_pattern.end()); patterns->push_back(move(large_pattern)); @@ -51,23 +51,22 @@ PatternCollectionInformation PatternCollectionGeneratorCombo::compute_patterns( } class PatternCollectionGeneratorComboFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorCombo> { public: PatternCollectionGeneratorComboFeature() : TypedFeature("combo") { add_option( - "max_states", - "maximum abstraction size for combo strategy", - "1000000", - plugins::Bounds("1", "infinity")); + "max_states", "maximum abstraction size for combo strategy", + "1000000", plugins::Bounds("1", "infinity")); add_generator_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual 
shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorCombo>( opts.get("max_states"), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_collection_generator_combo.h b/src/search/pdbs/pattern_collection_generator_combo.h index e24e04eedc..15f4ab292b 100644 --- a/src/search/pdbs/pattern_collection_generator_combo.h +++ b/src/search/pdbs/pattern_collection_generator_combo.h @@ -16,8 +16,7 @@ class PatternCollectionGeneratorCombo : public PatternCollectionGenerator { virtual PatternCollectionInformation compute_patterns( const std::shared_ptr &task) override; public: - PatternCollectionGeneratorCombo( - int max_states, utils::Verbosity verbosity); + PatternCollectionGeneratorCombo(int max_states, utils::Verbosity verbosity); }; } diff --git a/src/search/pdbs/pattern_collection_generator_disjoint_cegar.cc b/src/search/pdbs/pattern_collection_generator_disjoint_cegar.cc index cbcc99181b..851f1c538a 100644 --- a/src/search/pdbs/pattern_collection_generator_disjoint_cegar.cc +++ b/src/search/pdbs/pattern_collection_generator_disjoint_cegar.cc @@ -10,10 +10,10 @@ using namespace std; namespace pdbs { -PatternCollectionGeneratorDisjointCegar::PatternCollectionGeneratorDisjointCegar( - int max_pdb_size, int max_collection_size, double max_time, - bool use_wildcard_plans, int random_seed, - utils::Verbosity verbosity) +PatternCollectionGeneratorDisjointCegar:: + PatternCollectionGeneratorDisjointCegar( + int max_pdb_size, int max_collection_size, double max_time, + bool use_wildcard_plans, int random_seed, utils::Verbosity verbosity) : PatternCollectionGenerator(verbosity), max_pdb_size(max_pdb_size), max_collection_size(max_collection_size), @@ -26,34 +26,32 @@ string PatternCollectionGeneratorDisjointCegar::name() const { return "disjoint CEGAR pattern collection generator"; } 
-PatternCollectionInformation PatternCollectionGeneratorDisjointCegar::compute_patterns( +PatternCollectionInformation +PatternCollectionGeneratorDisjointCegar::compute_patterns( const shared_ptr &task) { // Store the set of goals in random order. TaskProxy task_proxy(*task); vector goals = get_goals_in_random_order(task_proxy, *rng); return generate_pattern_collection_with_cegar( - max_pdb_size, - max_collection_size, - max_time, - use_wildcard_plans, - log, - rng, - task, - move(goals)); + max_pdb_size, max_collection_size, max_time, use_wildcard_plans, log, + rng, task, move(goals)); } class PatternCollectionGeneratorDisjointCegarFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorDisjointCegar> { public: - PatternCollectionGeneratorDisjointCegarFeature() : TypedFeature("disjoint_cegar") { + PatternCollectionGeneratorDisjointCegarFeature() + : TypedFeature("disjoint_cegar") { document_title("Disjoint CEGAR"); document_synopsis( "This pattern collection generator uses the CEGAR algorithm to " "compute a pattern for the planning task. See below " "for a description of the algorithm and some implementation notes. " "The original algorithm (called single CEGAR) is described in the " - "paper " + get_rovner_et_al_reference()); + "paper " + + get_rovner_et_al_reference()); // TODO: these options could be move to the base class; see issue1022. 
add_option( @@ -61,22 +59,19 @@ class PatternCollectionGeneratorDisjointCegarFeature "maximum number of states per pattern database (ignored for the " "initial collection consisting of a singleton pattern for each goal " "variable)", - "1000000", - plugins::Bounds("1", "infinity")); + "1000000", plugins::Bounds("1", "infinity")); add_option( "max_collection_size", "maximum number of states in the pattern collection (ignored for the " "initial collection consisting of a singleton pattern for each goal " "variable)", - "10000000", - plugins::Bounds("1", "infinity")); + "10000000", plugins::Bounds("1", "infinity")); add_option( "max_time", "maximum time in seconds for this pattern collection generator " "(ignored for computing the initial collection consisting of a " "singleton pattern for each goal variable)", - "infinity", - plugins::Bounds("0.0", "infinity")); + "infinity", plugins::Bounds("0.0", "infinity")); add_cegar_wildcard_option_to_feature(*this); utils::add_rng_options_to_feature(*this); add_generator_options_to_feature(*this); @@ -86,16 +81,16 @@ class PatternCollectionGeneratorDisjointCegarFeature virtual shared_ptr create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( - opts.get("max_pdb_size"), - opts.get("max_collection_size"), + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorDisjointCegar>( + opts.get("max_pdb_size"), opts.get("max_collection_size"), opts.get("max_time"), get_cegar_wildcard_arguments_from_options(opts), utils::get_rng_arguments_from_options(opts), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; -static plugins::FeaturePlugin _plugin; +static plugins::FeaturePlugin + _plugin; } diff --git a/src/search/pdbs/pattern_collection_generator_disjoint_cegar.h b/src/search/pdbs/pattern_collection_generator_disjoint_cegar.h index 32ac888bad..6e826c72e1 100644 --- 
a/src/search/pdbs/pattern_collection_generator_disjoint_cegar.h +++ b/src/search/pdbs/pattern_collection_generator_disjoint_cegar.h @@ -12,7 +12,8 @@ namespace pdbs { This pattern collection generator uses the CEGAR algorithm to compute a disjoint pattern collection for the given task. See cegar.h for more details. */ -class PatternCollectionGeneratorDisjointCegar : public PatternCollectionGenerator { +class PatternCollectionGeneratorDisjointCegar + : public PatternCollectionGenerator { const int max_pdb_size; const int max_collection_size; const double max_time; @@ -25,8 +26,7 @@ class PatternCollectionGeneratorDisjointCegar : public PatternCollectionGenerato public: PatternCollectionGeneratorDisjointCegar( int max_pdb_size, int max_collection_size, double max_time, - bool use_wildcard_plans, int random_seed, - utils::Verbosity verbosity); + bool use_wildcard_plans, int random_seed, utils::Verbosity verbosity); }; } diff --git a/src/search/pdbs/pattern_collection_generator_genetic.cc b/src/search/pdbs/pattern_collection_generator_genetic.cc index 0a5c5acb27..af1702673b 100644 --- a/src/search/pdbs/pattern_collection_generator_genetic.cc +++ b/src/search/pdbs/pattern_collection_generator_genetic.cc @@ -59,9 +59,10 @@ void PatternCollectionGeneratorGenetic::select( // [0..total_so_far) double random = rng->random() * total_so_far; // Find first entry which is strictly greater than random. 
- selected = upper_bound(cumulative_fitness.begin(), - cumulative_fitness.end(), random) - - cumulative_fitness.begin(); + selected = upper_bound( + cumulative_fitness.begin(), cumulative_fitness.end(), + random) - + cumulative_fitness.begin(); } new_pattern_collections.push_back(pattern_collections[selected]); } @@ -161,7 +162,8 @@ bool PatternCollectionGeneratorGenetic::mark_used_variables( return false; } -void PatternCollectionGeneratorGenetic::evaluate(vector &fitness_values) { +void PatternCollectionGeneratorGenetic::evaluate( + vector &fitness_values) { TaskProxy task_proxy(*task); for (size_t i = 0; i < pattern_collections.size(); ++i) { const auto &collection = pattern_collections[i]; @@ -172,7 +174,8 @@ void PatternCollectionGeneratorGenetic::evaluate(vector &fitness_values) double fitness = 0; bool pattern_valid = true; vector variables_used(task_proxy.get_variables().size(), false); - shared_ptr pattern_collection = make_shared(); + shared_ptr pattern_collection = + make_shared(); pattern_collection->reserve(collection.size()); for (const vector &bitvector : collection) { Pattern pattern = transform_to_pattern_normal_form(bitvector); @@ -242,8 +245,8 @@ void PatternCollectionGeneratorGenetic::bin_packing() { if (next_var_size > pdb_max_size) // var never fits into a bin. continue; - if (!utils::is_product_within_limit(current_size, next_var_size, - pdb_max_size)) { + if (!utils::is_product_within_limit( + current_size, next_var_size, pdb_max_size)) { // Open a new bin for var. 
pattern_collection.push_back(pattern); pattern.clear(); @@ -289,7 +292,8 @@ string PatternCollectionGeneratorGenetic::name() const { return "genetic pattern collection generator"; } -PatternCollectionInformation PatternCollectionGeneratorGenetic::compute_patterns( +PatternCollectionInformation +PatternCollectionGeneratorGenetic::compute_patterns( const shared_ptr &task_) { task = task_; genetic_algorithm(); @@ -300,7 +304,8 @@ PatternCollectionInformation PatternCollectionGeneratorGenetic::compute_patterns } class PatternCollectionGeneratorGeneticFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorGenetic> { public: PatternCollectionGeneratorGeneticFeature() : TypedFeature("genetic") { document_title("Genetic Algorithm Patterns"); @@ -311,36 +316,29 @@ class PatternCollectionGeneratorGeneticFeature "to optimize the pattern collections with an objective function that " "estimates the mean heuristic value of the the pattern collections. " "Pattern collections with higher mean heuristic estimates are more " - "likely selected for the next generation." + utils::format_conference_reference( + "likely selected for the next generation." 
+ + utils::format_conference_reference( {"Stefan Edelkamp"}, "Automated Creation of Pattern Database Search Heuristics", "http://www.springerlink.com/content/20613345434608x1/", "Proceedings of the 4th Workshop on Model Checking and Artificial" " Intelligence (!MoChArt 2006)", - "35-50", - "AAAI Press", - "2007")); + "35-50", "AAAI Press", "2007")); add_option( - "pdb_max_size", - "maximal number of states per pattern database ", - "50000", - plugins::Bounds("1", "infinity")); + "pdb_max_size", "maximal number of states per pattern database ", + "50000", plugins::Bounds("1", "infinity")); add_option( "num_collections", "number of pattern collections to maintain in the genetic " "algorithm (population size)", - "5", - plugins::Bounds("1", "infinity")); + "5", plugins::Bounds("1", "infinity")); add_option( - "num_episodes", - "number of episodes for the genetic algorithm", - "30", - plugins::Bounds("0", "infinity")); + "num_episodes", "number of episodes for the genetic algorithm", + "30", plugins::Bounds("0", "infinity")); add_option( "mutation_probability", - "probability for flipping a bit in the genetic algorithm", - "0.01", + "probability for flipping a bit in the genetic algorithm", "0.01", plugins::Bounds("0.0", "1.0")); add_option( "disjoint", @@ -351,9 +349,8 @@ class PatternCollectionGeneratorGeneticFeature add_generator_options_to_feature(*this); document_note( - "Note", - "This pattern generation method uses the " - "zero/one pattern database heuristic."); + "Note", "This pattern generation method uses the " + "zero/one pattern database heuristic."); document_note( "Implementation Notes", "The standard genetic algorithm procedure as described in the paper is " @@ -391,17 +388,16 @@ class PatternCollectionGeneratorGeneticFeature document_language_support("axioms", "not supported"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( - opts.get("pdb_max_size"), - 
opts.get("num_collections"), + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorGenetic>( + opts.get("pdb_max_size"), opts.get("num_collections"), opts.get("num_episodes"), opts.get("mutation_probability"), opts.get("disjoint"), utils::get_rng_arguments_from_options(opts), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_collection_generator_genetic.h b/src/search/pdbs/pattern_collection_generator_genetic.h index f1c1bc8df7..225049acef 100644 --- a/src/search/pdbs/pattern_collection_generator_genetic.h +++ b/src/search/pdbs/pattern_collection_generator_genetic.h @@ -84,8 +84,8 @@ class PatternCollectionGeneratorGenetic : public PatternCollectionGenerator { anything was already used (in which case we do not mark the remaining variables). */ - bool mark_used_variables(const Pattern &pattern, - std::vector &variables_used) const; + bool mark_used_variables( + const Pattern &pattern, std::vector &variables_used) const; void remove_irrelevant_variables(Pattern &pattern) const; /* diff --git a/src/search/pdbs/pattern_collection_generator_hillclimbing.cc b/src/search/pdbs/pattern_collection_generator_hillclimbing.cc index 5dbb70741d..a4e7fee3e5 100644 --- a/src/search/pdbs/pattern_collection_generator_hillclimbing.cc +++ b/src/search/pdbs/pattern_collection_generator_hillclimbing.cc @@ -32,8 +32,7 @@ namespace pdbs { /* Since this exception class is only used for control flow and thus has no need for an error message, we use a standalone class instead of inheriting from utils::Exception. 
*/ -class HillClimbingTimeout { -}; +class HillClimbingTimeout {}; static vector get_goal_variables(const TaskProxy &task_proxy) { vector goal_vars; @@ -77,8 +76,10 @@ static vector get_goal_variables(const TaskProxy &task_proxy) { This method precomputes all variables which satisfy conditions 1. or 2. for a given neighbour variable already in the pattern. */ -static vector> compute_relevant_neighbours(const TaskProxy &task_proxy) { - const causal_graph::CausalGraph &causal_graph = task_proxy.get_causal_graph(); +static vector> compute_relevant_neighbours( + const TaskProxy &task_proxy) { + const causal_graph::CausalGraph &causal_graph = + task_proxy.get_causal_graph(); const vector goal_vars = get_goal_variables(task_proxy); vector> connected_vars_by_variable; @@ -88,10 +89,13 @@ static vector> compute_relevant_neighbours(const TaskProxy &task_pro int var_id = var.get_id(); // Consider variables connected backwards via pre->eff arcs. - const vector &pre_to_eff_predecessors = causal_graph.get_eff_to_pre(var_id); + const vector &pre_to_eff_predecessors = + causal_graph.get_eff_to_pre(var_id); - // Consider goal variables connected (forwards) via eff--eff and pre->eff arcs. - const vector &causal_graph_successors = causal_graph.get_successors(var_id); + // Consider goal variables connected (forwards) via eff--eff and + // pre->eff arcs. 
+ const vector &causal_graph_successors = + causal_graph.get_successors(var_id); vector goal_variable_successors; set_intersection( causal_graph_successors.begin(), causal_graph_successors.end(), @@ -110,7 +114,6 @@ static vector> compute_relevant_neighbours(const TaskProxy &task_pro return connected_vars_by_variable; } - PatternCollectionGeneratorHillclimbing::PatternCollectionGeneratorHillclimbing( int pdb_max_size, int collection_max_size, int num_samples, int min_improvement, double max_time, int random_seed, @@ -124,15 +127,14 @@ PatternCollectionGeneratorHillclimbing::PatternCollectionGeneratorHillclimbing( rng(utils::get_rng(random_seed)), num_rejected(0), hill_climbing_timer(nullptr) { - utils::verify_argument(min_improvement <= num_samples, - "Minimum improvement must not be higher than number of samples."); + utils::verify_argument( + min_improvement <= num_samples, + "Minimum improvement must not be higher than number of samples."); } int PatternCollectionGeneratorHillclimbing::generate_candidate_pdbs( - const TaskProxy &task_proxy, - const vector> &relevant_neighbours, - const PatternDatabase &pdb, - set &generated_patterns, + const TaskProxy &task_proxy, const vector> &relevant_neighbours, + const PatternDatabase &pdb, set &generated_patterns, PDBCollection &candidate_pdbs) { const Pattern &pattern = pdb.get_pattern(); int pdb_size = pdb.get_size(); @@ -144,15 +146,14 @@ int PatternCollectionGeneratorHillclimbing::generate_candidate_pdbs( // Only use variables which are not already in the pattern. 
vector relevant_vars; set_difference( - connected_vars.begin(), connected_vars.end(), - pattern.begin(), pattern.end(), - back_inserter(relevant_vars)); + connected_vars.begin(), connected_vars.end(), pattern.begin(), + pattern.end(), back_inserter(relevant_vars)); for (int rel_var_id : relevant_vars) { VariableProxy rel_var = task_proxy.get_variables()[rel_var_id]; int rel_var_size = rel_var.get_domain_size(); - if (utils::is_product_within_limit(pdb_size, rel_var_size, - pdb_max_size)) { + if (utils::is_product_within_limit( + pdb_size, rel_var_size, pdb_max_size)) { Pattern new_pattern(pattern); new_pattern.push_back(rel_var_id); sort(new_pattern.begin(), new_pattern.end()); @@ -165,8 +166,8 @@ int PatternCollectionGeneratorHillclimbing::generate_candidate_pdbs( generated_patterns.insert(new_pattern); candidate_pdbs.push_back( compute_pdb(task_proxy, new_pattern)); - max_pdb_size = max(max_pdb_size, - candidate_pdbs.back()->get_size()); + max_pdb_size = + max(max_pdb_size, candidate_pdbs.back()->get_size()); } } else { ++num_rejected; @@ -177,18 +178,16 @@ int PatternCollectionGeneratorHillclimbing::generate_candidate_pdbs( } void PatternCollectionGeneratorHillclimbing::sample_states( - const sampling::RandomWalkSampler &sampler, - int init_h, + const sampling::RandomWalkSampler &sampler, int init_h, vector &samples) { assert(samples.empty()); samples.reserve(num_samples); for (int i = 0; i < num_samples; ++i) { - samples.push_back(sampler.sample_state( - init_h, - [this](const State &state) { - return current_pdbs->is_dead_end(state); - })); + samples.push_back( + sampler.sample_state(init_h, [this](const State &state) { + return current_pdbs->is_dead_end(state); + })); if (hill_climbing_timer->is_expired()) { throw HillClimbingTimeout(); } @@ -196,8 +195,7 @@ void PatternCollectionGeneratorHillclimbing::sample_states( } pair PatternCollectionGeneratorHillclimbing::find_best_improving_pdb( - const vector &samples, - const vector &samples_h_values, + const 
vector &samples, const vector &samples_h_values, PDBCollection &candidate_pdbs) { /* TODO: The original implementation by Haslum et al. uses A* to compute @@ -353,15 +351,15 @@ void PatternCollectionGeneratorHillclimbing::hill_climbing( int init_h = current_pdbs->get_value(initial_state); bool dead_end = init_h == numeric_limits::max(); if (log.is_at_least_verbose()) { - log << "current collection size is " - << current_pdbs->get_size() << endl; - log << "current initial h value: " - << (dead_end ? "infinite" : to_string(init_h)) + log << "current collection size is " << current_pdbs->get_size() << endl; + log << "current initial h value: " + << (dead_end ? "infinite" : to_string(init_h)) << endl; } if (dead_end) { if (log.is_at_least_normal()) { - log << "Initial state is a dead end. Stop hill climbing." << endl; + log << "Initial state is a dead end. Stop hill climbing." + << endl; } break; } @@ -373,8 +371,8 @@ void PatternCollectionGeneratorHillclimbing::hill_climbing( samples_h_values.push_back(current_pdbs->get_value(sample)); } - pair improvement_and_index = - find_best_improving_pdb(samples, samples_h_values, candidate_pdbs); + pair improvement_and_index = find_best_improving_pdb( + samples, samples_h_values, candidate_pdbs); int improvement = improvement_and_index.first; int best_pdb_index = improvement_and_index.second; @@ -411,8 +409,7 @@ void PatternCollectionGeneratorHillclimbing::hill_climbing( if (log.is_at_least_verbose()) { log << "Hill climbing time so far: " - << hill_climbing_timer->get_elapsed_time() - << endl; + << hill_climbing_timer->get_elapsed_time() << endl; } } } catch (HillClimbingTimeout &) { @@ -423,11 +420,12 @@ void PatternCollectionGeneratorHillclimbing::hill_climbing( if (log.is_at_least_normal()) { log << "Hill climbing iterations: " << num_iterations << endl; - log << "Hill climbing generated patterns: " << generated_patterns.size() << endl; + log << "Hill climbing generated patterns: " << generated_patterns.size() + << endl; 
log << "Hill climbing rejected patterns: " << num_rejected << endl; log << "Hill climbing maximum PDB size: " << max_pdb_size << endl; - log << "Hill climbing time: " - << hill_climbing_timer->get_elapsed_time() << endl; + log << "Hill climbing time: " << hill_climbing_timer->get_elapsed_time() + << endl; } delete hill_climbing_timer; @@ -438,7 +436,8 @@ string PatternCollectionGeneratorHillclimbing::name() const { return "hill climbing pattern collection generator"; } -PatternCollectionInformation PatternCollectionGeneratorHillclimbing::compute_patterns( +PatternCollectionInformation +PatternCollectionGeneratorHillclimbing::compute_patterns( const shared_ptr &task) { TaskProxy task_proxy(*task); utils::Timer timer; @@ -470,9 +469,8 @@ void add_hillclimbing_options_to_feature(plugins::Feature &feature) { "all patterns consisting of a single goal variable, even if this " "violates the pdb_max_size or collection_max_size limits."); feature.document_note( - "Note", - "This pattern generation method generates patterns optimized " - "for use with the canonical pattern database heuristic."); + "Note", "This pattern generation method generates patterns optimized " + "for use with the canonical pattern database heuristic."); feature.document_note( "Implementation Notes", "The following will very briefly describe the algorithm and explain " @@ -524,28 +522,23 @@ void add_hillclimbing_options_to_feature(plugins::Feature &feature) { true); feature.add_option( - "pdb_max_size", - "maximal number of states per pattern database ", - "2000000", - plugins::Bounds("1", "infinity")); + "pdb_max_size", "maximal number of states per pattern database ", + "2000000", plugins::Bounds("1", "infinity")); feature.add_option( "collection_max_size", - "maximal number of states in the pattern collection", - "20000000", + "maximal number of states in the pattern collection", "20000000", plugins::Bounds("1", "infinity")); feature.add_option( "num_samples", "number of samples (random states) on 
which to evaluate each " "candidate pattern collection", - "1000", - plugins::Bounds("1", "infinity")); + "1000", plugins::Bounds("1", "infinity")); feature.add_option( "min_improvement", "minimum number of samples on which a candidate pattern " "collection must improve on the current one to be considered " "as the next pattern collection ", - "10", - plugins::Bounds("1", "infinity")); + "10", plugins::Bounds("1", "infinity")); feature.add_option( "max_time", "maximum time in seconds for improving the initial pattern " @@ -553,70 +546,68 @@ void add_hillclimbing_options_to_feature(plugins::Feature &feature) { "is performed at all. Note that this limit only affects hill " "climbing. Use max_time_dominance_pruning to limit the time " "spent for pruning dominated patterns.", - "infinity", - plugins::Bounds("0.0", "infinity")); + "infinity", plugins::Bounds("0.0", "infinity")); utils::add_rng_options_to_feature(feature); } -tuple -get_hillclimbing_arguments_from_options(const plugins::Options &opts) { +tuple get_hillclimbing_arguments_from_options( + const plugins::Options &opts) { return tuple_cat( make_tuple( - opts.get("pdb_max_size"), - opts.get("collection_max_size"), - opts.get("num_samples"), - opts.get("min_improvement"), + opts.get("pdb_max_size"), opts.get("collection_max_size"), + opts.get("num_samples"), opts.get("min_improvement"), opts.get("max_time")), utils::get_rng_arguments_from_options(opts)); } static basic_string paper_references() { return utils::format_conference_reference( - {"Patrik Haslum", "Adi Botea", "Malte Helmert", "Blai Bonet", - "Sven Koenig"}, - "Domain-Independent Construction of Pattern Database Heuristics for" - " Cost-Optimal Planning", - "https://ai.dmi.unibas.ch/papers/haslum-et-al-aaai07.pdf", - "Proceedings of the 22nd AAAI Conference on Artificial" - " Intelligence (AAAI 2007)", - "1007-1012", - "AAAI Press", - "2007") + - "For implementation notes, see:" + utils::format_conference_reference( - {"Silvan Sievers", "Manuela 
Ortlieb", "Malte Helmert"}, - "Efficient Implementation of Pattern Database Heuristics for" - " Classical Planning", - "https://ai.dmi.unibas.ch/papers/sievers-et-al-socs2012.pdf", - "Proceedings of the Fifth Annual Symposium on Combinatorial" - " Search (SoCS 2012)", - "105-111", - "AAAI Press", - "2012"); + {"Patrik Haslum", "Adi Botea", "Malte Helmert", "Blai Bonet", + "Sven Koenig"}, + "Domain-Independent Construction of Pattern Database Heuristics for" + " Cost-Optimal Planning", + "https://ai.dmi.unibas.ch/papers/haslum-et-al-aaai07.pdf", + "Proceedings of the 22nd AAAI Conference on Artificial" + " Intelligence (AAAI 2007)", + "1007-1012", "AAAI Press", "2007") + + "For implementation notes, see:" + + utils::format_conference_reference( + {"Silvan Sievers", "Manuela Ortlieb", "Malte Helmert"}, + "Efficient Implementation of Pattern Database Heuristics for" + " Classical Planning", + "https://ai.dmi.unibas.ch/papers/sievers-et-al-socs2012.pdf", + "Proceedings of the Fifth Annual Symposium on Combinatorial" + " Search (SoCS 2012)", + "105-111", "AAAI Press", "2012"); } class PatternCollectionGeneratorHillclimbingFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorHillclimbing> { public: - PatternCollectionGeneratorHillclimbingFeature() : TypedFeature("hillclimbing") { + PatternCollectionGeneratorHillclimbingFeature() + : TypedFeature("hillclimbing") { document_title("Hill climbing"); document_synopsis( "This algorithm uses hill climbing to generate patterns " "optimized for the Evaluator#Canonical_PDB heuristic. 
It it described " - "in the following paper:" + paper_references()); + "in the following paper:" + + paper_references()); add_hillclimbing_options_to_feature(*this); add_generator_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorHillclimbing>( get_hillclimbing_arguments_from_options(opts), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; -static plugins::FeaturePlugin _plugin; +static plugins::FeaturePlugin + _plugin; class IPDBFeature : public plugins::TypedFeature { @@ -631,17 +622,19 @@ class IPDBFeature "generation. It is a short-hand for the command-line option " "{{{cpdbs(hillclimbing())}}}. " "Both the heuristic and the pattern generation algorithm are described " - "in the following paper:" + paper_references() + + "in the following paper:" + + paper_references() + "See also Evaluator#Canonical_PDB and " "PatternCollectionGenerator#Hill_climbing for more details."); add_hillclimbing_options_to_feature(*this); /* Add, possibly among others, the options for dominance pruning. - Note that using dominance pruning during hill climbing could lead to fewer - discovered patterns and pattern collections. A dominated pattern - (or pattern collection) might no longer be dominated after more patterns - are added. We thus only use dominance pruning on the resulting collection. + Note that using dominance pruning during hill climbing could lead to + fewer discovered patterns and pattern collections. A dominated pattern + (or pattern collection) might no longer be dominated after more + patterns are added. We thus only use dominance pruning on the + resulting collection. 
*/ add_canonical_pdbs_options_to_feature(*this); add_heuristic_options_to_feature(*this, "cpdbs"); @@ -656,19 +649,17 @@ class IPDBFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { shared_ptr pgh = - plugins::make_shared_from_arg_tuples( + plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorHillclimbing>( get_hillclimbing_arguments_from_options(opts), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); return plugins::make_shared_from_arg_tuples( - pgh, - opts.get("max_time_dominance_pruning"), - get_heuristic_arguments_from_options(opts) - ); + pgh, opts.get("max_time_dominance_pruning"), + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_collection_generator_hillclimbing.h b/src/search/pdbs/pattern_collection_generator_hillclimbing.h index 471bddde9f..809bac3fca 100644 --- a/src/search/pdbs/pattern_collection_generator_hillclimbing.h +++ b/src/search/pdbs/pattern_collection_generator_hillclimbing.h @@ -26,7 +26,8 @@ class IncrementalCanonicalPDBs; class PatternDatabase; // Implementation of the pattern generation algorithm by Haslum et al. 
-class PatternCollectionGeneratorHillclimbing : public PatternCollectionGenerator { +class PatternCollectionGeneratorHillclimbing + : public PatternCollectionGenerator { // maximum number of states for each pdb const int pdb_max_size; // maximum added size of all pdbs @@ -55,8 +56,7 @@ class PatternCollectionGeneratorHillclimbing : public PatternCollectionGenerator int generate_candidate_pdbs( const TaskProxy &task_proxy, const std::vector> &relevant_neighbours, - const PatternDatabase &pdb, - std::set &generated_patterns, + const PatternDatabase &pdb, std::set &generated_patterns, PDBCollection &candidate_pdbs); /* @@ -70,8 +70,7 @@ class PatternCollectionGeneratorHillclimbing : public PatternCollectionGenerator a sample state, thus totalling exactly num_samples of sample states. */ void sample_states( - const sampling::RandomWalkSampler &sampler, - int init_h, + const sampling::RandomWalkSampler &sampler, int init_h, std::vector &samples); /* @@ -91,9 +90,7 @@ class PatternCollectionGeneratorHillclimbing : public PatternCollectionGenerator the h-value of the current pattern collection. 
*/ bool is_heuristic_improved( - const PatternDatabase &pdb, - const State &sample, - int h_collection, + const PatternDatabase &pdb, const State &sample, int h_collection, const PDBCollection &pdbs, const std::vector &pattern_cliques); @@ -138,11 +135,9 @@ class PatternCollectionGeneratorHillclimbing : public PatternCollectionGenerator utils::Verbosity verbosity); }; -extern void add_hillclimbing_options_to_feature( - plugins::Feature &feature); +extern void add_hillclimbing_options_to_feature(plugins::Feature &feature); std::tuple -get_hillclimbing_arguments_from_options( - const plugins::Options &opts); +get_hillclimbing_arguments_from_options(const plugins::Options &opts); } #endif diff --git a/src/search/pdbs/pattern_collection_generator_manual.cc b/src/search/pdbs/pattern_collection_generator_manual.cc index 3e67a9d516..b8ab325ea5 100644 --- a/src/search/pdbs/pattern_collection_generator_manual.cc +++ b/src/search/pdbs/pattern_collection_generator_manual.cc @@ -32,9 +32,11 @@ PatternCollectionInformation PatternCollectionGeneratorManual::compute_patterns( } class PatternCollectionGeneratorManualFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorManual> { public: - PatternCollectionGeneratorManualFeature() : TypedFeature("manual_patterns") { + PatternCollectionGeneratorManualFeature() + : TypedFeature("manual_patterns") { add_list_option( "patterns", "list of patterns (which are lists of variable numbers of the planning " @@ -42,12 +44,12 @@ class PatternCollectionGeneratorManualFeature add_generator_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorManual>( opts.get_list("patterns"), - 
get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_collection_generator_manual.h b/src/search/pdbs/pattern_collection_generator_manual.h index 91fc823d61..986bc9f018 100644 --- a/src/search/pdbs/pattern_collection_generator_manual.h +++ b/src/search/pdbs/pattern_collection_generator_manual.h @@ -15,8 +15,7 @@ class PatternCollectionGeneratorManual : public PatternCollectionGenerator { const std::shared_ptr &task) override; public: explicit PatternCollectionGeneratorManual( - const std::vector &patterns, - utils::Verbosity verbosity); + const std::vector &patterns, utils::Verbosity verbosity); }; } diff --git a/src/search/pdbs/pattern_collection_generator_multiple.cc b/src/search/pdbs/pattern_collection_generator_multiple.cc index 80cb6b16ab..1508abed15 100644 --- a/src/search/pdbs/pattern_collection_generator_multiple.cc +++ b/src/search/pdbs/pattern_collection_generator_multiple.cc @@ -26,8 +26,7 @@ PatternCollectionGeneratorMultiple::PatternCollectionGeneratorMultiple( pattern_generation_max_time(pattern_generation_max_time), total_max_time(total_max_time), stagnation_limit(stagnation_limit), - blacklisting_start_time( - total_max_time * blacklist_trigger_percentage), + blacklisting_start_time(total_max_time * blacklist_trigger_percentage), enable_blacklist_on_stagnation(enable_blacklist_on_stagnation), rng(utils::get_rng(random_seed)), random_seed(random_seed), @@ -53,7 +52,8 @@ void PatternCollectionGeneratorMultiple::check_blacklist_trigger_timer( } } -unordered_set PatternCollectionGeneratorMultiple::get_blacklisted_variables( +unordered_set +PatternCollectionGeneratorMultiple::get_blacklisted_variables( vector &non_goal_variables) { unordered_set blacklisted_variables; if (blacklisting && !non_goal_variables.empty()) { @@ -66,11 +66,11 @@ unordered_set PatternCollectionGeneratorMultiple::get_blacklisted_variables ++blacklist_size; rng->shuffle(non_goal_variables); 
blacklisted_variables.insert( - non_goal_variables.begin(), non_goal_variables.begin() + blacklist_size); + non_goal_variables.begin(), + non_goal_variables.begin() + blacklist_size); if (log.is_at_least_debug()) { log << "blacklisting " << blacklist_size << " out of " - << non_goal_variables.size() - << " non-goal variables: "; + << non_goal_variables.size() << " non-goal variables: "; for (int var : blacklisted_variables) { log << var << ", "; } @@ -81,8 +81,7 @@ unordered_set PatternCollectionGeneratorMultiple::get_blacklisted_variables } void PatternCollectionGeneratorMultiple::handle_generated_pattern( - PatternInformation &&pattern_info, - set &generated_patterns, + PatternInformation &&pattern_info, set &generated_patterns, shared_ptr &generated_pdbs, const utils::CountdownTimer &timer) { const Pattern &pattern = pattern_info.get_pattern(); @@ -131,13 +130,13 @@ bool PatternCollectionGeneratorMultiple::time_limit_reached( bool PatternCollectionGeneratorMultiple::check_for_stagnation( const utils::CountdownTimer &timer) { // Test if no new pattern was generated for longer than stagnation_limit. 
- if (timer.get_elapsed_time() - time_point_of_last_new_pattern > stagnation_limit) { + if (timer.get_elapsed_time() - time_point_of_last_new_pattern > + stagnation_limit) { if (enable_blacklist_on_stagnation) { if (blacklisting) { if (log.is_at_least_normal()) { log << "stagnation limit reached " - << "despite blacklisting, terminating" - << endl; + << "despite blacklisting, terminating" << endl; } return true; } else { @@ -162,7 +161,8 @@ string PatternCollectionGeneratorMultiple::name() const { return "multiple " + id() + " pattern collection generator"; } -PatternCollectionInformation PatternCollectionGeneratorMultiple::compute_patterns( +PatternCollectionInformation +PatternCollectionGeneratorMultiple::compute_patterns( const shared_ptr &task) { if (log.is_at_least_normal()) { log << "max pdb size: " << max_pdb_size << endl; @@ -211,23 +211,16 @@ PatternCollectionInformation PatternCollectionGeneratorMultiple::compute_pattern int remaining_pdb_size = min(remaining_collection_size, max_pdb_size); double remaining_time = - min(static_cast(timer.get_remaining_time()), pattern_generation_max_time); + min(static_cast(timer.get_remaining_time()), + pattern_generation_max_time); PatternInformation pattern_info = compute_pattern( - remaining_pdb_size, - remaining_time, - pattern_computation_rng, - task, - goals[goal_index], - move(blacklisted_variables)); + remaining_pdb_size, remaining_time, pattern_computation_rng, task, + goals[goal_index], move(blacklisted_variables)); handle_generated_pattern( - move(pattern_info), - generated_patterns, - generated_pdbs, - timer); + move(pattern_info), generated_patterns, generated_pdbs, timer); - if (collection_size_limit_reached() || - time_limit_reached(timer) || + if (collection_size_limit_reached() || time_limit_reached(timer) || check_for_stagnation(timer)) { break; } @@ -238,14 +231,12 @@ PatternCollectionInformation PatternCollectionGeneratorMultiple::compute_pattern assert(utils::in_bounds(goal_index, goals)); } - 
PatternCollectionInformation result = get_pattern_collection_info( - task_proxy, generated_pdbs, log); + PatternCollectionInformation result = + get_pattern_collection_info(task_proxy, generated_pdbs, log); if (log.is_at_least_normal()) { - log << name() << " number of iterations: " - << num_iterations << endl; + log << name() << " number of iterations: " << num_iterations << endl; log << name() << " average time per generator: " - << timer.get_elapsed_time() / num_iterations - << endl; + << timer.get_elapsed_time() / num_iterations << endl; } return result; } @@ -285,39 +276,33 @@ void add_multiple_options_to_feature(plugins::Feature &feature) { "maximum number of states for each pattern database, computed " "by compute_pattern (possibly ignored by singleton patterns consisting " "of a goal variable)", - "1M", - plugins::Bounds("1", "infinity")); + "1M", plugins::Bounds("1", "infinity")); feature.add_option( "max_collection_size", "maximum number of states in all pattern databases of the " "collection (possibly ignored, see max_pdb_size)", - "10M", - plugins::Bounds("1", "infinity")); + "10M", plugins::Bounds("1", "infinity")); feature.add_option( "pattern_generation_max_time", "maximum time in seconds for each call to the algorithm for " "computing a single pattern", - "infinity", - plugins::Bounds("0.0", "infinity")); + "infinity", plugins::Bounds("0.0", "infinity")); feature.add_option( "total_max_time", "maximum time in seconds for this pattern collection generator. " "It will always execute at least one iteration, i.e., call the " "algorithm for computing a single pattern at least once.", - "100.0", - plugins::Bounds("0.0", "infinity")); + "100.0", plugins::Bounds("0.0", "infinity")); feature.add_option( "stagnation_limit", "maximum time in seconds this pattern generator is allowed to run " "without generating a new pattern. 
It terminates prematurely if this " "limit is hit unless enable_blacklist_on_stagnation is enabled.", - "20.0", - plugins::Bounds("1.0", "infinity")); + "20.0", plugins::Bounds("1.0", "infinity")); feature.add_option( "blacklist_trigger_percentage", "percentage of total_max_time after which blacklisting is enabled", - "0.75", - plugins::Bounds("0.0", "1.0")); + "0.75", plugins::Bounds("0.0", "1.0")); feature.add_option( "enable_blacklist_on_stagnation", "if true, blacklisting is enabled when stagnation_limit is hit " @@ -331,13 +316,11 @@ void add_multiple_options_to_feature(plugins::Feature &feature) { add_generator_options_to_feature(feature); } -tuple +tuple get_multiple_arguments_from_options(const plugins::Options &opts) { return tuple_cat( make_tuple( - opts.get("max_pdb_size"), - opts.get("max_collection_size"), + opts.get("max_pdb_size"), opts.get("max_collection_size"), opts.get("pattern_generation_max_time"), opts.get("total_max_time"), opts.get("stagnation_limit"), diff --git a/src/search/pdbs/pattern_collection_generator_multiple.h b/src/search/pdbs/pattern_collection_generator_multiple.h index 1d6c3d5b84..04cac499c4 100644 --- a/src/search/pdbs/pattern_collection_generator_multiple.h +++ b/src/search/pdbs/pattern_collection_generator_multiple.h @@ -29,9 +29,9 @@ namespace pdbs { The algorithm runs until reaching a given time limit. Another parameter allows exiting early if no new patterns are found for a certain time ("stagnation"). - Further parameters allow enabling blacklisting for the given pattern computation - method after a certain time to force some diversification or to enable said - blacklisting when stagnating. + Further parameters allow enabling blacklisting for the given pattern + computation method after a certain time to force some diversification or to + enable said blacklisting when stagnating. 
*/ class PatternCollectionGeneratorMultiple : public PatternCollectionGenerator { const int max_pdb_size; @@ -62,11 +62,9 @@ class PatternCollectionGeneratorMultiple : public PatternCollectionGenerator { virtual std::string id() const = 0; virtual void initialize(const std::shared_ptr &task) = 0; virtual PatternInformation compute_pattern( - int max_pdb_size, - double max_time, + int max_pdb_size, double max_time, const std::shared_ptr &rng, - const std::shared_ptr &task, - const FactPair &goal, + const std::shared_ptr &task, const FactPair &goal, std::unordered_set &&blacklisted_variables) = 0; virtual std::string name() const override; virtual PatternCollectionInformation compute_patterns( @@ -84,8 +82,8 @@ extern void add_multiple_algorithm_implementation_notes_to_feature( plugins::Feature &feature); extern void add_multiple_options_to_feature(plugins::Feature &feature); -extern std::tuple +extern std::tuple< + int, int, double, double, double, double, bool, int, utils::Verbosity> get_multiple_arguments_from_options(const plugins::Options &opts); } diff --git a/src/search/pdbs/pattern_collection_generator_multiple_cegar.cc b/src/search/pdbs/pattern_collection_generator_multiple_cegar.cc index 04e82f589f..0d159a4e6b 100644 --- a/src/search/pdbs/pattern_collection_generator_multiple_cegar.cc +++ b/src/search/pdbs/pattern_collection_generator_multiple_cegar.cc @@ -10,17 +10,17 @@ using namespace std; namespace pdbs { -PatternCollectionGeneratorMultipleCegar::PatternCollectionGeneratorMultipleCegar( - bool use_wildcard_plans, int max_pdb_size, int max_collection_size, - double pattern_generation_max_time, double total_max_time, - double stagnation_limit, double blacklist_trigger_percentage, - bool enable_blacklist_on_stagnation, int random_seed, - utils::Verbosity verbosity) +PatternCollectionGeneratorMultipleCegar:: + PatternCollectionGeneratorMultipleCegar( + bool use_wildcard_plans, int max_pdb_size, int max_collection_size, + double pattern_generation_max_time, 
double total_max_time, + double stagnation_limit, double blacklist_trigger_percentage, + bool enable_blacklist_on_stagnation, int random_seed, + utils::Verbosity verbosity) : PatternCollectionGeneratorMultiple( - max_pdb_size, max_collection_size, - pattern_generation_max_time, total_max_time, stagnation_limit, - blacklist_trigger_percentage, enable_blacklist_on_stagnation, - random_seed, verbosity), + max_pdb_size, max_collection_size, pattern_generation_max_time, + total_max_time, stagnation_limit, blacklist_trigger_percentage, + enable_blacklist_on_stagnation, random_seed, verbosity), use_wildcard_plans(use_wildcard_plans) { } @@ -29,32 +29,27 @@ string PatternCollectionGeneratorMultipleCegar::id() const { } PatternInformation PatternCollectionGeneratorMultipleCegar::compute_pattern( - int max_pdb_size, - double max_time, + int max_pdb_size, double max_time, const shared_ptr &rng, - const shared_ptr &task, - const FactPair &goal, + const shared_ptr &task, const FactPair &goal, unordered_set &&blacklisted_variables) { utils::LogProxy silent_log = utils::get_silent_log(); return generate_pattern_with_cegar( - max_pdb_size, - max_time, - use_wildcard_plans, - silent_log, - rng, - task, - goal, + max_pdb_size, max_time, use_wildcard_plans, silent_log, rng, task, goal, move(blacklisted_variables)); } class PatternCollectionGeneratorMultipleCegarFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorMultipleCegar> { public: - PatternCollectionGeneratorMultipleCegarFeature() : TypedFeature("multiple_cegar") { + PatternCollectionGeneratorMultipleCegarFeature() + : TypedFeature("multiple_cegar") { document_title("Multiple CEGAR"); document_synopsis( "This pattern collection generator implements the multiple CEGAR " - "algorithm described in the paper" + get_rovner_et_al_reference() + + "algorithm described in the paper" + + get_rovner_et_al_reference() + "It is an instantiation of the 'multiple 
algorithm framework'. " "To compute a pattern in each iteration, it uses the CEGAR algorithm " "restricted to a single goal variable. See below for descriptions of " @@ -69,12 +64,13 @@ class PatternCollectionGeneratorMultipleCegarFeature virtual shared_ptr create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorMultipleCegar>( get_cegar_wildcard_arguments_from_options(opts), - get_multiple_arguments_from_options(opts) - ); + get_multiple_arguments_from_options(opts)); } }; -static plugins::FeaturePlugin _plugin; +static plugins::FeaturePlugin + _plugin; } diff --git a/src/search/pdbs/pattern_collection_generator_multiple_cegar.h b/src/search/pdbs/pattern_collection_generator_multiple_cegar.h index a2e9b6d40f..5afa38ef6c 100644 --- a/src/search/pdbs/pattern_collection_generator_multiple_cegar.h +++ b/src/search/pdbs/pattern_collection_generator_multiple_cegar.h @@ -4,24 +4,23 @@ #include "pattern_collection_generator_multiple.h" namespace pdbs { -class PatternCollectionGeneratorMultipleCegar : public PatternCollectionGeneratorMultiple { +class PatternCollectionGeneratorMultipleCegar + : public PatternCollectionGeneratorMultiple { const bool use_wildcard_plans; virtual std::string id() const override; - virtual void initialize(const std::shared_ptr &) override {} + virtual void initialize(const std::shared_ptr &) override { + } virtual PatternInformation compute_pattern( - int max_pdb_size, - double max_time, + int max_pdb_size, double max_time, const std::shared_ptr &rng, - const std::shared_ptr &task, - const FactPair &goal, + const std::shared_ptr &task, const FactPair &goal, std::unordered_set &&blacklisted_variables) override; public: PatternCollectionGeneratorMultipleCegar( - bool use_wildcard_plans, int max_pdb_size, - int max_collection_size, double pattern_generation_max_time, - double total_max_time, double stagnation_limit, - 
double blacklist_trigger_percentage, + bool use_wildcard_plans, int max_pdb_size, int max_collection_size, + double pattern_generation_max_time, double total_max_time, + double stagnation_limit, double blacklist_trigger_percentage, bool enable_blacklist_on_stagnation, int random_seed, utils::Verbosity verbosity); }; diff --git a/src/search/pdbs/pattern_collection_generator_multiple_random.cc b/src/search/pdbs/pattern_collection_generator_multiple_random.cc index ab79ad13d2..d39142d648 100644 --- a/src/search/pdbs/pattern_collection_generator_multiple_random.cc +++ b/src/search/pdbs/pattern_collection_generator_multiple_random.cc @@ -13,17 +13,17 @@ using namespace std; namespace pdbs { -PatternCollectionGeneratorMultipleRandom::PatternCollectionGeneratorMultipleRandom( - bool bidirectional, int max_pdb_size, int max_collection_size, - double pattern_generation_max_time, double total_max_time, - double stagnation_limit, double blacklist_trigger_percentage, - bool enable_blacklist_on_stagnation, int random_seed, - utils::Verbosity verbosity) +PatternCollectionGeneratorMultipleRandom:: + PatternCollectionGeneratorMultipleRandom( + bool bidirectional, int max_pdb_size, int max_collection_size, + double pattern_generation_max_time, double total_max_time, + double stagnation_limit, double blacklist_trigger_percentage, + bool enable_blacklist_on_stagnation, int random_seed, + utils::Verbosity verbosity) : PatternCollectionGeneratorMultiple( - max_pdb_size, max_collection_size, - pattern_generation_max_time, total_max_time, stagnation_limit, - blacklist_trigger_percentage, enable_blacklist_on_stagnation, - random_seed, verbosity), + max_pdb_size, max_collection_size, pattern_generation_max_time, + total_max_time, stagnation_limit, blacklist_trigger_percentage, + enable_blacklist_on_stagnation, random_seed, verbosity), bidirectional(bidirectional) { } @@ -38,21 +38,14 @@ void PatternCollectionGeneratorMultipleRandom::initialize( } PatternInformation 
PatternCollectionGeneratorMultipleRandom::compute_pattern( - int max_pdb_size, - double max_time, + int max_pdb_size, double max_time, const shared_ptr &rng, - const shared_ptr &task, - const FactPair &goal, + const shared_ptr &task, const FactPair &goal, unordered_set &&) { // TODO: add support for blacklisting in single RCG? utils::LogProxy silent_log = utils::get_silent_log(); Pattern pattern = generate_random_pattern( - max_pdb_size, - max_time, - silent_log, - rng, - TaskProxy(*task), - goal.var, + max_pdb_size, max_time, silent_log, rng, TaskProxy(*task), goal.var, cg_neighbors); PatternInformation result(TaskProxy(*task), move(pattern), log); @@ -60,14 +53,18 @@ PatternInformation PatternCollectionGeneratorMultipleRandom::compute_pattern( } class PatternCollectionGeneratorMultipleRandomFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, + PatternCollectionGeneratorMultipleRandom> { public: - PatternCollectionGeneratorMultipleRandomFeature() : TypedFeature("random_patterns") { + PatternCollectionGeneratorMultipleRandomFeature() + : TypedFeature("random_patterns") { document_title("Multiple Random Patterns"); document_synopsis( "This pattern collection generator implements the 'multiple " "randomized causal graph' (mRCG) algorithm described in experiments of " - "the paper" + get_rovner_et_al_reference() + + "the paper" + + get_rovner_et_al_reference() + "It is an instantiation of the 'multiple algorithm framework'. 
" "To compute a pattern in each iteration, it uses the random " "pattern algorithm, called 'single randomized causal graph' (sRCG) " @@ -82,12 +79,13 @@ class PatternCollectionGeneratorMultipleRandomFeature virtual shared_ptr create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorMultipleRandom>( get_random_pattern_bidirectional_arguments_from_options(opts), - get_multiple_arguments_from_options(opts) - ); + get_multiple_arguments_from_options(opts)); } }; -static plugins::FeaturePlugin _plugin; +static plugins::FeaturePlugin + _plugin; } diff --git a/src/search/pdbs/pattern_collection_generator_multiple_random.h b/src/search/pdbs/pattern_collection_generator_multiple_random.h index fc7a05f261..5dd5921ece 100644 --- a/src/search/pdbs/pattern_collection_generator_multiple_random.h +++ b/src/search/pdbs/pattern_collection_generator_multiple_random.h @@ -4,18 +4,17 @@ #include "pattern_collection_generator_multiple.h" namespace pdbs { -class PatternCollectionGeneratorMultipleRandom : public PatternCollectionGeneratorMultiple { +class PatternCollectionGeneratorMultipleRandom + : public PatternCollectionGeneratorMultiple { const bool bidirectional; std::vector> cg_neighbors; virtual std::string id() const override; virtual void initialize(const std::shared_ptr &task) override; virtual PatternInformation compute_pattern( - int max_pdb_size, - double max_time, + int max_pdb_size, double max_time, const std::shared_ptr &rng, - const std::shared_ptr &task, - const FactPair &goal, + const std::shared_ptr &task, const FactPair &goal, std::unordered_set &&blacklisted_variables) override; public: PatternCollectionGeneratorMultipleRandom( diff --git a/src/search/pdbs/pattern_collection_generator_systematic.cc b/src/search/pdbs/pattern_collection_generator_systematic.cc index c419072064..e8882d61cb 100644 --- 
a/src/search/pdbs/pattern_collection_generator_systematic.cc +++ b/src/search/pdbs/pattern_collection_generator_systematic.cc @@ -40,12 +40,11 @@ static void compute_union_pattern( const Pattern &pattern1, const Pattern &pattern2, Pattern &result) { result.clear(); result.reserve(pattern1.size() + pattern2.size()); - set_union(pattern1.begin(), pattern1.end(), - pattern2.begin(), pattern2.end(), - back_inserter(result)); + set_union( + pattern1.begin(), pattern1.end(), pattern2.begin(), pattern2.end(), + back_inserter(result)); } - PatternCollectionGeneratorSystematic::PatternCollectionGeneratorSystematic( int pattern_max_size, bool only_interesting_patterns, utils::Verbosity verbosity) @@ -55,7 +54,8 @@ PatternCollectionGeneratorSystematic::PatternCollectionGeneratorSystematic( } void PatternCollectionGeneratorSystematic::compute_eff_pre_neighbors( - const causal_graph::CausalGraph &cg, const Pattern &pattern, vector &result) const { + const causal_graph::CausalGraph &cg, const Pattern &pattern, + vector &result) const { /* Compute all variables that are reachable from pattern by an (eff, pre) arc and are not already contained in the pattern. @@ -77,7 +77,8 @@ void PatternCollectionGeneratorSystematic::compute_eff_pre_neighbors( } void PatternCollectionGeneratorSystematic::compute_connection_points( - const causal_graph::CausalGraph &cg, const Pattern &pattern, vector &result) const { + const causal_graph::CausalGraph &cg, const Pattern &pattern, + vector &result) const { /* The "connection points" of a pattern are those variables of which one must be contained in an SGA pattern that can be attached to this @@ -155,7 +156,8 @@ void PatternCollectionGeneratorSystematic::build_sga_patterns( the patterns vectors grows during the computation. */ for (size_t pattern_no = 0; pattern_no < patterns->size(); ++pattern_no) { - // We must copy the pattern because references to patterns can be invalidated. 
+ // We must copy the pattern because references to patterns can be + // invalidated. Pattern pattern = (*patterns)[pattern_no]; if (pattern.size() == max_pattern_size) break; @@ -204,7 +206,6 @@ void PatternCollectionGeneratorSystematic::build_patterns( for (const Pattern &pattern : sga_patterns) enqueue_pattern_if_new(pattern); - if (log.is_at_least_normal()) { log << "Found " << sga_patterns.size() << " SGA patterns." << endl; } @@ -215,7 +216,8 @@ void PatternCollectionGeneratorSystematic::build_patterns( during the computation. */ for (size_t pattern_no = 0; pattern_no < patterns->size(); ++pattern_no) { - // We must copy the pattern because references to patterns can be invalidated. + // We must copy the pattern because references to patterns can be + // invalidated. Pattern pattern1 = (*patterns)[pattern_no]; vector neighbors; @@ -226,7 +228,7 @@ void PatternCollectionGeneratorSystematic::build_patterns( for (const Pattern *p_pattern2 : candidates) { const Pattern &pattern2 = *p_pattern2; if (pattern1.size() + pattern2.size() > max_pattern_size) - break; // All remaining candidates are too large. + break; // All remaining candidates are too large. 
if (patterns_are_disjoint(pattern1, pattern2)) { Pattern new_pattern; compute_union_pattern(pattern1, pattern2, new_pattern); @@ -272,7 +274,8 @@ string PatternCollectionGeneratorSystematic::name() const { return "systematic pattern collection generator"; } -PatternCollectionInformation PatternCollectionGeneratorSystematic::compute_patterns( +PatternCollectionInformation +PatternCollectionGeneratorSystematic::compute_patterns( const shared_ptr &task) { TaskProxy task_proxy(*task); patterns = make_shared(); @@ -286,27 +289,25 @@ PatternCollectionInformation PatternCollectionGeneratorSystematic::compute_patte } class PatternCollectionGeneratorSystematicFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + PatternCollectionGenerator, PatternCollectionGeneratorSystematic> { public: PatternCollectionGeneratorSystematicFeature() : TypedFeature("systematic") { document_title("Systematically generated patterns"); document_synopsis( "Generates all (interesting) patterns with up to pattern_max_size " "variables. 
" - "For details, see" + utils::format_conference_reference( + "For details, see" + + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert"}, "Getting the Most Out of Pattern Databases for Classical Planning", "https://ai.dmi.unibas.ch/papers/pommerening-et-al-ijcai2013.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", - "2357-2364", - "AAAI Press", - "2013")); + "2357-2364", "AAAI Press", "2013")); add_option( - "pattern_max_size", - "max number of variables per pattern", - "1", + "pattern_max_size", "max number of variables per pattern", "1", plugins::Bounds("1", "infinity")); add_option( "only_interesting_patterns", @@ -316,15 +317,16 @@ class PatternCollectionGeneratorSystematicFeature add_generator_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { - return plugins::make_shared_from_arg_tuples( + virtual shared_ptr create_component( + const plugins::Options &opts) const override { + return plugins::make_shared_from_arg_tuples< + PatternCollectionGeneratorSystematic>( opts.get("pattern_max_size"), opts.get("only_interesting_patterns"), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; -static plugins::FeaturePlugin _plugin; +static plugins::FeaturePlugin + _plugin; } diff --git a/src/search/pdbs/pattern_collection_generator_systematic.h b/src/search/pdbs/pattern_collection_generator_systematic.h index 0db2bf98f3..82cbae074c 100644 --- a/src/search/pdbs/pattern_collection_generator_systematic.h +++ b/src/search/pdbs/pattern_collection_generator_systematic.h @@ -27,17 +27,18 @@ class PatternCollectionGeneratorSystematic : public PatternCollectionGenerator { const size_t max_pattern_size; const bool only_interesting_patterns; std::shared_ptr patterns; - PatternSet pattern_set; // Cleared after pattern computation. 
+ PatternSet pattern_set; // Cleared after pattern computation. void enqueue_pattern_if_new(const Pattern &pattern); - void compute_eff_pre_neighbors(const causal_graph::CausalGraph &cg, - const Pattern &pattern, - std::vector &result) const; - void compute_connection_points(const causal_graph::CausalGraph &cg, - const Pattern &pattern, - std::vector &result) const; - - void build_sga_patterns(const TaskProxy &task_proxy, const causal_graph::CausalGraph &cg); + void compute_eff_pre_neighbors( + const causal_graph::CausalGraph &cg, const Pattern &pattern, + std::vector &result) const; + void compute_connection_points( + const causal_graph::CausalGraph &cg, const Pattern &pattern, + std::vector &result) const; + + void build_sga_patterns( + const TaskProxy &task_proxy, const causal_graph::CausalGraph &cg); void build_patterns(const TaskProxy &task_proxy); void build_patterns_naive(const TaskProxy &task_proxy); virtual std::string name() const override; diff --git a/src/search/pdbs/pattern_collection_information.cc b/src/search/pdbs/pattern_collection_information.cc index 925dbac2c1..cb1d7e35eb 100644 --- a/src/search/pdbs/pattern_collection_information.cc +++ b/src/search/pdbs/pattern_collection_information.cc @@ -1,8 +1,8 @@ #include "pattern_collection_information.h" +#include "pattern_cliques.h" #include "pattern_database.h" #include "pattern_database_factory.h" -#include "pattern_cliques.h" #include "validation.h" #include "../utils/logging.h" @@ -17,8 +17,7 @@ using namespace std; namespace pdbs { PatternCollectionInformation::PatternCollectionInformation( - const TaskProxy &task_proxy, - const shared_ptr &patterns, + const TaskProxy &task_proxy, const shared_ptr &patterns, utils::LogProxy &log) : task_proxy(task_proxy), patterns(patterns), @@ -65,13 +64,12 @@ void PatternCollectionInformation::create_pdbs_if_missing() { } pdbs = make_shared(); for (const Pattern &pattern : *patterns) { - shared_ptr pdb = - compute_pdb(task_proxy, pattern); + shared_ptr pdb = 
compute_pdb(task_proxy, pattern); pdbs->push_back(pdb); } if (log.is_at_least_normal()) { - log << "Done computing PDBs for pattern collection: " - << timer << endl; + log << "Done computing PDBs for pattern collection: " << timer + << endl; } } } @@ -80,7 +78,8 @@ void PatternCollectionInformation::create_pattern_cliques_if_missing() { if (!pattern_cliques) { utils::Timer timer; if (log.is_at_least_normal()) { - log << "Computing pattern cliques for pattern collection..." << endl; + log << "Computing pattern cliques for pattern collection..." + << endl; } VariableAdditivity are_additive = compute_additive_vars(task_proxy); pattern_cliques = compute_pattern_cliques(*patterns, are_additive); @@ -91,7 +90,8 @@ void PatternCollectionInformation::create_pattern_cliques_if_missing() { } } -void PatternCollectionInformation::set_pdbs(const shared_ptr &pdbs_) { +void PatternCollectionInformation::set_pdbs( + const shared_ptr &pdbs_) { pdbs = pdbs_; assert(information_is_valid()); } @@ -102,7 +102,8 @@ void PatternCollectionInformation::set_pattern_cliques( assert(information_is_valid()); } -shared_ptr PatternCollectionInformation::get_patterns() const { +shared_ptr +PatternCollectionInformation::get_patterns() const { assert(patterns); return patterns; } @@ -112,7 +113,8 @@ shared_ptr PatternCollectionInformation::get_pdbs() { return pdbs; } -shared_ptr> PatternCollectionInformation::get_pattern_cliques() { +shared_ptr> +PatternCollectionInformation::get_pattern_cliques() { create_pattern_cliques_if_missing(); return pattern_cliques; } diff --git a/src/search/pdbs/pattern_database.cc b/src/search/pdbs/pattern_database.cc index 0137f72192..c23b73ca01 100644 --- a/src/search/pdbs/pattern_database.cc +++ b/src/search/pdbs/pattern_database.cc @@ -1,7 +1,6 @@ #include "pattern_database.h" #include "../task_utils/task_properties.h" - #include "../utils/logging.h" #include "../utils/math.h" @@ -13,8 +12,7 @@ using namespace std; namespace pdbs { -Projection::Projection( - const 
TaskProxy &task_proxy, const Pattern &pattern) +Projection::Projection(const TaskProxy &task_proxy, const Pattern &pattern) : pattern(pattern) { task_properties::verify_no_axioms(task_proxy); task_properties::verify_no_conditional_effects(task_proxy); @@ -29,9 +27,7 @@ Projection::Projection( int domain_size = var.get_domain_size(); domain_sizes.push_back(domain_size); if (utils::is_product_within_limit( - num_abstract_states, - domain_size, - numeric_limits::max())) { + num_abstract_states, domain_size, numeric_limits::max())) { num_abstract_states *= domain_size; } else { cerr << "Given pattern is too large! (Overflow occured): " << endl; @@ -55,10 +51,8 @@ int Projection::unrank(int index, int var) const { } PatternDatabase::PatternDatabase( - Projection &&projection, - vector &&distances) - : projection(move(projection)), - distances(move(distances)) { + Projection &&projection, vector &&distances) + : projection(move(projection)), distances(move(distances)) { } int PatternDatabase::get_value(const vector &state) const { diff --git a/src/search/pdbs/pattern_database.h b/src/search/pdbs/pattern_database.h index 2b1c434056..bc33ddbb81 100644 --- a/src/search/pdbs/pattern_database.h +++ b/src/search/pdbs/pattern_database.h @@ -47,9 +47,7 @@ class PatternDatabase { */ std::vector distances; public: - PatternDatabase( - Projection &&projection, - std::vector &&distances); + PatternDatabase(Projection &&projection, std::vector &&distances); int get_value(const std::vector &state) const; const Pattern &get_pattern() const { diff --git a/src/search/pdbs/pattern_database_factory.cc b/src/search/pdbs/pattern_database_factory.cc index 3cf10cf1ac..2e4880000f 100644 --- a/src/search/pdbs/pattern_database_factory.cc +++ b/src/search/pdbs/pattern_database_factory.cc @@ -37,11 +37,8 @@ class PatternDatabaseFactory { progression search. 
*/ AbstractOperator build_abstract_operator( - const vector &prev_pairs, - const vector &pre_pairs, - const vector &eff_pairs, - int concrete_op_id, - int cost) const; + const vector &prev_pairs, const vector &pre_pairs, + const vector &eff_pairs, int concrete_op_id, int cost) const; /* Recursive method; called by build_abstract_operators_for_op. In the case @@ -51,12 +48,8 @@ class PatternDatabaseFactory { abstract operator with a concrete value (!= -1) is computed. */ void multiply_out( - int concrete_op_id, - int pos, - int cost, - vector &prev_pairs, - vector &pre_pairs, - vector &eff_pairs, + int concrete_op_id, int pos, int cost, vector &prev_pairs, + vector &pre_pairs, vector &eff_pairs, const vector &effects_without_pre, vector &operators) const; @@ -67,8 +60,7 @@ class PatternDatabaseFactory { variables in the task to their index in the pattern or -1. */ void build_abstract_operators_for_op( - const OperatorProxy &op, - int cost, + const OperatorProxy &op, int cost, vector &operators) const; void compute_abstract_operators(const vector &operator_costs); @@ -99,8 +91,7 @@ class PatternDatabaseFactory { bool compute_wildcard_plan); public: PatternDatabaseFactory( - const TaskProxy &task_proxy, - const Pattern &pattern, + const TaskProxy &task_proxy, const Pattern &pattern, const vector &operator_costs = vector(), bool compute_plan = false, const shared_ptr &rng = nullptr, @@ -108,9 +99,7 @@ class PatternDatabaseFactory { ~PatternDatabaseFactory() = default; shared_ptr extract_pdb() { - return make_shared( - move(projection), - move(distances)); + return make_shared(move(projection), move(distances)); } vector> &&extract_wildcard_plan() { @@ -126,20 +115,17 @@ void PatternDatabaseFactory::compute_variable_to_index(const Pattern &pattern) { } AbstractOperator PatternDatabaseFactory::build_abstract_operator( - const vector &prev_pairs, - const vector &pre_pairs, - const vector &eff_pairs, - int concrete_op_id, - int cost) const { + const vector &prev_pairs, 
const vector &pre_pairs, + const vector &eff_pairs, int concrete_op_id, int cost) const { vector regression_preconditions(prev_pairs); - regression_preconditions.insert(regression_preconditions.end(), - eff_pairs.begin(), - eff_pairs.end()); + regression_preconditions.insert( + regression_preconditions.end(), eff_pairs.begin(), eff_pairs.end()); // Sort preconditions for MatchTree construction. sort(regression_preconditions.begin(), regression_preconditions.end()); for (size_t i = 1; i < regression_preconditions.size(); ++i) { - assert(regression_preconditions[i].var != - regression_preconditions[i - 1].var); + assert( + regression_preconditions[i].var != + regression_preconditions[i - 1].var); } int hash_effect = 0; assert(pre_pairs.size() == eff_pairs.size()); @@ -152,25 +138,20 @@ AbstractOperator PatternDatabaseFactory::build_abstract_operator( int effect = (new_val - old_val) * projection.get_multiplier(var); hash_effect += effect; } - return AbstractOperator(concrete_op_id, cost, move(regression_preconditions), hash_effect); + return AbstractOperator( + concrete_op_id, cost, move(regression_preconditions), hash_effect); } void PatternDatabaseFactory::multiply_out( - int concrete_op_id, - int cost, - int pos, - vector &prev_pairs, - vector &pre_pairs, - vector &eff_pairs, + int concrete_op_id, int cost, int pos, vector &prev_pairs, + vector &pre_pairs, vector &eff_pairs, const vector &effects_without_pre, vector &operators) const { if (pos == static_cast(effects_without_pre.size())) { // All effects without precondition have been checked: insert op. 
if (!eff_pairs.empty()) { - operators.push_back( - build_abstract_operator( - prev_pairs, pre_pairs, eff_pairs, - concrete_op_id, cost)); + operators.push_back(build_abstract_operator( + prev_pairs, pre_pairs, eff_pairs, concrete_op_id, cost)); } } else { // For each possible value for the current variable, build an @@ -185,9 +166,9 @@ void PatternDatabaseFactory::multiply_out( } else { prev_pairs.emplace_back(var_id, i); } - multiply_out(concrete_op_id, cost, - pos + 1, prev_pairs, pre_pairs, eff_pairs, - effects_without_pre, operators); + multiply_out( + concrete_op_id, cost, pos + 1, prev_pairs, pre_pairs, eff_pairs, + effects_without_pre, operators); if (i != eff) { pre_pairs.pop_back(); eff_pairs.pop_back(); @@ -199,8 +180,7 @@ void PatternDatabaseFactory::multiply_out( } void PatternDatabaseFactory::build_abstract_operators_for_op( - const OperatorProxy &op, - int cost, + const OperatorProxy &op, int cost, vector &operators) const { // All variable value pairs that are a prevail condition vector prev_pairs; @@ -243,9 +223,9 @@ void PatternDatabaseFactory::build_abstract_operators_for_op( } } } - multiply_out(op.get_id(), cost, 0, - prev_pairs, pre_pairs, eff_pairs, effects_without_pre, - operators); + multiply_out( + op.get_id(), cost, 0, prev_pairs, pre_pairs, eff_pairs, + effects_without_pre, operators); } void PatternDatabaseFactory::compute_abstract_operators( @@ -257,13 +237,13 @@ void PatternDatabaseFactory::compute_abstract_operators( } else { op_cost = operator_costs[op.get_id()]; } - build_abstract_operators_for_op( - op, op_cost, abstract_ops); + build_abstract_operators_for_op(op, op_cost, abstract_ops); } } unique_ptr PatternDatabaseFactory::compute_match_tree() const { - unique_ptr match_tree = make_unique(task_proxy, projection); + unique_ptr match_tree = + make_unique(task_proxy, projection); for (size_t op_id = 0; op_id < abstract_ops.size(); ++op_id) { const AbstractOperator &op = abstract_ops[op_id]; match_tree->insert(op_id, 
op.get_regression_preconditions()); @@ -299,7 +279,8 @@ void PatternDatabaseFactory::compute_distances( priority_queues::AdaptiveQueue pq; // initialize queue - for (int state_index = 0; state_index < projection.get_num_abstract_states(); ++state_index) { + for (int state_index = 0; + state_index < projection.get_num_abstract_states(); ++state_index) { if (is_goal_state(state_index)) { pq.push(0, state_index); distances.push_back(0); @@ -333,7 +314,8 @@ void PatternDatabaseFactory::compute_distances( // regress abstract_state vector applicable_operator_ids; - match_tree.get_applicable_operator_ids(state_index, applicable_operator_ids); + match_tree.get_applicable_operator_ids( + state_index, applicable_operator_ids); for (int op_id : applicable_operator_ids) { const AbstractOperator &op = abstract_ops[op_id]; int predecessor = state_index + op.get_hash_effect(); @@ -367,8 +349,7 @@ void PatternDatabaseFactory::compute_plan( */ State initial_state = task_proxy.get_initial_state(); initial_state.unpack(); - int current_state = - projection.rank(initial_state.get_unpacked_values()); + int current_state = projection.rank(initial_state.get_unpacked_values()); if (distances[current_state] != numeric_limits::max()) { while (!is_goal_state(current_state)) { int op_id = generating_op_ids[current_state]; @@ -379,12 +360,17 @@ void PatternDatabaseFactory::compute_plan( // Compute equivalent ops vector cheapest_operators; vector applicable_operator_ids; - match_tree.get_applicable_operator_ids(successor_state, applicable_operator_ids); + match_tree.get_applicable_operator_ids( + successor_state, applicable_operator_ids); for (int applicable_op_id : applicable_operator_ids) { - const AbstractOperator &applicable_op = abstract_ops[applicable_op_id]; - int predecessor = successor_state + applicable_op.get_hash_effect(); - if (predecessor == current_state && op.get_cost() == applicable_op.get_cost()) { - cheapest_operators.emplace_back(applicable_op.get_concrete_op_id()); + const 
AbstractOperator &applicable_op = + abstract_ops[applicable_op_id]; + int predecessor = + successor_state + applicable_op.get_hash_effect(); + if (predecessor == current_state && + op.get_cost() == applicable_op.get_cost()) { + cheapest_operators.emplace_back( + applicable_op.get_concrete_op_id()); } } if (compute_wildcard_plan) { @@ -408,17 +394,16 @@ void PatternDatabaseFactory::compute_plan( class for pattern databases. */ PatternDatabaseFactory::PatternDatabaseFactory( - const TaskProxy &task_proxy, - const Pattern &pattern, - const vector &operator_costs, - bool compute_plan, + const TaskProxy &task_proxy, const Pattern &pattern, + const vector &operator_costs, bool compute_plan, const shared_ptr &rng, bool compute_wildcard_plan) : task_proxy(task_proxy), variables(task_proxy.get_variables()), projection(task_proxy, pattern) { - assert(operator_costs.empty() || - operator_costs.size() == task_proxy.get_operators().size()); + assert( + operator_costs.empty() || + operator_costs.size() == task_proxy.get_operators().size()); compute_variable_to_index(pattern); compute_abstract_operators(operator_costs); unique_ptr match_tree = compute_match_tree(); @@ -431,22 +416,22 @@ PatternDatabaseFactory::PatternDatabaseFactory( } shared_ptr compute_pdb( - const TaskProxy &task_proxy, - const Pattern &pattern, + const TaskProxy &task_proxy, const Pattern &pattern, const vector &operator_costs, const shared_ptr &rng) { - PatternDatabaseFactory pdb_factory(task_proxy, pattern, operator_costs, false, rng); + PatternDatabaseFactory pdb_factory( + task_proxy, pattern, operator_costs, false, rng); return pdb_factory.extract_pdb(); } tuple, vector>> compute_pdb_and_plan( - const TaskProxy &task_proxy, - const Pattern &pattern, + const TaskProxy &task_proxy, const Pattern &pattern, const vector &operator_costs, const shared_ptr &rng, bool compute_wildcard_plan) { - PatternDatabaseFactory pdb_factory(task_proxy, pattern, operator_costs, true, rng, compute_wildcard_plan); + 
PatternDatabaseFactory pdb_factory( + task_proxy, pattern, operator_costs, true, rng, compute_wildcard_plan); return {pdb_factory.extract_pdb(), pdb_factory.extract_wildcard_plan()}; } } diff --git a/src/search/pdbs/pattern_database_factory.h b/src/search/pdbs/pattern_database_factory.h index d7182920da..6293c2b96a 100644 --- a/src/search/pdbs/pattern_database_factory.h +++ b/src/search/pdbs/pattern_database_factory.h @@ -24,8 +24,7 @@ namespace pdbs { instead of its original cost. */ extern std::shared_ptr compute_pdb( - const TaskProxy &task_proxy, - const Pattern &pattern, + const TaskProxy &task_proxy, const Pattern &pattern, const std::vector &operator_costs = std::vector(), const std::shared_ptr &rng = nullptr); @@ -43,10 +42,10 @@ extern std::shared_ptr compute_pdb( plan contains exactly one operator, thus representing a regular plan. If set to true, each set contains at least one operator ID. */ -extern std::tuple, - std::vector>> compute_pdb_and_plan( - const TaskProxy &task_proxy, - const Pattern &pattern, +extern std::tuple< + std::shared_ptr, std::vector>> +compute_pdb_and_plan( + const TaskProxy &task_proxy, const Pattern &pattern, const std::vector &operator_costs = std::vector(), const std::shared_ptr &rng = nullptr, bool compute_wildcard_plan = false); diff --git a/src/search/pdbs/pattern_generator.cc b/src/search/pdbs/pattern_generator.cc index ca233eaca7..8dd79ffb63 100644 --- a/src/search/pdbs/pattern_generator.cc +++ b/src/search/pdbs/pattern_generator.cc @@ -19,8 +19,7 @@ PatternCollectionInformation PatternCollectionGenerator::generate( } utils::Timer timer; PatternCollectionInformation pci = compute_patterns(task); - dump_pattern_collection_generation_statistics( - name(), timer(), pci, log); + dump_pattern_collection_generation_statistics(name(), timer(), pci, log); return pci; } @@ -35,11 +34,7 @@ PatternInformation PatternGenerator::generate( } utils::Timer timer; PatternInformation pattern_info = compute_pattern(task); - 
dump_pattern_generation_statistics( - name(), - timer.stop(), - pattern_info, - log); + dump_pattern_generation_statistics(name(), timer.stop(), pattern_info, log); return pattern_info; } @@ -52,19 +47,20 @@ tuple get_generator_arguments_from_options( return utils::get_log_arguments_from_options(opts); } -static class PatternCollectionGeneratorCategoryPlugin : public plugins::TypedCategoryPlugin { +static class PatternCollectionGeneratorCategoryPlugin + : public plugins::TypedCategoryPlugin { public: - PatternCollectionGeneratorCategoryPlugin() : TypedCategoryPlugin("PatternCollectionGenerator") { + PatternCollectionGeneratorCategoryPlugin() + : TypedCategoryPlugin("PatternCollectionGenerator") { document_synopsis("Factory for pattern collections"); } -} -_category_plugin_collection; +} _category_plugin_collection; -static class PatternGeneratorCategoryPlugin : public plugins::TypedCategoryPlugin { +static class PatternGeneratorCategoryPlugin + : public plugins::TypedCategoryPlugin { public: PatternGeneratorCategoryPlugin() : TypedCategoryPlugin("PatternGenerator") { document_synopsis("Factory for single patterns"); } -} -_category_plugin_single; +} _category_plugin_single; } diff --git a/src/search/pdbs/pattern_generator.h b/src/search/pdbs/pattern_generator.h index 2dd22cbfba..9eb4385ac8 100644 --- a/src/search/pdbs/pattern_generator.h +++ b/src/search/pdbs/pattern_generator.h @@ -50,8 +50,8 @@ class PatternGenerator { }; extern void add_generator_options_to_feature(plugins::Feature &feature); -extern std::tuple -get_generator_arguments_from_options(const plugins::Options &opts); +extern std::tuple get_generator_arguments_from_options( + const plugins::Options &opts); } #endif diff --git a/src/search/pdbs/pattern_generator_cegar.cc b/src/search/pdbs/pattern_generator_cegar.cc index b9b7d3e6a0..b743899480 100644 --- a/src/search/pdbs/pattern_generator_cegar.cc +++ b/src/search/pdbs/pattern_generator_cegar.cc @@ -17,8 +17,8 @@ using namespace std; namespace pdbs { 
PatternGeneratorCEGAR::PatternGeneratorCEGAR( - int max_pdb_size, double max_time, bool use_wildcard_plans, - int random_seed, utils::Verbosity verbosity) + int max_pdb_size, double max_time, bool use_wildcard_plans, int random_seed, + utils::Verbosity verbosity) : PatternGenerator(verbosity), max_pdb_size(max_pdb_size), max_time(max_time), @@ -35,13 +35,7 @@ PatternInformation PatternGeneratorCEGAR::compute_pattern( TaskProxy task_proxy(*task); vector goals = get_goals_in_random_order(task_proxy, *rng); return generate_pattern_with_cegar( - max_pdb_size, - max_time, - use_wildcard_plans, - log, - rng, - task, - goals[0]); + max_pdb_size, max_time, use_wildcard_plans, log, rng, task, goals[0]); } class PatternGeneratorCEGARFeature @@ -54,19 +48,17 @@ class PatternGeneratorCEGARFeature "random single goal of the task to compute a pattern. See below " "for a description of the algorithm and some implementation notes. " "The original algorithm (called single CEGAR) is described in the " - "paper " + get_rovner_et_al_reference()); + "paper " + + get_rovner_et_al_reference()); add_option( "max_pdb_size", "maximum number of states in the final pattern database (possibly " "ignored by a singleton pattern consisting of a single goal variable)", - "1000000", - plugins::Bounds("1", "infinity")); + "1000000", plugins::Bounds("1", "infinity")); add_option( - "max_time", - "maximum time in seconds for the pattern generation", - "infinity", - plugins::Bounds("0.0", "infinity")); + "max_time", "maximum time in seconds for the pattern generation", + "infinity", plugins::Bounds("0.0", "infinity")); add_cegar_wildcard_option_to_feature(*this); utils::add_rng_options_to_feature(*this); add_generator_options_to_feature(*this); @@ -74,15 +66,13 @@ class PatternGeneratorCEGARFeature add_cegar_implementation_notes_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options 
&opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get("max_pdb_size"), - opts.get("max_time"), + opts.get("max_pdb_size"), opts.get("max_time"), get_cegar_wildcard_arguments_from_options(opts), utils::get_rng_arguments_from_options(opts), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_generator_greedy.cc b/src/search/pdbs/pattern_generator_greedy.cc index 5864a8c6d0..b81d5b2941 100644 --- a/src/search/pdbs/pattern_generator_greedy.cc +++ b/src/search/pdbs/pattern_generator_greedy.cc @@ -18,15 +18,15 @@ using namespace std; namespace pdbs { PatternGeneratorGreedy::PatternGeneratorGreedy( int max_states, utils::Verbosity verbosity) - : PatternGenerator(verbosity), - max_states(max_states) { + : PatternGenerator(verbosity), max_states(max_states) { } string PatternGeneratorGreedy::name() const { return "greedy pattern generator"; } -PatternInformation PatternGeneratorGreedy::compute_pattern(const shared_ptr &task) { +PatternInformation PatternGeneratorGreedy::compute_pattern( + const shared_ptr &task) { TaskProxy task_proxy(*task); Pattern pattern; variable_order_finder::VariableOrderFinder order( @@ -58,17 +58,15 @@ class PatternGeneratorGreedyFeature add_option( "max_states", "maximal number of abstract states in the pattern database.", - "1000000", - plugins::Bounds("1", "infinity")); + "1000000", plugins::Bounds("1", "infinity")); add_generator_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("max_states"), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_generator_manual.cc b/src/search/pdbs/pattern_generator_manual.cc index b3c37736db..929ca9d4e0 100644 
--- a/src/search/pdbs/pattern_generator_manual.cc +++ b/src/search/pdbs/pattern_generator_manual.cc @@ -14,8 +14,7 @@ using namespace std; namespace pdbs { PatternGeneratorManual::PatternGeneratorManual( const vector &pattern, utils::Verbosity verbosity) - : PatternGenerator(verbosity), - pattern(pattern) { + : PatternGenerator(verbosity), pattern(pattern) { } string PatternGeneratorManual::name() const { @@ -42,12 +41,11 @@ class PatternGeneratorManualFeature add_generator_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get_list("pattern"), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_generator_random.cc b/src/search/pdbs/pattern_generator_random.cc index 7b130eac3a..055343d624 100644 --- a/src/search/pdbs/pattern_generator_random.cc +++ b/src/search/pdbs/pattern_generator_random.cc @@ -17,8 +17,8 @@ using namespace std; namespace pdbs { PatternGeneratorRandom::PatternGeneratorRandom( - int max_pdb_size, double max_time, bool bidirectional, - int random_seed, utils::Verbosity verbosity) + int max_pdb_size, double max_time, bool bidirectional, int random_seed, + utils::Verbosity verbosity) : PatternGenerator(verbosity), max_pdb_size(max_pdb_size), max_time(max_time), @@ -32,18 +32,13 @@ string PatternGeneratorRandom::name() const { PatternInformation PatternGeneratorRandom::compute_pattern( const shared_ptr &task) { - vector> cg_neighbors = compute_cg_neighbors( - task, bidirectional); + vector> cg_neighbors = + compute_cg_neighbors(task, bidirectional); TaskProxy task_proxy(*task); vector goals = get_goals_in_random_order(task_proxy, *rng); Pattern pattern = generate_random_pattern( - max_pdb_size, - max_time, - log, - rng, - task_proxy, - goals[0].var, + max_pdb_size, 
max_time, log, rng, task_proxy, goals[0].var, cg_neighbors); return PatternInformation(task_proxy, pattern, log); @@ -56,8 +51,8 @@ class PatternGeneratorRandomFeature document_title("Random Pattern"); document_synopsis( "This pattern generator implements the 'single randomized " - "causal graph' algorithm described in experiments of the the paper" - + get_rovner_et_al_reference() + + "causal graph' algorithm described in experiments of the the paper" + + get_rovner_et_al_reference() + "See below for a description of the algorithm and some implementation " "notes."); @@ -65,13 +60,10 @@ class PatternGeneratorRandomFeature "max_pdb_size", "maximum number of states in the final pattern database (possibly " "ignored by a singleton pattern consisting of a single goal variable)", - "1000000", - plugins::Bounds("1", "infinity")); + "1000000", plugins::Bounds("1", "infinity")); add_option( - "max_time", - "maximum time in seconds for the pattern generation", - "infinity", - plugins::Bounds("0.0", "infinity")); + "max_time", "maximum time in seconds for the pattern generation", + "infinity", plugins::Bounds("0.0", "infinity")); add_random_pattern_bidirectional_option_to_feature(*this); utils::add_rng_options_to_feature(*this); add_generator_options_to_feature(*this); @@ -79,15 +71,13 @@ class PatternGeneratorRandomFeature add_random_pattern_implementation_notes_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - opts.get("max_pdb_size"), - opts.get("max_time"), + opts.get("max_pdb_size"), opts.get("max_time"), get_random_pattern_bidirectional_arguments_from_options(opts), utils::get_rng_arguments_from_options(opts), - get_generator_arguments_from_options(opts) - ); + get_generator_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pattern_generator_random.h 
b/src/search/pdbs/pattern_generator_random.h index 8b50d4576c..992aeca7cc 100644 --- a/src/search/pdbs/pattern_generator_random.h +++ b/src/search/pdbs/pattern_generator_random.h @@ -19,8 +19,8 @@ class PatternGeneratorRandom : public PatternGenerator { const std::shared_ptr &task) override; public: PatternGeneratorRandom( - int max_pdb_size, double max_time, bool bidirectional, - int random_seed, utils::Verbosity verbosity); + int max_pdb_size, double max_time, bool bidirectional, int random_seed, + utils::Verbosity verbosity); }; } diff --git a/src/search/pdbs/pattern_information.cc b/src/search/pdbs/pattern_information.cc index 6bf91d9f50..2d58a2fede 100644 --- a/src/search/pdbs/pattern_information.cc +++ b/src/search/pdbs/pattern_information.cc @@ -10,12 +10,8 @@ using namespace std; namespace pdbs { PatternInformation::PatternInformation( - const TaskProxy &task_proxy, - Pattern pattern, - utils::LogProxy &log) - : task_proxy(task_proxy), - pattern(move(pattern)), - pdb(nullptr) { + const TaskProxy &task_proxy, Pattern pattern, utils::LogProxy &log) + : task_proxy(task_proxy), pattern(move(pattern)), pdb(nullptr) { validate_and_normalize_pattern(task_proxy, this->pattern, log); } diff --git a/src/search/pdbs/pdb_heuristic.cc b/src/search/pdbs/pdb_heuristic.cc index a2486c25d3..a5e6c73a95 100644 --- a/src/search/pdbs/pdb_heuristic.cc +++ b/src/search/pdbs/pdb_heuristic.cc @@ -36,23 +36,19 @@ int PDBHeuristic::compute_heuristic(const State &ancestor_state) { static basic_string paper_references() { return utils::format_conference_reference( - {"Stefan Edelkamp"}, - "Planning with Pattern Databases", - "https://aaai.org/papers/7280-ecp-01-2001/", - "Proceedings of the Sixth European Conference on Planning (ECP 2001)", - "84-90", - "AAAI Press", - "2001") + - "For implementation notes, see:" + utils::format_conference_reference( - {"Silvan Sievers", "Manuela Ortlieb", "Malte Helmert"}, - "Efficient Implementation of Pattern Database Heuristics for" - " Classical 
Planning", - "https://ai.dmi.unibas.ch/papers/sievers-et-al-socs2012.pdf", - "Proceedings of the Fifth Annual Symposium on Combinatorial" - " Search (SoCS 2012)", - "105-111", - "AAAI Press", - "2012"); + {"Stefan Edelkamp"}, "Planning with Pattern Databases", + "https://aaai.org/papers/7280-ecp-01-2001/", + "Proceedings of the Sixth European Conference on Planning (ECP 2001)", + "84-90", "AAAI Press", "2001") + + "For implementation notes, see:" + + utils::format_conference_reference( + {"Silvan Sievers", "Manuela Ortlieb", "Malte Helmert"}, + "Efficient Implementation of Pattern Database Heuristics for" + " Classical Planning", + "https://ai.dmi.unibas.ch/papers/sievers-et-al-socs2012.pdf", + "Proceedings of the Fifth Annual Symposium on Combinatorial" + " Search (SoCS 2012)", + "105-111", "AAAI Press", "2012"); } class PDBHeuristicFeature : public plugins::TypedFeature { @@ -63,13 +59,11 @@ class PDBHeuristicFeature document_synopsis( "Computes goal distance in " "state space abstractions based on projections. 
" - "First used in domain-independent planning by:" - + paper_references()); + "First used in domain-independent planning by:" + + paper_references()); add_option>( - "pattern", - "pattern generation method", - "greedy()"); + "pattern", "pattern generation method", "greedy()"); add_heuristic_options_to_feature(*this, "pdb"); document_language_support("action costs", "supported"); @@ -82,12 +76,11 @@ class PDBHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("pattern"), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/pdb_heuristic.h b/src/search/pdbs/pdb_heuristic.h index 4d5f8e98b5..4fa73645a5 100644 --- a/src/search/pdbs/pdb_heuristic.h +++ b/src/search/pdbs/pdb_heuristic.h @@ -26,9 +26,8 @@ class PDBHeuristic : public Heuristic { */ PDBHeuristic( const std::shared_ptr &pattern_generator, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/pdbs/random_pattern.cc b/src/search/pdbs/random_pattern.cc index ad1165d21d..24ee476b09 100644 --- a/src/search/pdbs/random_pattern.cc +++ b/src/search/pdbs/random_pattern.cc @@ -26,12 +26,9 @@ static bool time_limit_reached( } Pattern generate_random_pattern( - int max_pdb_size, - double max_time, - utils::LogProxy &log, + int max_pdb_size, double max_time, utils::LogProxy &log, const shared_ptr &rng, - const TaskProxy &task_proxy, - int goal_variable, + const TaskProxy &task_proxy, int goal_variable, vector> &cg_neighbors) { utils::CountdownTimer timer(max_time); int current_var = 
goal_variable; @@ -49,7 +46,8 @@ Pattern generate_random_pattern( bool found_neighbor = false; for (int neighbor : cg_neighbors[current_var]) { int neighbor_dom_size = variables[neighbor].get_domain_size(); - if (!visited_vars.count(neighbor) && utils::is_product_within_limit( + if (!visited_vars.count(neighbor) && + utils::is_product_within_limit( pdb_size, neighbor_dom_size, max_pdb_size)) { pdb_size *= neighbor_dom_size; visited_vars.insert(neighbor); @@ -92,7 +90,8 @@ void add_random_pattern_implementation_notes_to_feature( true); } -void add_random_pattern_bidirectional_option_to_feature(plugins::Feature &feature) { +void add_random_pattern_bidirectional_option_to_feature( + plugins::Feature &feature) { feature.add_option( "bidirectional", "this option decides if the causal graph is considered to be " diff --git a/src/search/pdbs/random_pattern.h b/src/search/pdbs/random_pattern.h index 036bc8eaea..b5c512e15f 100644 --- a/src/search/pdbs/random_pattern.h +++ b/src/search/pdbs/random_pattern.h @@ -26,20 +26,16 @@ namespace pdbs { pattern due to the size limit or if the time limit is reached. 
*/ extern Pattern generate_random_pattern( - int max_pdb_size, - double max_time, - utils::LogProxy &log, + int max_pdb_size, double max_time, utils::LogProxy &log, const std::shared_ptr &rng, - const TaskProxy &task_proxy, - int goal_variable, + const TaskProxy &task_proxy, int goal_variable, std::vector> &cg_neighbors); extern void add_random_pattern_implementation_notes_to_feature( plugins::Feature &feature); extern void add_random_pattern_bidirectional_option_to_feature( plugins::Feature &feature); -extern std::tuple -get_random_pattern_bidirectional_arguments_from_options( +extern std::tuple get_random_pattern_bidirectional_arguments_from_options( const plugins::Options &opts); } diff --git a/src/search/pdbs/subcategory.cc b/src/search/pdbs/subcategory.cc index c1b4c92ce7..9bc5728609 100644 --- a/src/search/pdbs/subcategory.cc +++ b/src/search/pdbs/subcategory.cc @@ -6,6 +6,5 @@ static class PDBGroupPlugin : public plugins::SubcategoryPlugin { PDBGroupPlugin() : SubcategoryPlugin("heuristics_pdb") { document_title("Pattern Database Heuristics"); } -} -_subcategory_plugin; +} _subcategory_plugin; } diff --git a/src/search/pdbs/utils.cc b/src/search/pdbs/utils.cc index cdb5049354..7c2504750e 100644 --- a/src/search/pdbs/utils.cc +++ b/src/search/pdbs/utils.cc @@ -8,7 +8,6 @@ #include "../task_utils/causal_graph.h" #include "../task_utils/task_properties.h" - #include "../utils/logging.h" #include "../utils/markup.h" #include "../utils/math.h" @@ -23,8 +22,8 @@ int compute_pdb_size(const TaskProxy &task_proxy, const Pattern &pattern) { int size = 1; for (int var : pattern) { int domain_size = task_proxy.get_variables()[var].get_domain_size(); - if (utils::is_product_within_limit(size, domain_size, - numeric_limits::max())) { + if (utils::is_product_within_limit( + size, domain_size, numeric_limits::max())) { size *= domain_size; } else { cerr << "Given pattern is too large! 
(Overflow occurred): " << endl; @@ -56,7 +55,8 @@ bool is_operator_relevant(const Pattern &pattern, const OperatorProxy &op) { vector get_goals_in_random_order( const TaskProxy &task_proxy, utils::RandomNumberGenerator &rng) { - vector goals = task_properties::get_fact_pairs(task_proxy.get_goals()); + vector goals = + task_properties::get_fact_pairs(task_proxy.get_goals()); rng.shuffle(goals); return goals; } @@ -80,17 +80,19 @@ vector get_non_goal_variables(const TaskProxy &task_proxy) { } vector> compute_cg_neighbors( - const shared_ptr &task, - bool bidirectional) { + const shared_ptr &task, bool bidirectional) { TaskProxy task_proxy(*task); int num_vars = task_proxy.get_variables().size(); - const causal_graph::CausalGraph &cg = causal_graph::get_causal_graph(task.get()); + const causal_graph::CausalGraph &cg = + causal_graph::get_causal_graph(task.get()); vector> cg_neighbors(num_vars); for (int var_id = 0; var_id < num_vars; ++var_id) { cg_neighbors[var_id] = cg.get_predecessors(var_id); if (bidirectional) { const vector &successors = cg.get_successors(var_id); - cg_neighbors[var_id].insert(cg_neighbors[var_id].end(), successors.begin(), successors.end()); + cg_neighbors[var_id].insert( + cg_neighbors[var_id].end(), successors.begin(), + successors.end()); } utils::sort_unique(cg_neighbors[var_id]); } @@ -98,8 +100,7 @@ vector> compute_cg_neighbors( } PatternCollectionInformation get_pattern_collection_info( - const TaskProxy &task_proxy, - const shared_ptr &pdbs, + const TaskProxy &task_proxy, const shared_ptr &pdbs, utils::LogProxy &log) { shared_ptr patterns = make_shared(); patterns->reserve(pdbs->size()); @@ -112,10 +113,8 @@ PatternCollectionInformation get_pattern_collection_info( } void dump_pattern_generation_statistics( - const string &identifier, - utils::Duration runtime, - const PatternInformation &pattern_info, - utils::LogProxy &log) { + const string &identifier, utils::Duration runtime, + const PatternInformation &pattern_info, utils::LogProxy 
&log) { const Pattern &pattern = pattern_info.get_pattern(); if (log.is_at_least_normal()) { log << identifier << " pattern: " << pattern << endl; @@ -127,17 +126,15 @@ void dump_pattern_generation_statistics( } void dump_pattern_collection_generation_statistics( - const string &identifier, - utils::Duration runtime, - const PatternCollectionInformation &pci, - utils::LogProxy &log) { + const string &identifier, utils::Duration runtime, + const PatternCollectionInformation &pci, utils::LogProxy &log) { const PatternCollection &pattern_collection = *pci.get_patterns(); if (log.is_at_least_normal()) { - log << identifier << " number of patterns: " << pattern_collection.size() - << endl; + log << identifier + << " number of patterns: " << pattern_collection.size() << endl; log << identifier << " total PDB size: " - << compute_total_pdb_size( - pci.get_task_proxy(), pattern_collection) << endl; + << compute_total_pdb_size(pci.get_task_proxy(), pattern_collection) + << endl; log << identifier << " computation time: " << runtime << endl; } } @@ -150,8 +147,6 @@ string get_rovner_et_al_reference() { "https://ai.dmi.unibas.ch/papers/rovner-et-al-icaps2019.pdf", "Proceedings of the 29th International Conference on Automated " "Planning and Scheduling (ICAPS 2019)", - "362-367", - "AAAI Press", - "2019"); + "362-367", "AAAI Press", "2019"); } } diff --git a/src/search/pdbs/utils.h b/src/search/pdbs/utils.h index 52325c36cf..b510c74155 100644 --- a/src/search/pdbs/utils.h +++ b/src/search/pdbs/utils.h @@ -19,10 +19,12 @@ namespace pdbs { class PatternCollectionInformation; class PatternInformation; -extern int compute_pdb_size(const TaskProxy &task_proxy, const Pattern &pattern); +extern int compute_pdb_size( + const TaskProxy &task_proxy, const Pattern &pattern); extern int compute_total_pdb_size( const TaskProxy &task_proxy, const PatternCollection &pattern_collection); -extern bool is_operator_relevant(const Pattern &pattern, const OperatorProxy &op); +extern bool 
is_operator_relevant( + const Pattern &pattern, const OperatorProxy &op); extern std::vector get_goals_in_random_order( const TaskProxy &task_proxy, utils::RandomNumberGenerator &rng); @@ -35,12 +37,10 @@ extern std::vector get_non_goal_variables(const TaskProxy &task_proxy); undirected graph and also successors of variables are considered neighbors. */ extern std::vector> compute_cg_neighbors( - const std::shared_ptr &task, - bool bidirectional); + const std::shared_ptr &task, bool bidirectional); extern PatternCollectionInformation get_pattern_collection_info( - const TaskProxy &task_proxy, - const std::shared_ptr &pdbs, + const TaskProxy &task_proxy, const std::shared_ptr &pdbs, utils::LogProxy &log); /* @@ -49,10 +49,8 @@ extern PatternCollectionInformation get_pattern_collection_info( prepended with the given string identifier. */ extern void dump_pattern_generation_statistics( - const std::string &identifier, - utils::Duration runtime, - const PatternInformation &pattern_info, - utils::LogProxy &log); + const std::string &identifier, utils::Duration runtime, + const PatternInformation &pattern_info, utils::LogProxy &log); /* Compute and dump the number of patterns, the total size of the corresponding @@ -60,10 +58,8 @@ extern void dump_pattern_generation_statistics( prepended with the given string identifier. 
*/ extern void dump_pattern_collection_generation_statistics( - const std::string &identifier, - utils::Duration runtime, - const PatternCollectionInformation &pci, - utils::LogProxy &log); + const std::string &identifier, utils::Duration runtime, + const PatternCollectionInformation &pci, utils::LogProxy &log); extern std::string get_rovner_et_al_reference(); } diff --git a/src/search/pdbs/validation.cc b/src/search/pdbs/validation.cc index 49fe2b9b86..2b1ec869bd 100644 --- a/src/search/pdbs/validation.cc +++ b/src/search/pdbs/validation.cc @@ -12,9 +12,8 @@ using namespace std; using utils::ExitCode; namespace pdbs { -void validate_and_normalize_pattern(const TaskProxy &task_proxy, - Pattern &pattern, - utils::LogProxy &log) { +void validate_and_normalize_pattern( + const TaskProxy &task_proxy, Pattern &pattern, utils::LogProxy &log) { /* - Sort by variable number and remove duplicate variables. - Warn if duplicate variables exist. @@ -42,9 +41,9 @@ void validate_and_normalize_pattern(const TaskProxy &task_proxy, } } -void validate_and_normalize_patterns(const TaskProxy &task_proxy, - PatternCollection &patterns, - utils::LogProxy &log) { +void validate_and_normalize_patterns( + const TaskProxy &task_proxy, PatternCollection &patterns, + utils::LogProxy &log) { /* - Validate and normalize each pattern (see there). - Warn if duplicate patterns exist. 
diff --git a/src/search/pdbs/validation.h b/src/search/pdbs/validation.h index e41dc3a8ef..8137fde21d 100644 --- a/src/search/pdbs/validation.h +++ b/src/search/pdbs/validation.h @@ -13,8 +13,7 @@ namespace pdbs { extern void validate_and_normalize_pattern( const TaskProxy &task_proxy, Pattern &pattern, utils::LogProxy &log); extern void validate_and_normalize_patterns( - const TaskProxy &task_proxy, - PatternCollection &patterns, + const TaskProxy &task_proxy, PatternCollection &patterns, utils::LogProxy &log); } diff --git a/src/search/pdbs/zero_one_pdbs.cc b/src/search/pdbs/zero_one_pdbs.cc index 3b096d2888..a17b3f8bc8 100644 --- a/src/search/pdbs/zero_one_pdbs.cc +++ b/src/search/pdbs/zero_one_pdbs.cc @@ -26,8 +26,8 @@ ZeroOnePDBs::ZeroOnePDBs( pattern_databases.reserve(patterns.size()); for (const Pattern &pattern : patterns) { - shared_ptr pdb = compute_pdb( - task_proxy, pattern, remaining_operator_costs); + shared_ptr pdb = + compute_pdb(task_proxy, pattern, remaining_operator_costs); /* Set cost of relevant operators to 0 for further iterations (action cost partitioning). 
*/ @@ -40,7 +40,6 @@ ZeroOnePDBs::ZeroOnePDBs( } } - int ZeroOnePDBs::get_value(const State &state) const { /* Because we use cost partitioning, we can simply add up all diff --git a/src/search/pdbs/zero_one_pdbs_heuristic.cc b/src/search/pdbs/zero_one_pdbs_heuristic.cc index b59f5102af..fe57aec548 100644 --- a/src/search/pdbs/zero_one_pdbs_heuristic.cc +++ b/src/search/pdbs/zero_one_pdbs_heuristic.cc @@ -52,9 +52,7 @@ class ZeroOnePDBsHeuristicFeature "to zero for all other affected patterns."); add_option>( - "patterns", - "pattern generation method", - "systematic(1)"); + "patterns", "pattern generation method", "systematic(1)"); add_heuristic_options_to_feature(*this, "zopdbs"); document_language_support("action costs", "supported"); @@ -67,12 +65,11 @@ class ZeroOnePDBsHeuristicFeature document_property("preferred operators", "no"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("patterns"), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } }; diff --git a/src/search/pdbs/zero_one_pdbs_heuristic.h b/src/search/pdbs/zero_one_pdbs_heuristic.h index b52ce23b5b..3f884c6af2 100644 --- a/src/search/pdbs/zero_one_pdbs_heuristic.h +++ b/src/search/pdbs/zero_one_pdbs_heuristic.h @@ -16,9 +16,8 @@ class ZeroOnePDBsHeuristic : public Heuristic { public: ZeroOnePDBsHeuristic( const std::shared_ptr &patterns, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &name, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &name, utils::Verbosity verbosity); }; } diff --git a/src/search/per_state_array.h b/src/search/per_state_array.h index 1fad15ddc9..925aad5f5b 100644 --- a/src/search/per_state_array.h +++ b/src/search/per_state_array.h @@ -6,13 +6,13 @@ #include 
#include - template class ConstArrayView { const T *p; int size_; public: - ConstArrayView(const T *p, int size) : p(p), size_(size) {} + ConstArrayView(const T *p, int size) : p(p), size_(size) { + } ConstArrayView(const ConstArrayView &other) = default; ConstArrayView &operator=(const ConstArrayView &other) = default; @@ -32,7 +32,8 @@ class ArrayView { T *p; int size_; public: - ArrayView(T *p, int size) : p(p), size_(size) {} + ArrayView(T *p, int size) : p(p), size_(size) { + } ArrayView(const ArrayView &other) = default; ArrayView &operator=(const ArrayView &other) = default; @@ -70,27 +71,32 @@ class ArrayView { template class PerStateArray : public subscriber::Subscriber { const std::vector default_array; - using EntryArrayVectorMap = std::unordered_map *>; + using EntryArrayVectorMap = std::unordered_map< + const StateRegistry *, + segmented_vector::SegmentedArrayVector *>; EntryArrayVectorMap entry_arrays_by_registry; mutable const StateRegistry *cached_registry; mutable segmented_vector::SegmentedArrayVector *cached_entries; - segmented_vector::SegmentedArrayVector *get_entries(const StateRegistry *registry) { + segmented_vector::SegmentedArrayVector *get_entries( + const StateRegistry *registry) { if (cached_registry != registry) { cached_registry = registry; auto it = entry_arrays_by_registry.find(registry); if (it == entry_arrays_by_registry.end()) { - cached_entries = new segmented_vector::SegmentedArrayVector( - default_array.size()); + cached_entries = + new segmented_vector::SegmentedArrayVector( + default_array.size()); entry_arrays_by_registry[registry] = cached_entries; registry->subscribe(this); } else { cached_entries = it->second; } } - assert(cached_registry == registry && cached_entries == entry_arrays_by_registry[registry]); + assert( + cached_registry == registry && + cached_entries == entry_arrays_by_registry[registry]); return cached_entries; } @@ -102,7 +108,8 @@ class PerStateArray : public subscriber::Subscriber { return nullptr; } 
else { cached_registry = registry; - cached_entries = const_cast *>( + cached_entries = const_cast< + segmented_vector::SegmentedArrayVector *>( it->second); } } @@ -133,7 +140,8 @@ class PerStateArray : public subscriber::Subscriber { << "state." << std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } - segmented_vector::SegmentedArrayVector *entries = get_entries(registry); + segmented_vector::SegmentedArrayVector *entries = + get_entries(registry); int state_id = state.get_id().value; assert(state.get_id() != StateID::no_state); size_t virtual_size = registry->size(); @@ -165,10 +173,12 @@ class PerStateArray : public subscriber::Subscriber { ABORT("PerStateArray::operator[] const tried to access " "non-existing entry."); } - return ConstArrayView((*entries)[state_id], default_array.size()); + return ConstArrayView( + (*entries)[state_id], default_array.size()); } - virtual void notify_service_destroyed(const StateRegistry *registry) override { + virtual void notify_service_destroyed( + const StateRegistry *registry) override { delete entry_arrays_by_registry[registry]; entry_arrays_by_registry.erase(registry); if (registry == cached_registry) { diff --git a/src/search/per_state_bitset.cc b/src/search/per_state_bitset.cc index 374b5213fb..eed614dd08 100644 --- a/src/search/per_state_bitset.cc +++ b/src/search/per_state_bitset.cc @@ -2,7 +2,6 @@ using namespace std; - int BitsetMath::compute_num_blocks(size_t num_bits) { return (num_bits + bits_per_block - 1) / bits_per_block; } @@ -19,7 +18,6 @@ BitsetMath::Block BitsetMath::bit_mask(size_t pos) { return Block(1) << bit_index(pos); } - void BitsetView::set(int index) { assert(index >= 0 && index < num_bits); int block_index = BitsetMath::block_index(index); @@ -55,12 +53,12 @@ int BitsetView::size() const { return num_bits; } - static vector pack_bit_vector(const vector &bits) { int num_bits = bits.size(); int num_blocks = BitsetMath::compute_num_blocks(num_bits); vector packed_bits(num_blocks, 
0); - BitsetView bitset_view(ArrayView(packed_bits.data(), num_blocks), num_bits); + BitsetView bitset_view( + ArrayView(packed_bits.data(), num_blocks), num_bits); for (int i = 0; i < num_bits; ++i) { if (bits[i]) { bitset_view.set(i); @@ -69,7 +67,6 @@ static vector pack_bit_vector(const vector &bits) { return packed_bits; } - PerStateBitset::PerStateBitset(const vector &default_bits) : num_bits_per_entry(default_bits.size()), data(pack_bit_vector(default_bits)) { @@ -79,7 +76,6 @@ BitsetView PerStateBitset::operator[](const State &state) { return BitsetView(data[state], num_bits_per_entry); } - bool ConstBitsetView::test(int index) const { assert(index >= 0 && index < num_bits); int block_index = BitsetMath::block_index(index); diff --git a/src/search/per_state_bitset.h b/src/search/per_state_bitset.h index 21c12d8eb4..86831c6e99 100644 --- a/src/search/per_state_bitset.h +++ b/src/search/per_state_bitset.h @@ -5,13 +5,11 @@ #include - class BitsetMath { public: using Block = unsigned int; static_assert( - !std::numeric_limits::is_signed, - "Block type must be unsigned"); + !std::numeric_limits::is_signed, "Block type must be unsigned"); static const Block zeros = Block(0); // MSVC's bitwise negation always returns a signed type. 
@@ -24,14 +22,13 @@ class BitsetMath { static Block bit_mask(std::size_t pos); }; - class ConstBitsetView { ConstArrayView data; int num_bits; public: - ConstBitsetView(ConstArrayView data, int num_bits) : - data(data), num_bits(num_bits) {} - + ConstBitsetView(ConstArrayView data, int num_bits) + : data(data), num_bits(num_bits) { + } ConstBitsetView(const ConstBitsetView &other) = default; ConstBitsetView &operator=(const ConstBitsetView &other) = default; @@ -40,14 +37,13 @@ class ConstBitsetView { int size() const; }; - class BitsetView { ArrayView data; int num_bits; public: - BitsetView(ArrayView data, int num_bits) : - data(data), num_bits(num_bits) {} - + BitsetView(ArrayView data, int num_bits) + : data(data), num_bits(num_bits) { + } BitsetView(const BitsetView &other) = default; BitsetView &operator=(const BitsetView &other) = default; @@ -64,7 +60,6 @@ class BitsetView { int size() const; }; - class PerStateBitset { int num_bits_per_entry; PerStateArray data; diff --git a/src/search/per_state_information.h b/src/search/per_state_information.h index 7452b6418d..6acd3da223 100644 --- a/src/search/per_state_information.h +++ b/src/search/per_state_information.h @@ -39,8 +39,8 @@ template class PerStateInformation : public subscriber::Subscriber { const Entry default_value; - using EntryVectorMap = std::unordered_map * >; + using EntryVectorMap = std::unordered_map< + const StateRegistry *, segmented_vector::SegmentedVector *>; EntryVectorMap entries_by_registry; mutable const StateRegistry *cached_registry; @@ -48,11 +48,12 @@ class PerStateInformation : public subscriber::Subscriber { /* Returns the SegmentedVector associated with the given StateRegistry. - If no vector is associated with this registry yet, an empty one is created. - Both the registry and the returned vector are cached to speed up + If no vector is associated with this registry yet, an empty one is + created. 
Both the registry and the returned vector are cached to speed up consecutive calls with the same registry. */ - segmented_vector::SegmentedVector *get_entries(const StateRegistry *registry) { + segmented_vector::SegmentedVector *get_entries( + const StateRegistry *registry) { if (cached_registry != registry) { cached_registry = registry; auto it = entries_by_registry.find(registry); @@ -64,7 +65,9 @@ class PerStateInformation : public subscriber::Subscriber { cached_entries = it->second; } } - assert(cached_registry == registry && cached_entries == entries_by_registry[registry]); + assert( + cached_registry == registry && + cached_entries == entries_by_registry[registry]); return cached_entries; } @@ -74,14 +77,17 @@ class PerStateInformation : public subscriber::Subscriber { Otherwise, both the registry and the returned vector are cached to speed up consecutive calls with the same registry. */ - const segmented_vector::SegmentedVector *get_entries(const StateRegistry *registry) const { + const segmented_vector::SegmentedVector *get_entries( + const StateRegistry *registry) const { if (cached_registry != registry) { const auto it = entries_by_registry.find(registry); if (it == entries_by_registry.end()) { return nullptr; } else { cached_registry = registry; - cached_entries = const_cast *>(it->second); + cached_entries = + const_cast *>( + it->second); } } assert(cached_registry == registry); @@ -90,9 +96,7 @@ class PerStateInformation : public subscriber::Subscriber { public: PerStateInformation() - : default_value(), - cached_registry(nullptr), - cached_entries(nullptr) { + : default_value(), cached_registry(nullptr), cached_entries(nullptr) { } explicit PerStateInformation(const Entry &default_value_) @@ -117,7 +121,8 @@ class PerStateInformation : public subscriber::Subscriber { << "unregistered state." 
<< std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } - segmented_vector::SegmentedVector *entries = get_entries(registry); + segmented_vector::SegmentedVector *entries = + get_entries(registry); int state_id = state.get_id().value; assert(state.get_id() != StateID::no_state); size_t virtual_size = registry->size(); @@ -135,7 +140,8 @@ class PerStateInformation : public subscriber::Subscriber { << "unregistered state." << std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } - const segmented_vector::SegmentedVector *entries = get_entries(registry); + const segmented_vector::SegmentedVector *entries = + get_entries(registry); if (!entries) { return default_value; } @@ -149,7 +155,8 @@ class PerStateInformation : public subscriber::Subscriber { return (*entries)[state_id]; } - virtual void notify_service_destroyed(const StateRegistry *registry) override { + virtual void notify_service_destroyed( + const StateRegistry *registry) override { delete entries_by_registry[registry]; entries_by_registry.erase(registry); if (registry == cached_registry) { diff --git a/src/search/per_task_information.h b/src/search/per_task_information.h index a41f3108c0..8ed628cb62 100644 --- a/src/search/per_task_information.h +++ b/src/search/per_task_information.h @@ -28,7 +28,8 @@ class PerTaskInformation : public subscriber::Subscriber { classes are concerned. It should return a unique_ptr to the newly created object. */ - using EntryConstructor = std::function(const TaskProxy &)>; + using EntryConstructor = + std::function(const TaskProxy &)>; EntryConstructor entry_constructor; utils::HashMap> entries; public: @@ -38,10 +39,9 @@ class PerTaskInformation : public subscriber::Subscriber { parameter. 
*/ PerTaskInformation() - : entry_constructor( - [](const TaskProxy &task_proxy) { - return std::make_unique(task_proxy); - }) { + : entry_constructor([](const TaskProxy &task_proxy) { + return std::make_unique(task_proxy); + }) { } explicit PerTaskInformation(EntryConstructor entry_constructor) diff --git a/src/search/plan_manager.cc b/src/search/plan_manager.cc index fe7ad29288..c14657a515 100644 --- a/src/search/plan_manager.cc +++ b/src/search/plan_manager.cc @@ -9,7 +9,6 @@ #include #include - using namespace std; int calculate_plan_cost(const Plan &plan, const TaskProxy &task_proxy) { @@ -31,11 +30,13 @@ void PlanManager::set_plan_filename(const string &plan_filename_) { plan_filename = plan_filename_; } -void PlanManager::set_num_previously_generated_plans(int num_previously_generated_plans_) { +void PlanManager::set_num_previously_generated_plans( + int num_previously_generated_plans_) { num_previously_generated_plans = num_previously_generated_plans_; } -void PlanManager::set_is_part_of_anytime_portfolio(bool is_part_of_anytime_portfolio_) { +void PlanManager::set_is_part_of_anytime_portfolio( + bool is_part_of_anytime_portfolio_) { is_part_of_anytime_portfolio = is_part_of_anytime_portfolio_; } @@ -57,7 +58,8 @@ void PlanManager::save_plan( } OperatorsProxy operators = task_proxy.get_operators(); for (OperatorID op_id : plan) { - cout << operators[op_id].get_name() << " (" << operators[op_id].get_cost() << ")" << endl; + cout << operators[op_id].get_name() << " (" + << operators[op_id].get_cost() << ")" << endl; outfile << "(" << operators[op_id].get_name() << ")" << endl; } int plan_cost = calculate_plan_cost(plan, task_proxy); diff --git a/src/search/plan_manager.h b/src/search/plan_manager.h index 9467499d37..776ee79e3a 100644 --- a/src/search/plan_manager.h +++ b/src/search/plan_manager.h @@ -21,8 +21,8 @@ class PlanManager { void set_is_part_of_anytime_portfolio(bool is_part_of_anytime_portfolio); /* - Set generates_multiple_plan_files to true if the 
planner can find more than - one plan and should number the plans as FILENAME.1, ..., FILENAME.n. + Set generates_multiple_plan_files to true if the planner can find more + than one plan and should number the plans as FILENAME.1, ..., FILENAME.n. */ void save_plan( const Plan &plan, const TaskProxy &task_proxy, diff --git a/src/search/planner.cc b/src/search/planner.cc index e52edcb22d..2f5addf422 100644 --- a/src/search/planner.cc +++ b/src/search/planner.cc @@ -1,10 +1,9 @@ #include "command_line.h" - #include "git_revision.h" #include "search_algorithm.h" -#include "tasks/root_task.h" #include "task_utils/task_properties.h" +#include "tasks/root_task.h" #include "utils/logging.h" #include "utils/system.h" #include "utils/timer.h" @@ -16,8 +15,10 @@ using utils::ExitCode; int main(int argc, const char **argv) { try { - if (argc == 2 && static_cast(argv[1]) == "--internal-git-revision") { - // We handle this option before registering event handlers to avoid printing peak memory on exit. + if (argc == 2 && + static_cast(argv[1]) == "--internal-git-revision") { + // We handle this option before registering event handlers to avoid + // printing peak memory on exit. cout << g_git_revision << endl; exit(0); } @@ -42,7 +43,6 @@ int main(int argc, const char **argv) { shared_ptr search_algorithm = parse_cmd_line(argc, argv, unit_cost); - utils::Timer search_timer; search_algorithm->search(); search_timer.stop(); @@ -54,8 +54,8 @@ int main(int argc, const char **argv) { utils::g_log << "Total time: " << utils::g_timer << endl; ExitCode exitcode = search_algorithm->found_solution() - ? ExitCode::SUCCESS - : ExitCode::SEARCH_UNSOLVED_INCOMPLETE; + ? 
ExitCode::SUCCESS + : ExitCode::SEARCH_UNSOLVED_INCOMPLETE; exit_with(exitcode); } catch (const utils::ExitException &e) { /* To ensure that all destructors are called before the program exits, diff --git a/src/search/plugins/any.h b/src/search/plugins/any.h index 7036643680..78a1b082e9 100644 --- a/src/search/plugins/any.h +++ b/src/search/plugins/any.h @@ -24,8 +24,9 @@ namespace plugins { class Any { class Placeholder { -public: - virtual ~Placeholder() {} + public: + virtual ~Placeholder() { + } virtual std::unique_ptr clone() const = 0; virtual const std::type_info &type() const = 0; virtual std::string type_name() const = 0; @@ -34,11 +35,10 @@ class Any { template class Holder : public Placeholder { Holder &operator=(const Holder &) = delete; -public: + public: ValueType held; - Holder(const ValueType &value) - : held(value) { + Holder(const ValueType &value) : held(value) { } virtual std::unique_ptr clone() const override { @@ -107,11 +107,11 @@ class BadAnyCast : public std::bad_cast { } }; - template ValueType *any_cast(Any *operand) { if (operand && operand->type() == typeid(ValueType)) - return &static_cast *>(operand->content.get())->held; + return &static_cast *>(operand->content.get()) + ->held; else return nullptr; } @@ -121,7 +121,6 @@ inline const ValueType *any_cast(const Any *operand) { return any_cast(const_cast(operand)); } - template ValueType any_cast(Any &operand) { ValueType *result = any_cast(&operand); diff --git a/src/search/plugins/bounds.cc b/src/search/plugins/bounds.cc index e56badf8fa..5c7ed28f30 100644 --- a/src/search/plugins/bounds.cc +++ b/src/search/plugins/bounds.cc @@ -3,8 +3,7 @@ using namespace std; namespace plugins { -Bounds::Bounds(const string &min, const string &max) - : min(min), max(max) { +Bounds::Bounds(const string &min, const string &max) : min(min), max(max) { } bool Bounds::has_bound() const { diff --git a/src/search/plugins/doc_printer.cc b/src/search/plugins/doc_printer.cc index 47a2e888a8..e9b6a34899 100644 
--- a/src/search/plugins/doc_printer.cc +++ b/src/search/plugins/doc_printer.cc @@ -11,17 +11,16 @@ using namespace std; namespace plugins { DocPrinter::DocPrinter(ostream &out, Registry ®istry) - : os(out), - registry(registry) { + : os(out), registry(registry) { } void DocPrinter::print_all() const { FeatureTypes feature_types = registry.get_feature_types(); - sort(feature_types.begin(), feature_types.end(), - [](const FeatureType *t1, const FeatureType *t2) { - return t1->name() < t2->name(); - } - ); + sort( + feature_types.begin(), feature_types.end(), + [](const FeatureType *t1, const FeatureType *t2) { + return t1->name() < t2->name(); + }); for (const FeatureType *type : feature_types) { print_category(*type); } @@ -33,9 +32,10 @@ void DocPrinter::print_feature(const string &name) const { void DocPrinter::print_category(const FeatureType &type) const { print_category_header(type.name()); - print_category_synopsis(type.get_synopsis(), type.supports_variable_binding()); + print_category_synopsis( + type.get_synopsis(), type.supports_variable_binding()); map> subcategories; - for (const shared_ptr &feature: registry.get_features()) { + for (const shared_ptr &feature : registry.get_features()) { if (feature->get_type() == type) { subcategories[feature->get_subcategory()].push_back(feature.get()); } @@ -54,21 +54,22 @@ void DocPrinter::print_category(const FeatureType &type) const { used by plug-ins, and if they are not used, they do not clutter the documentation. 
*/ - for (auto &pair: subcategories) { + for (auto &pair : subcategories) { string subcategory_name = pair.first; vector &features = pair.second; - sort(features.begin(), features.end(), - [](const Feature *p1, const Feature *p2) { - return p1->get_key() < p2->get_key(); - } - ); + sort( + features.begin(), features.end(), + [](const Feature *p1, const Feature *p2) { + return p1->get_key() < p2->get_key(); + }); print_subcategory(subcategory_name, features); } print_category_footer(); } void DocPrinter::print_subcategory( - const string &subcategory_name, const vector &features) const { + const string &subcategory_name, + const vector &features) const { if (!subcategory_name.empty()) { const SubcategoryPlugin &subcategory_plugin = registry.get_subcategory_plugin(subcategory_name); @@ -117,24 +118,22 @@ void Txt2TagsPrinter::print_usage(const Feature &feature) const { } argument_help_strings.push_back(arg_help); } - os << utils::join(argument_help_strings, ", ") - << ")" << endl; + os << utils::join(argument_help_strings, ", ") << ")" << endl; } } void Txt2TagsPrinter::print_arguments(const Feature &feature) const { for (const ArgumentInfo &arg_info : feature.get_arguments()) { const Type &arg_type = arg_info.type; - os << "- //" << arg_info.key << "// (" - << arg_type.name(); + os << "- //" << arg_info.key << "// (" << arg_type.name(); if (arg_info.bounds.has_bound()) os << " \"\"" << arg_info.bounds << "\"\""; os << "): " << arg_info.help << endl; if (arg_type.is_enum_type()) { for (const pair &explanation : arg_type.get_documented_enum_values()) { - os << " - ``" << explanation.first << "``: " - << explanation.second << endl; + os << " - ``" << explanation.first + << "``: " << explanation.second << endl; } } } @@ -145,9 +144,11 @@ void Txt2TagsPrinter::print_notes(const Feature &feature) const { for (const NoteInfo ¬e : feature.get_notes()) { if (note.long_text) { os << "=== " << note.name << " ===" << endl - << note.description << endl << endl; + << 
note.description << endl + << endl; } else { - os << "**" << note.name << ":** " << note.description << endl << endl; + os << "**" << note.name << ":** " << note.description << endl + << endl; } } } @@ -176,31 +177,29 @@ void Txt2TagsPrinter::print_category_header(const string &category_name) const { os << ">>>>CATEGORY: " << category_name << "<<<<" << endl; } -void Txt2TagsPrinter::print_category_synopsis(const string &synopsis, - bool supports_variable_binding) const { +void Txt2TagsPrinter::print_category_synopsis( + const string &synopsis, bool supports_variable_binding) const { if (!synopsis.empty()) { os << synopsis << endl; } if (supports_variable_binding) { - os << endl << "This feature type can be bound to variables using " + os << endl + << "This feature type can be bound to variables using " << "``let(variable_name, variable_definition, expression)" << "`` where ``expression`` can use ``variable_name``. " << "Predefinitions using ``--evaluator``, ``--heuristic``, and " << "``--landmarks`` are automatically transformed into ``let``-" - << "expressions but are deprecated." - << endl; + << "expressions but are deprecated." 
<< endl; } os << endl; } void Txt2TagsPrinter::print_category_footer() const { - os << endl - << ">>>>CATEGORYEND<<<<" << endl; + os << endl << ">>>>CATEGORYEND<<<<" << endl; } PlainPrinter::PlainPrinter(ostream &out, Registry ®istry, bool print_all) - : DocPrinter(out, registry), - print_all(print_all) { + : DocPrinter(out, registry), print_all(print_all) { } void PlainPrinter::print_synopsis(const Feature &feature) const { @@ -231,8 +230,7 @@ void PlainPrinter::print_usage(const Feature &feature) const { void PlainPrinter::print_arguments(const Feature &feature) const { for (const ArgumentInfo &arg_info : feature.get_arguments()) { - os << " " << arg_info.key << " (" - << arg_info.type.name(); + os << " " << arg_info.key << " (" << arg_info.type.name(); if (arg_info.bounds.has_bound()) os << " " << arg_info.bounds; os << "): " << arg_info.help << endl; @@ -240,8 +238,8 @@ void PlainPrinter::print_arguments(const Feature &feature) const { if (arg_type.is_enum_type()) { for (const pair &explanation : arg_type.get_documented_enum_values()) { - os << " - " << explanation.first << ": " - << explanation.second << endl; + os << " - " << explanation.first << ": " << explanation.second + << endl; } } } @@ -252,9 +250,11 @@ void PlainPrinter::print_notes(const Feature &feature) const { for (const NoteInfo ¬e : feature.get_notes()) { if (note.long_text) { os << "=== " << note.name << " ===" << endl - << note.description << endl << endl; + << note.description << endl + << endl; } else { - os << " * " << note.name << ": " << note.description << endl << endl; + os << " * " << note.name << ": " << note.description << endl + << endl; } } } @@ -282,19 +282,19 @@ void PlainPrinter::print_category_header(const string &category_name) const { os << "Help for " << category_name << endl << endl; } -void PlainPrinter::print_category_synopsis(const string &synopsis, - bool supports_variable_binding) const { +void PlainPrinter::print_category_synopsis( + const string &synopsis, bool 
supports_variable_binding) const { if (print_all && !synopsis.empty()) { os << synopsis << endl; } if (supports_variable_binding) { - os << endl << "This feature type can be bound to variables using " + os << endl + << "This feature type can be bound to variables using " << "``let(variable_name, variable_definition, expression)" << "`` where ``expression`` can use ``variable_name``. " << "Predefinitions using ``--evaluator``, ``--heuristic``, and " << "``--landmarks`` are automatically transformed into ``let``-" - << "expressions but are deprecated." - << endl; + << "expressions but are deprecated." << endl; } } diff --git a/src/search/plugins/doc_printer.h b/src/search/plugins/doc_printer.h index 8d9bccbb2e..fa3be2f6aa 100644 --- a/src/search/plugins/doc_printer.h +++ b/src/search/plugins/doc_printer.h @@ -13,8 +13,9 @@ class Registry; class DocPrinter { virtual void print_category(const FeatureType &type) const; - virtual void print_subcategory(const std::string &subcategory_name, - const std::vector &plugins) const; + virtual void print_subcategory( + const std::string &subcategory_name, + const std::vector &plugins) const; virtual void print_feature(const Feature &plugin) const; protected: @@ -27,9 +28,10 @@ class DocPrinter { virtual void print_notes(const Feature &plugin) const = 0; virtual void print_language_features(const Feature &plugin) const = 0; virtual void print_properties(const Feature &plugin) const = 0; - virtual void print_category_header(const std::string &category_name) const = 0; - virtual void print_category_synopsis(const std::string &synopsis, - bool supports_variable_binding) const = 0; + virtual void print_category_header( + const std::string &category_name) const = 0; + virtual void print_category_synopsis( + const std::string &synopsis, bool supports_variable_binding) const = 0; virtual void print_category_footer() const = 0; public: @@ -40,7 +42,6 @@ class DocPrinter { void print_feature(const std::string &name) const; }; - class 
Txt2TagsPrinter : public DocPrinter { protected: virtual void print_synopsis(const Feature &plugin) const override; @@ -49,16 +50,17 @@ class Txt2TagsPrinter : public DocPrinter { virtual void print_notes(const Feature &plugin) const override; virtual void print_language_features(const Feature &plugin) const override; virtual void print_properties(const Feature &plugin) const override; - virtual void print_category_header(const std::string &category_name) const override; - virtual void print_category_synopsis(const std::string &synopsis, - bool supports_variable_binding) const override; + virtual void print_category_header( + const std::string &category_name) const override; + virtual void print_category_synopsis( + const std::string &synopsis, + bool supports_variable_binding) const override; virtual void print_category_footer() const override; public: Txt2TagsPrinter(std::ostream &out, Registry ®istry); }; - class PlainPrinter : public DocPrinter { // If this is false, notes, properties and language_features are omitted. 
bool print_all; @@ -70,9 +72,11 @@ class PlainPrinter : public DocPrinter { virtual void print_notes(const Feature &plugin) const override; virtual void print_language_features(const Feature &plugin) const override; virtual void print_properties(const Feature &plugin) const override; - virtual void print_category_header(const std::string &category_name) const override; - virtual void print_category_synopsis(const std::string &synopsis, - bool supports_variable_binding) const override; + virtual void print_category_header( + const std::string &category_name) const override; + virtual void print_category_synopsis( + const std::string &synopsis, + bool supports_variable_binding) const override; virtual void print_category_footer() const override; public: diff --git a/src/search/plugins/options.h b/src/search/plugins/options.h index 6537a85bba..33b0dfb99c 100644 --- a/src/search/plugins/options.h +++ b/src/search/plugins/options.h @@ -38,7 +38,8 @@ template struct OptionsAnyCaster< ValueType, typename std::enable_if::value>::type> { static ValueType cast(const Any &operand) { - // Enums set within the code (options.set()) are already the right ValueType... + // Enums set within the code (options.set()) are already the right + // ValueType... if (operand.type() == typeid(ValueType)) { return any_cast(operand); } @@ -54,7 +55,8 @@ struct OptionsAnyCaster> { return any_cast>(operand); } // any_cast returns a copy here, not a reference. 
- const std::vector any_elements = any_cast>(operand); + const std::vector any_elements = + any_cast>(operand); std::vector result; result.reserve(any_elements.size()); for (const Any &element : any_elements) { @@ -96,8 +98,8 @@ class Options { return result; } catch (const BadAnyCast &) { ABORT( - "Invalid conversion while retrieving config options!\n" + - key + " is not of type " + utils::get_type_name() + + "Invalid conversion while retrieving config options!\n" + key + + " is not of type " + utils::get_type_name() + " but of type " + it->second.type_name()); } } diff --git a/src/search/plugins/plugin.cc b/src/search/plugins/plugin.cc index 8564a76ae3..962e256a2a 100644 --- a/src/search/plugins/plugin.cc +++ b/src/search/plugins/plugin.cc @@ -21,8 +21,7 @@ void Feature::document_synopsis(const string ¬e) { synopsis = note; } -void Feature::document_property( - const string &property, const string ¬e) { +void Feature::document_property(const string &property, const string ¬e) { properties.emplace_back(property, note); } @@ -77,9 +76,12 @@ Plugin::Plugin() { } CategoryPlugin::CategoryPlugin( - type_index pointer_type, const string &class_name, const string &category_name) - : pointer_type(pointer_type), class_name(class_name), - category_name(category_name), can_be_bound_to_variable(false) { + type_index pointer_type, const string &class_name, + const string &category_name) + : pointer_type(pointer_type), + class_name(class_name), + category_name(category_name), + can_be_bound_to_variable(false) { RawRegistry::instance()->insert_category_plugin(*this); } @@ -136,8 +138,9 @@ string SubcategoryPlugin::get_synopsis() const { return synopsis; } -EnumPlugin::EnumPlugin(type_index type, const string &class_name, - initializer_list> enum_values) +EnumPlugin::EnumPlugin( + type_index type, const string &class_name, + initializer_list> enum_values) : type(type), class_name(class_name), enum_info(enum_values) { RawRegistry::instance()->insert_enum_plugin(*this); } diff 
--git a/src/search/plugins/plugin.h b/src/search/plugins/plugin.h index e3b9cf9ed0..9ec9b31c81 100644 --- a/src/search/plugins/plugin.h +++ b/src/search/plugins/plugin.h @@ -12,8 +12,8 @@ #include "../utils/tuples.h" #include -#include #include +#include #include namespace utils { @@ -36,24 +36,22 @@ class Feature { virtual ~Feature() = default; Feature(const Feature &) = delete; - virtual Any construct(const Options &opts, const utils::Context &context) const = 0; + virtual Any construct( + const Options &opts, const utils::Context &context) const = 0; /* Add option with default value. Use def_val=ArgumentInfo::NO_DEFAULT for optional parameters without default values. */ template void add_option( - const std::string &key, - const std::string &help = "", + const std::string &key, const std::string &help = "", const std::string &default_value = "", const Bounds &bounds = Bounds::unlimited(), bool lazy_construction = false); template void add_list_option( - const std::string &key, - const std::string &help = "", - const std::string &default_value = "", - bool lazy_construction = false); + const std::string &key, const std::string &help = "", + const std::string &default_value = "", bool lazy_construction = false); void document_subcategory(const std::string &subcategory); void document_title(const std::string &title); @@ -63,7 +61,8 @@ class Feature { void document_language_support( const std::string &feature, const std::string ¬e); void document_note( - const std::string &title, const std::string ¬e, bool long_text = false); + const std::string &title, const std::string ¬e, + bool long_text = false); const Type &get_type() const; std::string get_key() const; @@ -76,13 +75,12 @@ class Feature { const std::vector &get_notes() const; }; - template class FeatureWithDefault : public Feature { protected: using Feature::Feature; - virtual std::shared_ptr - create_component(const Options &options) const { + virtual std::shared_ptr create_component( + const Options 
&options) const { return std::make_shared(options); } }; @@ -91,27 +89,29 @@ template class FeatureWithoutDefault : public Feature { protected: using Feature::Feature; - virtual std::shared_ptr - create_component(const Options &) const = 0; + virtual std::shared_ptr create_component( + const Options &) const = 0; }; template using FeatureAuto = typename std::conditional< std::is_constructible::value, - FeatureWithDefault, - FeatureWithoutDefault>::type; + FeatureWithDefault, FeatureWithoutDefault>::type; template class TypedFeature : public FeatureAuto { using BasePtr = std::shared_ptr; - static_assert(std::is_base_of::value, - "Constructed must derive from Base"); + static_assert( + std::is_base_of::value, + "Constructed must derive from Base"); public: TypedFeature(const std::string &key) - : FeatureAuto(TypeRegistry::instance()->get_type(), key) { + : FeatureAuto( + TypeRegistry::instance()->get_type(), key) { } - Any construct(const Options &options, const utils::Context &context) const override { + Any construct( + const Options &options, const utils::Context &context) const override { std::shared_ptr ptr; try { ptr = this->create_component(options); @@ -128,15 +128,15 @@ class TypedFeature : public FeatureAuto { before calling the constructor. The resulting arguments will be used as arguments to make_shared. */ -template +template std::shared_ptr make_shared_from_arg_tuples(Arguments... arguments) { return std::apply( - [](auto &&... 
flattened_args) { + [](auto &&...flattened_args) { return std::make_shared( - std::forward(flattened_args) ...); + std::forward(flattened_args)...); }, utils::flatten_tuple( - std::tuple(std::forward(arguments) ...))); + std::tuple(std::forward(arguments)...))); } class Plugin { @@ -190,8 +190,7 @@ class CategoryPlugin { bool can_be_bound_to_variable; public: CategoryPlugin( - std::type_index pointer_type, - const std::string &class_name, + std::type_index pointer_type, const std::string &class_name, const std::string &category_name); virtual ~CategoryPlugin() = default; CategoryPlugin(const CategoryPlugin &) = delete; @@ -210,9 +209,9 @@ template class TypedCategoryPlugin : public CategoryPlugin { public: TypedCategoryPlugin(const std::string &category_name) - : CategoryPlugin(typeid(std::shared_ptr), - utils::get_type_name>(), - category_name) { + : CategoryPlugin( + typeid(std::shared_ptr), + utils::get_type_name>(), category_name) { } }; @@ -236,8 +235,9 @@ class EnumPlugin { std::string class_name; EnumInfo enum_info; public: - EnumPlugin(std::type_index type, const std::string &class_name, - std::initializer_list> enum_values); + EnumPlugin( + std::type_index type, const std::string &class_name, + std::initializer_list> enum_values); std::type_index get_type() const; std::string get_class_name() const; @@ -247,31 +247,30 @@ class EnumPlugin { template class TypedEnumPlugin : public EnumPlugin { public: - TypedEnumPlugin(std::initializer_list> enum_values) - : EnumPlugin(typeid(T), utils::get_type_name>(), enum_values) { + TypedEnumPlugin( + std::initializer_list> enum_values) + : EnumPlugin( + typeid(T), utils::get_type_name>(), + enum_values) { } }; - template void Feature::add_option( - const std::string &key, - const std::string &help, - const std::string &default_value, - const Bounds &bounds, + const std::string &key, const std::string &help, + const std::string &default_value, const Bounds &bounds, bool lazy_construction) { - arguments.emplace_back(key, 
help, TypeRegistry::instance()->get_type(), - default_value, bounds, lazy_construction); + arguments.emplace_back( + key, help, TypeRegistry::instance()->get_type(), default_value, + bounds, lazy_construction); } template void Feature::add_list_option( - const std::string &key, - const std::string &help, - const std::string &default_value, - bool lazy_construction) { - add_option>(key, help, default_value, Bounds::unlimited(), - lazy_construction); + const std::string &key, const std::string &help, + const std::string &default_value, bool lazy_construction) { + add_option>( + key, help, default_value, Bounds::unlimited(), lazy_construction); } } diff --git a/src/search/plugins/plugin_info.cc b/src/search/plugins/plugin_info.cc index 502bc4a036..caa3701fac 100644 --- a/src/search/plugins/plugin_info.cc +++ b/src/search/plugins/plugin_info.cc @@ -9,12 +9,8 @@ namespace plugins { const string ArgumentInfo::NO_DEFAULT = ""; ArgumentInfo::ArgumentInfo( - const string &key, - const string &help, - const Type &type, - const string &default_value, - const Bounds &bounds, - bool lazy_construction) + const string &key, const string &help, const Type &type, + const string &default_value, const Bounds &bounds, bool lazy_construction) : key(key), help(help), type(type), @@ -31,26 +27,17 @@ bool ArgumentInfo::has_default() const { return is_optional() && default_value != NO_DEFAULT; } -PropertyInfo::PropertyInfo( - const string &property, - const string &description) - : property(property), - description(description) { +PropertyInfo::PropertyInfo(const string &property, const string &description) + : property(property), description(description) { } NoteInfo::NoteInfo( - const string &name, - const string &description, - bool long_text) - : name(name), - description(description), - long_text(long_text) { + const string &name, const string &description, bool long_text) + : name(name), description(description), long_text(long_text) { } LanguageSupportInfo::LanguageSupportInfo( - 
const string &feature, - const string &description) - : feature(feature), - description(description) { + const string &feature, const string &description) + : feature(feature), description(description) { } } diff --git a/src/search/plugins/plugin_info.h b/src/search/plugins/plugin_info.h index bf9b103c91..36e3596148 100644 --- a/src/search/plugins/plugin_info.h +++ b/src/search/plugins/plugin_info.h @@ -21,18 +21,14 @@ struct ArgumentInfo { bool lazy_construction; ArgumentInfo( - const std::string &key, - const std::string &help, - const Type &type, - const std::string &default_value, - const Bounds &bounds, + const std::string &key, const std::string &help, const Type &type, + const std::string &default_value, const Bounds &bounds, bool lazy_construction = false); bool is_optional() const; bool has_default() const; }; - struct PropertyInfo { std::string property; std::string description; @@ -40,21 +36,22 @@ struct PropertyInfo { PropertyInfo(const std::string &property, const std::string &description); }; - struct NoteInfo { std::string name; std::string description; bool long_text; - NoteInfo(const std::string &name, const std::string &description, bool long_text); + NoteInfo( + const std::string &name, const std::string &description, + bool long_text); }; - struct LanguageSupportInfo { std::string feature; std::string description; - LanguageSupportInfo(const std::string &feature, const std::string &description); + LanguageSupportInfo( + const std::string &feature, const std::string &description); }; } diff --git a/src/search/plugins/raw_registry.cc b/src/search/plugins/raw_registry.cc index 67e9cc5f06..0c0313dff4 100644 --- a/src/search/plugins/raw_registry.cc +++ b/src/search/plugins/raw_registry.cc @@ -11,11 +11,13 @@ using namespace std; namespace plugins { -void RawRegistry::insert_category_plugin(const CategoryPlugin &category_plugin) { +void RawRegistry::insert_category_plugin( + const CategoryPlugin &category_plugin) { 
category_plugins.push_back(&category_plugin); } -void RawRegistry::insert_subcategory_plugin(const SubcategoryPlugin &subcategory_plugin) { +void RawRegistry::insert_subcategory_plugin( + const SubcategoryPlugin &subcategory_plugin) { subcategory_plugins.push_back(&subcategory_plugin); } @@ -40,17 +42,20 @@ FeatureTypes RawRegistry::collect_types(vector &errors) const { } for (const CategoryPlugin *category_plugin : category_plugins) { - vector &names = type_to_names[category_plugin->get_pointer_type()]; + vector &names = + type_to_names[category_plugin->get_pointer_type()]; if (names.empty()) { const FeatureType &type = TypeRegistry::instance()->create_feature_type(*category_plugin); feature_types.push_back(&type); } - names.push_back("CategoryPlugin(" + category_plugin->get_class_name() + - ", " + category_plugin->get_category_name() + ")"); + names.push_back( + "CategoryPlugin(" + category_plugin->get_class_name() + ", " + + category_plugin->get_category_name() + ")"); } - // Check that each type index is only used once for either an enum or a category. + // Check that each type index is only used once for either an enum or a + // category. 
for (const auto &pair : type_to_names) { const vector &names = pair.second; if (names.size() > 1) { @@ -77,8 +82,8 @@ void RawRegistry::validate_category_names(vector &errors) const { const vector &class_names = pair.second; if (class_names.size() > 1) { errors.push_back( - "Multiple CategoryPlugins have the name '" + - category_name + "': " + utils::join(class_names, ", ") + "."); + "Multiple CategoryPlugins have the name '" + category_name + + "': " + utils::join(class_names, ", ") + "."); } } for (const auto &pair : class_name_to_category_names) { @@ -86,8 +91,8 @@ void RawRegistry::validate_category_names(vector &errors) const { const vector &category_names = pair.second; if (category_names.size() > 1) { errors.push_back( - "Multiple CategoryPlugins are defined for the class '" + class_name + - "': " + utils::join(category_names, ", ") + "."); + "Multiple CategoryPlugins are defined for the class '" + + class_name + "': " + utils::join(category_names, ", ") + "."); } } } @@ -99,8 +104,8 @@ SubcategoryPlugins RawRegistry::collect_subcategory_plugins( for (const SubcategoryPlugin *subcategory_plugin : subcategory_plugins) { ++occurrences[subcategory_plugin->get_subcategory_name()]; - subcategory_plugin_map.emplace(subcategory_plugin->get_subcategory_name(), - subcategory_plugin); + subcategory_plugin_map.emplace( + subcategory_plugin->get_subcategory_name(), subcategory_plugin); } for (auto &item : occurrences) { @@ -116,7 +121,8 @@ SubcategoryPlugins RawRegistry::collect_subcategory_plugins( } Features RawRegistry::collect_features( - const SubcategoryPlugins &subcategory_plugins, vector &errors) const { + const SubcategoryPlugins &subcategory_plugins, + vector &errors) const { Features features; unordered_map feature_key_occurrences; for (const Plugin *plugin : plugins) { @@ -167,8 +173,8 @@ Features RawRegistry::collect_features( for (const ArgumentInfo &arg_info : feature.get_arguments()) { if (arg_info.type == TypeRegistry::NO_TYPE) { errors.push_back( - 
"Missing Plugin for type of parameter '" + arg_info.key - + "' of feature '" + feature_key + "'."); + "Missing Plugin for type of parameter '" + arg_info.key + + "' of feature '" + feature_key + "'."); } ++parameter_occurrences[arg_info.key]; } @@ -178,8 +184,9 @@ Features RawRegistry::collect_features( int parameter_occurrence = pair.second; if (parameter_occurrence > 1) { errors.push_back( - "The parameter '" + parameter + "' in '" + feature_key + "' is defined " + - to_string(parameter_occurrence) + " times."); + "The parameter '" + parameter + "' in '" + feature_key + + "' is defined " + to_string(parameter_occurrence) + + " times."); } } } @@ -191,7 +198,8 @@ Registry RawRegistry::construct_registry() const { vector errors; FeatureTypes feature_types = collect_types(errors); validate_category_names(errors); - SubcategoryPlugins subcategory_plugins = collect_subcategory_plugins(errors); + SubcategoryPlugins subcategory_plugins = + collect_subcategory_plugins(errors); Features features = collect_features(subcategory_plugins, errors); if (!errors.empty()) { @@ -203,8 +211,6 @@ Registry RawRegistry::construct_registry() const { utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } return Registry( - move(feature_types), - move(subcategory_plugins), - move(features)); + move(feature_types), move(subcategory_plugins), move(features)); } } diff --git a/src/search/plugins/raw_registry.h b/src/search/plugins/raw_registry.h index f4aec4b5b3..62d0b51465 100644 --- a/src/search/plugins/raw_registry.h +++ b/src/search/plugins/raw_registry.h @@ -15,7 +15,8 @@ class RawRegistry { FeatureTypes collect_types(std::vector &errors) const; void validate_category_names(std::vector &errors) const; - SubcategoryPlugins collect_subcategory_plugins(std::vector &errors) const; + SubcategoryPlugins collect_subcategory_plugins( + std::vector &errors) const; Features collect_features( const SubcategoryPlugins &subcategory_plugins, std::vector &errors) const; diff --git 
a/src/search/plugins/registry.cc b/src/search/plugins/registry.cc index 9e8da2ba10..b234d59c51 100644 --- a/src/search/plugins/registry.cc +++ b/src/search/plugins/registry.cc @@ -10,8 +10,7 @@ using namespace std; namespace plugins { Registry::Registry( - FeatureTypes &&feature_types, - SubcategoryPlugins &&subcategory_plugins, + FeatureTypes &&feature_types, SubcategoryPlugins &&subcategory_plugins, Features &&features) : feature_types(move(feature_types)), subcategory_plugins(move(subcategory_plugins)), @@ -25,10 +24,12 @@ shared_ptr Registry::get_feature(const string &name) const { return features.at(name); } -const SubcategoryPlugin &Registry::get_subcategory_plugin(const string &subcategory) const { +const SubcategoryPlugin &Registry::get_subcategory_plugin( + const string &subcategory) const { if (!subcategory_plugins.count(subcategory)) { - ABORT("attempt to retrieve non-existing group info from registry: " + - string(subcategory)); + ABORT( + "attempt to retrieve non-existing group info from registry: " + + string(subcategory)); } return *subcategory_plugins.at(subcategory); } diff --git a/src/search/plugins/registry.h b/src/search/plugins/registry.h index 8051a93842..b2792df2ec 100644 --- a/src/search/plugins/registry.h +++ b/src/search/plugins/registry.h @@ -36,13 +36,13 @@ class Registry { Features features; public: Registry( - FeatureTypes &&feature_types, - SubcategoryPlugins &&subcategory_plugins, + FeatureTypes &&feature_types, SubcategoryPlugins &&subcategory_plugins, Features &&features); bool has_feature(const std::string &name) const; std::shared_ptr get_feature(const std::string &name) const; - const SubcategoryPlugin &get_subcategory_plugin(const std::string &subcategory) const; + const SubcategoryPlugin &get_subcategory_plugin( + const std::string &subcategory) const; const FeatureTypes &get_feature_types() const; std::vector get_subcategory_plugins() const; diff --git a/src/search/plugins/registry_types.h 
b/src/search/plugins/registry_types.h index 1b5dbb1ead..1e089e5a14 100644 --- a/src/search/plugins/registry_types.h +++ b/src/search/plugins/registry_types.h @@ -18,6 +18,7 @@ class SubcategoryPlugin; using FeatureTypes = std::vector; using EnumInfo = std::vector>; using Features = std::unordered_map>; -using SubcategoryPlugins = std::unordered_map; +using SubcategoryPlugins = + std::unordered_map; } #endif diff --git a/src/search/plugins/types.cc b/src/search/plugins/types.cc index 57e0b0fa9d..b7505bc898 100644 --- a/src/search/plugins/types.cc +++ b/src/search/plugins/types.cc @@ -19,7 +19,8 @@ bool Type::is_basic_type() const { } const type_index &Type::get_basic_type_index() const { - ABORT("Used Type::get_basic_type_index on a type that does not support it."); + ABORT( + "Used Type::get_basic_type_index on a type that does not support it."); } bool Type::is_feature_type() const { @@ -55,7 +56,8 @@ int Type::get_enum_index(const string &, utils::Context &) const { } const EnumInfo &Type::get_documented_enum_values() const { - ABORT("Used Type::get_documented_enum_values on a type that does not support it."); + ABORT( + "Used Type::get_documented_enum_values on a type that does not support it."); } bool Type::is_symbol_type() const { @@ -84,10 +86,9 @@ const type_index &BasicType::get_basic_type_index() const { } bool BasicType::can_convert_into(const Type &other) const { - return Type::can_convert_into(other) - || (other.is_basic_type() - && get_basic_type_index() == typeid(int) - && other.get_basic_type_index() == typeid(double)); + return Type::can_convert_into(other) || + (other.is_basic_type() && get_basic_type_index() == typeid(int) && + other.get_basic_type_index() == typeid(double)); } string BasicType::name() const { @@ -98,9 +99,12 @@ size_t BasicType::get_hash() const { return hash()(type); } -FeatureType::FeatureType(type_index pointer_type, const string &type_name, - const string &synopsis, bool supports_variable_binding) - : 
pointer_type(pointer_type), type_name(type_name), synopsis(synopsis), +FeatureType::FeatureType( + type_index pointer_type, const string &type_name, const string &synopsis, + bool supports_variable_binding) + : pointer_type(pointer_type), + type_name(type_name), + synopsis(synopsis), can_be_bound_to_variable(supports_variable_binding) { } @@ -126,11 +130,11 @@ string FeatureType::name() const { } size_t FeatureType::get_hash() const { - return hash()(typeid(FeatureType)) ^ hash()(pointer_type); + return hash()(typeid(FeatureType)) ^ + hash()(pointer_type); } -ListType::ListType(const Type &nested_type) - : nested_type(nested_type) { +ListType::ListType(const Type &nested_type) : nested_type(nested_type) { } bool ListType::operator==(const Type &other) const { @@ -151,8 +155,8 @@ const Type &ListType::get_nested_type() const { } bool ListType::can_convert_into(const Type &other) const { - return other.is_list_type() && other.has_nested_type() - && nested_type.can_convert_into(other.get_nested_type()); + return other.is_list_type() && other.has_nested_type() && + nested_type.can_convert_into(other.get_nested_type()); } string ListType::name() const { @@ -164,7 +168,8 @@ size_t ListType::get_hash() const { } bool EmptyListType::operator==(const Type &other) const { - const EmptyListType *other_ptr = dynamic_cast(&other); + const EmptyListType *other_ptr = + dynamic_cast(&other); return other_ptr; } @@ -201,7 +206,8 @@ bool EnumType::is_enum_type() const { return true; } -int EnumType::get_enum_index(const string &value, utils::Context &context) const { +int EnumType::get_enum_index( + const string &value, utils::Context &context) const { auto it = find(values.begin(), values.end(), value); int enum_index = static_cast(it - values.begin()); if (enum_index >= static_cast(values.size())) { @@ -229,7 +235,6 @@ size_t EnumType::get_hash() const { return hash_value; } - bool SymbolType::operator==(const Type &other) const { return other.is_symbol_type(); } @@ -250,11 
+255,16 @@ size_t SymbolType::get_hash() const { return hash()(typeid(SymbolType)); } -Any convert(const Any &value, const Type &from_type, const Type &to_type, utils::Context &context) { +Any convert( + const Any &value, const Type &from_type, const Type &to_type, + utils::Context &context) { if (from_type == to_type) { return value; - } else if (from_type.is_basic_type() && from_type.get_basic_type_index() == typeid(int) - && to_type.is_basic_type() && to_type.get_basic_type_index() == typeid(double)) { + } else if ( + from_type.is_basic_type() && + from_type.get_basic_type_index() == typeid(int) && + to_type.is_basic_type() && + to_type.get_basic_type_index() == typeid(double)) { int int_value = any_cast(value); if (int_value == numeric_limits::max()) { return Any(numeric_limits::infinity()); @@ -265,24 +275,31 @@ Any convert(const Any &value, const Type &from_type, const Type &to_type, utils: } else if (from_type.is_symbol_type() && to_type.is_enum_type()) { string str_value = any_cast(value); return Any(to_type.get_enum_index(str_value, context)); - } else if (from_type.is_list_type() && !from_type.has_nested_type() && to_type.is_list_type()) { + } else if ( + from_type.is_list_type() && !from_type.has_nested_type() && + to_type.is_list_type()) { /* A list without a specified type for its nested elements can be interpreted as a list of any other type. 
*/ return value; - } else if (from_type.is_list_type() && from_type.has_nested_type() - && to_type.is_list_type() && to_type.has_nested_type() - && from_type.get_nested_type().can_convert_into(to_type.get_nested_type())) { + } else if ( + from_type.is_list_type() && from_type.has_nested_type() && + to_type.is_list_type() && to_type.has_nested_type() && + from_type.get_nested_type().can_convert_into( + to_type.get_nested_type())) { const Type &from_nested_type = from_type.get_nested_type(); const Type &to_nested_type = to_type.get_nested_type(); const vector &elements = any_cast>(value); vector converted_elements; converted_elements.reserve(elements.size()); for (const Any &element : elements) { - converted_elements.push_back(convert(element, from_nested_type, to_nested_type, context)); + converted_elements.push_back( + convert(element, from_nested_type, to_nested_type, context)); } return Any(converted_elements); } else { - ABORT("Cannot convert " + from_type.name() + " to " + to_type.name() + "."); + ABORT( + "Cannot convert " + from_type.name() + " to " + to_type.name() + + "."); } } @@ -303,12 +320,14 @@ void TypeRegistry::insert_basic_type(const string &name) { registered_types[type] = make_unique(type, name); } -const FeatureType &TypeRegistry::create_feature_type(const CategoryPlugin &plugin) { +const FeatureType &TypeRegistry::create_feature_type( + const CategoryPlugin &plugin) { type_index type = plugin.get_pointer_type(); if (registered_types.count(type)) { - ABORT("Creating the FeatureType '" + plugin.get_class_name() - + "' but the type '" + registered_types[type]->name() - + "' already exists and has the same type_index."); + ABORT( + "Creating the FeatureType '" + plugin.get_class_name() + + "' but the type '" + registered_types[type]->name() + + "' already exists and has the same type_index."); } unique_ptr type_ptr = make_unique( plugin.get_pointer_type(), plugin.get_category_name(), @@ -322,9 +341,10 @@ const EnumType 
&TypeRegistry::create_enum_type(const EnumPlugin &plugin) { type_index type = plugin.get_type(); const EnumInfo &values = plugin.get_enum_info(); if (registered_types.count(type)) { - ABORT("Creating the EnumType '" + plugin.get_class_name() - + "' but the type '" + registered_types[type]->name() - + "' already exists and has the same type_index."); + ABORT( + "Creating the EnumType '" + plugin.get_class_name() + + "' but the type '" + registered_types[type]->name() + + "' already exists and has the same type_index."); } unique_ptr type_ptr = make_unique(type, values); const EnumType &type_ref = *type_ptr; @@ -335,7 +355,8 @@ const EnumType &TypeRegistry::create_enum_type(const EnumPlugin &plugin) { const ListType &TypeRegistry::create_list_type(const Type &element_type) { const Type *key = &element_type; if (!registered_list_types.count(key)) { - registered_list_types.insert({key, make_unique(element_type)}); + registered_list_types.insert( + {key, make_unique(element_type)}); } return *registered_list_types[key]; } diff --git a/src/search/plugins/types.h b/src/search/plugins/types.h index 6ffe72b456..20ee10ba8a 100644 --- a/src/search/plugins/types.h +++ b/src/search/plugins/types.h @@ -73,8 +73,9 @@ class FeatureType : public Type { bool can_be_bound_to_variable; public: - FeatureType(std::type_index pointer_type, const std::string &type_name, - const std::string &synopsis, bool supports_variable_binding); + FeatureType( + std::type_index pointer_type, const std::string &type_name, + const std::string &synopsis, bool supports_variable_binding); virtual bool operator==(const Type &other) const override; virtual bool is_feature_type() const override; virtual bool supports_variable_binding() const override; @@ -115,7 +116,8 @@ class EnumType : public Type { EnumType(std::type_index type, const EnumInfo &documented_values); virtual bool operator==(const Type &other) const override; virtual bool is_enum_type() const override; - virtual int get_enum_index(const 
std::string &value, utils::Context &context) const override; + virtual int get_enum_index( + const std::string &value, utils::Context &context) const override; virtual const EnumInfo &get_documented_enum_values() const override; virtual std::string name() const override; virtual size_t get_hash() const override; @@ -159,8 +161,9 @@ class TypeRegistry { }; std::unordered_map> registered_types; - std::unordered_map, - SemanticHash, SemanticEqual> registered_list_types; + std::unordered_map< + const Type *, std::unique_ptr, SemanticHash, SemanticEqual> + registered_list_types; template void insert_basic_type(const std::string &name); const Type &get_nonlist_type(std::type_index type) const; @@ -190,7 +193,8 @@ const Type &TypeRegistry::TypeOf::value(TypeRegistry ®istry) { } template -const Type &TypeRegistry::TypeOf>::value(TypeRegistry ®istry) { +const Type & +TypeRegistry::TypeOf>::value(TypeRegistry ®istry) { return registry.create_list_type(registry.get_type()); } @@ -199,8 +203,9 @@ const Type &TypeRegistry::get_type() { return TypeOf::value(*this); } -extern Any convert(const Any &value, const Type &from_type, const Type &to_type, - utils::Context &context); +extern Any convert( + const Any &value, const Type &from_type, const Type &to_type, + utils::Context &context); } #endif diff --git a/src/search/potentials/diverse_potential_heuristics.cc b/src/search/potentials/diverse_potential_heuristics.cc index 1907814736..3c268a4ff3 100644 --- a/src/search/potentials/diverse_potential_heuristics.cc +++ b/src/search/potentials/diverse_potential_heuristics.cc @@ -17,13 +17,9 @@ using namespace std; namespace potentials { DiversePotentialHeuristics::DiversePotentialHeuristics( int num_samples, int max_num_heuristics, double max_potential, - lp::LPSolverType lpsolver, - const shared_ptr &transform, int random_seed, - utils::Verbosity verbosity) - : optimizer( - transform, - lpsolver, - max_potential), + lp::LPSolverType lpsolver, const shared_ptr &transform, + int 
random_seed, utils::Verbosity verbosity) + : optimizer(transform, lpsolver, max_potential), max_num_heuristics(max_num_heuristics), num_samples(num_samples), rng(utils::get_rng(random_seed)), @@ -56,9 +52,12 @@ DiversePotentialHeuristics::filter_samples_and_compute_functions( log << "Time for filtering dead ends: " << filtering_timer << endl; log << "Duplicate samples: " << num_duplicates << endl; log << "Dead end samples: " << num_dead_ends << endl; - log << "Unique non-dead-end samples: " << samples_to_functions.size() << endl; + log << "Unique non-dead-end samples: " << samples_to_functions.size() + << endl; } - assert(num_duplicates + num_dead_ends + samples_to_functions.size() == samples.size()); + assert( + num_duplicates + num_dead_ends + samples_to_functions.size() == + samples.size()); return samples_to_functions; } @@ -133,8 +132,8 @@ DiversePotentialHeuristics::find_functions() { utils::Timer init_timer; // Sample states. - vector samples = sample_without_dead_end_detection( - optimizer, num_samples, *rng); + vector samples = + sample_without_dead_end_detection(optimizer, num_samples, *rng); // Filter dead end samples. 
SamplesToFunctionsMap samples_to_functions = @@ -157,25 +156,21 @@ class DiversePotentialMaxHeuristicFeature DiversePotentialMaxHeuristicFeature() : TypedFeature("diverse_potentials") { document_subcategory("heuristics_potentials"); document_title("Diverse potential heuristics"); - document_synopsis( - get_admissible_potentials_reference()); + document_synopsis(get_admissible_potentials_reference()); add_option( - "num_samples", - "Number of states to sample", - "1000", + "num_samples", "Number of states to sample", "1000", plugins::Bounds("0", "infinity")); add_option( - "max_num_heuristics", - "maximum number of potential heuristics", - "infinity", - plugins::Bounds("0", "infinity")); - add_admissible_potentials_options_to_feature(*this, "diverse_potentials"); + "max_num_heuristics", "maximum number of potential heuristics", + "infinity", plugins::Bounds("0", "infinity")); + add_admissible_potentials_options_to_feature( + *this, "diverse_potentials"); utils::add_rng_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( DiversePotentialHeuristics( opts.get("num_samples"), @@ -184,13 +179,11 @@ class DiversePotentialMaxHeuristicFeature opts.get("lpsolver"), opts.get>("transform"), opts.get("random_seed"), - opts.get("verbosity") - ).find_functions(), + opts.get("verbosity")) + .find_functions(), opts.get>("transform"), - opts.get("cache_estimates"), - opts.get("description"), - opts.get("verbosity") - ); + opts.get("cache_estimates"), opts.get("description"), + opts.get("verbosity")); } }; diff --git a/src/search/potentials/potential_heuristic.cc b/src/search/potentials/potential_heuristic.cc index c54526aec4..f945627a7b 100644 --- a/src/search/potentials/potential_heuristic.cc +++ b/src/search/potentials/potential_heuristic.cc @@ -15,7 +15,6 @@ PotentialHeuristic::PotentialHeuristic( 
function(move(function)) { } - int PotentialHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); return max(0, function->get_value(state)); diff --git a/src/search/potentials/potential_heuristic.h b/src/search/potentials/potential_heuristic.h index d47528f3b1..643e26eed2 100644 --- a/src/search/potentials/potential_heuristic.h +++ b/src/search/potentials/potential_heuristic.h @@ -20,9 +20,8 @@ class PotentialHeuristic : public Heuristic { public: explicit PotentialHeuristic( std::unique_ptr function, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/potentials/potential_max_heuristic.h b/src/search/potentials/potential_max_heuristic.h index fd095ee5c6..5a5c6cb168 100644 --- a/src/search/potentials/potential_max_heuristic.h +++ b/src/search/potentials/potential_max_heuristic.h @@ -21,9 +21,8 @@ class PotentialMaxHeuristic : public Heuristic { public: PotentialMaxHeuristic( std::vector> &&functions, - const std::shared_ptr &transform, - bool cache_estimates, const std::string &description, - utils::Verbosity verbosity); + const std::shared_ptr &transform, bool cache_estimates, + const std::string &description, utils::Verbosity verbosity); }; } diff --git a/src/search/potentials/potential_optimizer.cc b/src/search/potentials/potential_optimizer.cc index 7733fd827f..6a1c5955cd 100644 --- a/src/search/potentials/potential_optimizer.cc +++ b/src/search/potentials/potential_optimizer.cc @@ -19,8 +19,8 @@ static int get_undefined_value(VariableProxy var) { } PotentialOptimizer::PotentialOptimizer( - const shared_ptr &transform, - lp::LPSolverType lpsolver, double max_potential) + const shared_ptr &transform, lp::LPSolverType lpsolver, + double max_potential) : task(transform), 
task_proxy(*task), lp_solver(lpsolver), @@ -51,8 +51,7 @@ bool PotentialOptimizer::has_optimal_solution() const { } void PotentialOptimizer::optimize_for_state(const State &state) { - optimize_for_samples({state} - ); + optimize_for_samples({state}); } int PotentialOptimizer::get_lp_var_id(const FactProxy &fact) const { @@ -70,7 +69,8 @@ void PotentialOptimizer::optimize_for_all_states() { } vector coefficients(num_lp_vars, 0.0); for (FactProxy fact : task_proxy.get_variables().get_facts()) { - coefficients[get_lp_var_id(fact)] = 1.0 / fact.get_variable().get_domain_size(); + coefficients[get_lp_var_id(fact)] = + 1.0 / fact.get_variable().get_domain_size(); } lp_solver.set_objective_coefficients(coefficients); solve_and_extract(); @@ -112,7 +112,8 @@ void PotentialOptimizer::construct_lp() { named_vector::NamedVector lp_constraints; for (OperatorProxy op : task_proxy.get_operators()) { // Create constraint: - // Sum_{V in vars(eff(o))} (P_{V=pre(o)[V]} - P_{V=eff(o)[V]}) <= cost(o) + // Sum_{V in vars(eff(o))} (P_{V=pre(o)[V]} - P_{V=eff(o)[V]}) <= + // cost(o) unordered_map var_to_precondition; for (FactProxy pre : op.get_preconditions()) { var_to_precondition[pre.get_variable().get_id()] = pre.get_value(); @@ -190,8 +191,9 @@ void PotentialOptimizer::construct_lp() { lp_constraints.push_back(constraint); } } - lp::LinearProgram lp(lp::LPObjectiveSense::MAXIMIZE, move(lp_variables), - move(lp_constraints), infinity); + lp::LinearProgram lp( + lp::LPObjectiveSense::MAXIMIZE, move(lp_variables), + move(lp_constraints), infinity); lp_solver.load_problem(lp); } @@ -211,7 +213,8 @@ void PotentialOptimizer::extract_lp_solution() { } } -unique_ptr PotentialOptimizer::get_potential_function() const { +unique_ptr +PotentialOptimizer::get_potential_function() const { assert(has_optimal_solution()); return make_unique(fact_potentials); } diff --git a/src/search/potentials/sample_based_potential_heuristics.cc b/src/search/potentials/sample_based_potential_heuristics.cc index 
2815445f68..e47a991660 100644 --- a/src/search/potentials/sample_based_potential_heuristics.cc +++ b/src/search/potentials/sample_based_potential_heuristics.cc @@ -13,7 +13,8 @@ using namespace std; namespace potentials { -static void filter_dead_ends(PotentialOptimizer &optimizer, vector &samples) { +static void filter_dead_ends( + PotentialOptimizer &optimizer, vector &samples) { assert(!optimizer.potentials_are_bounded()); vector non_dead_end_samples; for (const State &sample : samples) { @@ -25,11 +26,10 @@ static void filter_dead_ends(PotentialOptimizer &optimizer, vector &sampl } static void optimize_for_samples( - PotentialOptimizer &optimizer, - int num_samples, + PotentialOptimizer &optimizer, int num_samples, utils::RandomNumberGenerator &rng) { - vector samples = sample_without_dead_end_detection( - optimizer, num_samples, rng); + vector samples = + sample_without_dead_end_detection(optimizer, num_samples, rng); if (!optimizer.potentials_are_bounded()) { filter_dead_ends(optimizer, samples); } @@ -43,12 +43,11 @@ static void optimize_for_samples( static vector> create_sample_based_potential_functions( int num_samples, int num_heuristics, double max_potential, - lp::LPSolverType lpsolver, - const shared_ptr &transform, int random_seed) { + lp::LPSolverType lpsolver, const shared_ptr &transform, + int random_seed) { vector> functions; PotentialOptimizer optimizer(transform, lpsolver, max_potential); - shared_ptr rng( - utils::get_rng(random_seed)); + shared_ptr rng(utils::get_rng(random_seed)); for (int i = 0; i < num_heuristics; ++i) { optimize_for_samples(optimizer, num_samples, *rng); functions.push_back(optimizer.get_potential_function()); @@ -59,43 +58,36 @@ create_sample_based_potential_functions( class SampleBasedPotentialMaxHeuristicFeature : public plugins::TypedFeature { public: - SampleBasedPotentialMaxHeuristicFeature() : TypedFeature("sample_based_potentials") { + SampleBasedPotentialMaxHeuristicFeature() + : 
TypedFeature("sample_based_potentials") { document_subcategory("heuristics_potentials"); document_title("Sample-based potential heuristics"); document_synopsis( "Maximum over multiple potential heuristics optimized for samples. " + get_admissible_potentials_reference()); add_option( - "num_heuristics", - "number of potential heuristics", - "1", + "num_heuristics", "number of potential heuristics", "1", plugins::Bounds("0", "infinity")); add_option( - "num_samples", - "Number of states to sample", - "1000", + "num_samples", "Number of states to sample", "1000", plugins::Bounds("0", "infinity")); add_admissible_potentials_options_to_feature( *this, "sample_based_potentials"); utils::add_rng_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( create_sample_based_potential_functions( - opts.get("num_samples"), - opts.get("num_heuristics"), + opts.get("num_samples"), opts.get("num_heuristics"), opts.get("max_potential"), opts.get("lpsolver"), opts.get>("transform"), - opts.get("random_seed") - ), + opts.get("random_seed")), opts.get>("transform"), - opts.get("cache_estimates"), - opts.get("description"), - opts.get("verbosity") - ); + opts.get("cache_estimates"), opts.get("description"), + opts.get("verbosity")); } }; diff --git a/src/search/potentials/single_potential_heuristics.cc b/src/search/potentials/single_potential_heuristics.cc index 3fd1bc011d..9d30c7fd49 100644 --- a/src/search/potentials/single_potential_heuristics.cc +++ b/src/search/potentials/single_potential_heuristics.cc @@ -15,9 +15,8 @@ enum class OptimizeFor { }; static unique_ptr create_potential_function( - const shared_ptr &transform, - lp::LPSolverType lpsolver, double max_potential, - OptimizeFor opt_func) { + const shared_ptr &transform, lp::LPSolverType lpsolver, + double max_potential, OptimizeFor opt_func) { 
PotentialOptimizer optimizer(transform, lpsolver, max_potential); const AbstractTask &task = *transform; TaskProxy task_proxy(task); @@ -37,7 +36,8 @@ static unique_ptr create_potential_function( class InitialStatePotentialHeuristicFeature : public plugins::TypedFeature { public: - InitialStatePotentialHeuristicFeature() : TypedFeature("initial_state_potential") { + InitialStatePotentialHeuristicFeature() + : TypedFeature("initial_state_potential") { document_subcategory("heuristics_potentials"); document_title("Potential heuristic optimized for initial state"); document_synopsis(get_admissible_potentials_reference()); @@ -46,28 +46,27 @@ class InitialStatePotentialHeuristicFeature *this, "initial_state_potential"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( create_potential_function( opts.get>("transform"), opts.get("lpsolver"), - opts.get("max_potential"), - OptimizeFor::INITIAL_STATE), + opts.get("max_potential"), OptimizeFor::INITIAL_STATE), opts.get>("transform"), - opts.get("cache_estimates"), - opts.get("description"), - opts.get("verbosity") - ); + opts.get("cache_estimates"), opts.get("description"), + opts.get("verbosity")); } }; -static plugins::FeaturePlugin _plugin_initial_state; +static plugins::FeaturePlugin + _plugin_initial_state; class AllStatesPotentialHeuristicFeature : public plugins::TypedFeature { public: - AllStatesPotentialHeuristicFeature() : TypedFeature("all_states_potential") { + AllStatesPotentialHeuristicFeature() + : TypedFeature("all_states_potential") { document_subcategory("heuristics_potentials"); document_title("Potential heuristic optimized for all states"); document_synopsis(get_admissible_potentials_reference()); @@ -76,21 +75,19 @@ class AllStatesPotentialHeuristicFeature *this, "all_states_potential"); } - virtual shared_ptr - create_component(const plugins::Options &opts) 
const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return make_shared( create_potential_function( opts.get>("transform"), opts.get("lpsolver"), - opts.get("max_potential"), - OptimizeFor::ALL_STATES), + opts.get("max_potential"), OptimizeFor::ALL_STATES), opts.get>("transform"), - opts.get("cache_estimates"), - opts.get("description"), - opts.get("verbosity") - ); + opts.get("cache_estimates"), opts.get("description"), + opts.get("verbosity")); } }; -static plugins::FeaturePlugin _plugin_all_states; +static plugins::FeaturePlugin + _plugin_all_states; } diff --git a/src/search/potentials/subcategory.cc b/src/search/potentials/subcategory.cc index e1c60c543d..f067677420 100644 --- a/src/search/potentials/subcategory.cc +++ b/src/search/potentials/subcategory.cc @@ -1,11 +1,12 @@ #include "../plugins/plugin.h" namespace potentials { -static class PotentialHeuristicsGroupPlugin : public plugins::SubcategoryPlugin { +static class PotentialHeuristicsGroupPlugin + : public plugins::SubcategoryPlugin { public: - PotentialHeuristicsGroupPlugin() : SubcategoryPlugin("heuristics_potentials") { + PotentialHeuristicsGroupPlugin() + : SubcategoryPlugin("heuristics_potentials") { document_title("Potential Heuristics"); } -} -_subcategory_plugin; +} _subcategory_plugin; } diff --git a/src/search/potentials/util.cc b/src/search/potentials/util.cc index 98624779f2..a710238dff 100644 --- a/src/search/potentials/util.cc +++ b/src/search/potentials/util.cc @@ -15,8 +15,7 @@ using namespace std; namespace potentials { vector sample_without_dead_end_detection( - PotentialOptimizer &optimizer, - int num_samples, + PotentialOptimizer &optimizer, int num_samples, utils::RandomNumberGenerator &rng) { const shared_ptr task = optimizer.get_task(); const TaskProxy task_proxy(*task); @@ -33,15 +32,14 @@ vector sample_without_dead_end_detection( } string get_admissible_potentials_reference() { - return "The algorithm is based on" + 
utils::format_conference_reference( - {"Jendrik Seipp", "Florian Pommerening", "Malte Helmert"}, - "New Optimization Functions for Potential Heuristics", - "https://ai.dmi.unibas.ch/papers/seipp-et-al-icaps2015.pdf", - "Proceedings of the 25th International Conference on" - " Automated Planning and Scheduling (ICAPS 2015)", - "193-201", - "AAAI Press", - "2015"); + return "The algorithm is based on" + + utils::format_conference_reference( + {"Jendrik Seipp", "Florian Pommerening", "Malte Helmert"}, + "New Optimization Functions for Potential Heuristics", + "https://ai.dmi.unibas.ch/papers/seipp-et-al-icaps2015.pdf", + "Proceedings of the 25th International Conference on" + " Automated Planning and Scheduling (ICAPS 2015)", + "193-201", "AAAI Press", "2015"); } void add_admissible_potentials_options_to_feature( @@ -61,21 +59,18 @@ void add_admissible_potentials_options_to_feature( "very high weights can cause numerical instability in the LP solver, " "while using very low weights limits the choice of potential " "heuristics. 
For details, see the ICAPS paper cited above.", - "1e8", - plugins::Bounds("0.0", "infinity")); + "1e8", plugins::Bounds("0.0", "infinity")); lp::add_lp_solver_option_to_feature(feature); add_heuristic_options_to_feature(feature, description); } - -tuple, bool, string, - utils::Verbosity> -get_admissible_potential_arguments_from_options( - const plugins::Options &opts) { +tuple< + double, lp::LPSolverType, shared_ptr, bool, string, + utils::Verbosity> +get_admissible_potential_arguments_from_options(const plugins::Options &opts) { return tuple_cat( make_tuple(opts.get("max_potential")), lp::get_lp_solver_arguments_from_options(opts), - get_heuristic_arguments_from_options(opts) - ); + get_heuristic_arguments_from_options(opts)); } } diff --git a/src/search/potentials/util.h b/src/search/potentials/util.h index 41b30e7b5f..1ae4b4e025 100644 --- a/src/search/potentials/util.h +++ b/src/search/potentials/util.h @@ -1,12 +1,12 @@ #ifndef POTENTIALS_UTIL_H #define POTENTIALS_UTIL_H -#include -#include -#include #include "../lp/lp_solver.h" #include "../utils/logging.h" +#include +#include +#include class AbstractTask; class State; @@ -24,17 +24,16 @@ namespace potentials { class PotentialOptimizer; std::vector sample_without_dead_end_detection( - PotentialOptimizer &optimizer, - int num_samples, + PotentialOptimizer &optimizer, int num_samples, utils::RandomNumberGenerator &rng); std::string get_admissible_potentials_reference(); void add_admissible_potentials_options_to_feature( plugins::Feature &feature, const std::string &description); -std::tuple, - bool, std::string, utils::Verbosity> -get_admissible_potential_arguments_from_options( - const plugins::Options &opts); +std::tuple< + double, lp::LPSolverType, std::shared_ptr, bool, std::string, + utils::Verbosity> +get_admissible_potential_arguments_from_options(const plugins::Options &opts); } #endif diff --git a/src/search/pruning/limited_pruning.cc b/src/search/pruning/limited_pruning.cc index 
2eb0c8f707..1124046464 100644 --- a/src/search/pruning/limited_pruning.cc +++ b/src/search/pruning/limited_pruning.cc @@ -7,10 +7,8 @@ using namespace std; namespace limited_pruning { LimitedPruning::LimitedPruning( - const shared_ptr &pruning, - double min_required_pruning_ratio, - int expansions_before_checking_pruning_ratio, - utils::Verbosity verbosity) + const shared_ptr &pruning, double min_required_pruning_ratio, + int expansions_before_checking_pruning_ratio, utils::Verbosity verbosity) : PruningMethod(verbosity), pruning_method(pruning), min_required_pruning_ratio(min_required_pruning_ratio), @@ -26,24 +24,27 @@ void LimitedPruning::initialize(const shared_ptr &task) { log << "pruning method: limited" << endl; } -void LimitedPruning::prune( - const State &state, vector &op_ids) { +void LimitedPruning::prune(const State &state, vector &op_ids) { if (is_pruning_disabled) { return; } if (num_pruning_calls == num_expansions_before_checking_pruning_ratio && min_required_pruning_ratio > 0.) { - double pruning_ratio = (num_successors_before_pruning == 0) ? 1. : 1. - ( - static_cast(num_successors_after_pruning) / - static_cast(num_successors_before_pruning)); + double pruning_ratio = + (num_successors_before_pruning == 0) + ? 1. + : 1. 
- (static_cast(num_successors_after_pruning) / + static_cast(num_successors_before_pruning)); if (log.is_at_least_normal()) { - log << "Pruning ratio after " << num_expansions_before_checking_pruning_ratio + log << "Pruning ratio after " + << num_expansions_before_checking_pruning_ratio << " calls: " << pruning_ratio << endl; } if (pruning_ratio < min_required_pruning_ratio) { if (log.is_at_least_normal()) { log << "-- pruning ratio is lower than minimum pruning ratio (" - << min_required_pruning_ratio << ") -> switching off pruning" << endl; + << min_required_pruning_ratio + << ") -> switching off pruning" << endl; } is_pruning_disabled = true; } @@ -66,19 +67,16 @@ class LimitedPruningFeature "previous expansions."); add_option>( - "pruning", - "the underlying pruning method to be applied"); + "pruning", "the underlying pruning method to be applied"); add_option( "min_required_pruning_ratio", "disable pruning if the pruning ratio is lower than this value after" " 'expansions_before_checking_pruning_ratio' expansions", - "0.2", - plugins::Bounds("0.0", "1.0")); + "0.2", plugins::Bounds("0.0", "1.0")); add_option( "expansions_before_checking_pruning_ratio", "number of expansions before deciding whether to disable pruning", - "1000", - plugins::Bounds("0", "infinity")); + "1000", plugins::Bounds("0", "infinity")); add_pruning_options_to_feature(*this); document_note( @@ -89,8 +87,8 @@ class LimitedPruningFeature "in an eager search such as astar."); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("pruning"), opts.get("min_required_pruning_ratio"), diff --git a/src/search/pruning/null_pruning_method.cc b/src/search/pruning/null_pruning_method.cc index 96ce5cdd0f..dde244f31c 100644 --- a/src/search/pruning/null_pruning_method.cc +++ b/src/search/pruning/null_pruning_method.cc @@ -28,8 
+28,8 @@ class NullPruningMethodFeature add_pruning_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( get_pruning_arguments_from_options(opts)); } diff --git a/src/search/pruning/null_pruning_method.h b/src/search/pruning/null_pruning_method.h index 3f857dd872..934f167ba8 100644 --- a/src/search/pruning/null_pruning_method.h +++ b/src/search/pruning/null_pruning_method.h @@ -5,12 +5,13 @@ namespace null_pruning_method { class NullPruningMethod : public PruningMethod { - virtual void prune( - const State &, std::vector &) override {} + virtual void prune(const State &, std::vector &) override { + } public: explicit NullPruningMethod(utils::Verbosity verbosity); virtual void initialize(const std::shared_ptr &) override; - virtual void print_statistics() const override {} + virtual void print_statistics() const override { + } }; } diff --git a/src/search/pruning/stubborn_sets.cc b/src/search/pruning/stubborn_sets.cc index 78005aebe5..bc8f05c98d 100644 --- a/src/search/pruning/stubborn_sets.cc +++ b/src/search/pruning/stubborn_sets.cc @@ -7,8 +7,7 @@ using namespace std; namespace stubborn_sets { StubbornSets::StubbornSets(utils::Verbosity verbosity) - : PruningMethod(verbosity), - num_operators(-1) { + : PruningMethod(verbosity), num_operators(-1) { } void StubbornSets::initialize(const shared_ptr &task) { @@ -36,10 +35,10 @@ void StubbornSets::compute_sorted_operators(const TaskProxy &task_proxy) { sorted_op_effects = utils::map_vector>( operators, [](const OperatorProxy &op) { - return utils::sorted( - utils::map_vector( - op.get_effects(), - [](const EffectProxy &eff) {return eff.get_fact().get_pair();})); + return utils::sorted(utils::map_vector( + op.get_effects(), [](const EffectProxy &eff) { + return eff.get_fact().get_pair(); + })); }); } diff --git 
a/src/search/pruning/stubborn_sets.h b/src/search/pruning/stubborn_sets.h index 402395ddec..c3f1a8f923 100644 --- a/src/search/pruning/stubborn_sets.h +++ b/src/search/pruning/stubborn_sets.h @@ -11,8 +11,8 @@ inline FactPair find_unsatisfied_condition( class StubbornSets : public PruningMethod { void compute_sorted_operators(const TaskProxy &task_proxy); void compute_achievers(const TaskProxy &task_proxy); - virtual void prune(const State &state, - std::vector &op_ids) override; + virtual void prune( + const State &state, std::vector &op_ids) override; protected: /* We copy some parts of the task here, so we can avoid the more expensive @@ -48,8 +48,10 @@ class StubbornSets : public PruningMethod { rather than an arbitrary variable order. (However, so far, there is no experimental evidence that this is a particularly good order.) */ - FactPair find_unsatisfied_precondition(int op_no, const State &state) const { - return find_unsatisfied_condition(sorted_op_preconditions[op_no], state); + FactPair find_unsatisfied_precondition( + int op_no, const State &state) const { + return find_unsatisfied_condition( + sorted_op_preconditions[op_no], state); } virtual void compute_stubborn_set(const State &state) = 0; diff --git a/src/search/pruning/stubborn_sets_action_centric.cc b/src/search/pruning/stubborn_sets_action_centric.cc index e0871a4fda..349efd6957 100644 --- a/src/search/pruning/stubborn_sets_action_centric.cc +++ b/src/search/pruning/stubborn_sets_action_centric.cc @@ -4,8 +4,8 @@ using namespace std; namespace stubborn_sets { // Relies on both fact sets being sorted by variable. 
-static bool contain_conflicting_fact(const vector &facts1, - const vector &facts2) { +static bool contain_conflicting_fact( + const vector &facts1, const vector &facts2) { auto facts1_it = facts1.begin(); auto facts2_it = facts2.begin(); while (facts1_it != facts1.end() && facts2_it != facts2.end()) { @@ -23,8 +23,7 @@ static bool contain_conflicting_fact(const vector &facts1, return false; } -StubbornSetsActionCentric::StubbornSetsActionCentric( - utils::Verbosity verbosity) +StubbornSetsActionCentric::StubbornSetsActionCentric(utils::Verbosity verbosity) : StubbornSets(verbosity) { } @@ -43,14 +42,14 @@ void StubbornSetsActionCentric::compute_stubborn_set(const State &state) { // Relies on op_preconds and op_effects being sorted by variable. bool StubbornSetsActionCentric::can_disable(int op1_no, int op2_no) const { - return contain_conflicting_fact(sorted_op_effects[op1_no], - sorted_op_preconditions[op2_no]); + return contain_conflicting_fact( + sorted_op_effects[op1_no], sorted_op_preconditions[op2_no]); } // Relies on op_effect being sorted by variable. 
bool StubbornSetsActionCentric::can_conflict(int op1_no, int op2_no) const { - return contain_conflicting_fact(sorted_op_effects[op1_no], - sorted_op_effects[op2_no]); + return contain_conflicting_fact( + sorted_op_effects[op1_no], sorted_op_effects[op2_no]); } bool StubbornSetsActionCentric::enqueue_stubborn_operator(int op_no) { diff --git a/src/search/pruning/stubborn_sets_atom_centric.cc b/src/search/pruning/stubborn_sets_atom_centric.cc index c3b6fc1027..8232731b8a 100644 --- a/src/search/pruning/stubborn_sets_atom_centric.cc +++ b/src/search/pruning/stubborn_sets_atom_centric.cc @@ -10,8 +10,7 @@ using namespace std; namespace stubborn_sets_atom_centric { StubbornSetsAtomCentric::StubbornSetsAtomCentric( - bool use_sibling_shortcut, - AtomSelectionStrategy atom_selection_strategy, + bool use_sibling_shortcut, AtomSelectionStrategy atom_selection_strategy, utils::Verbosity verbosity) : StubbornSets(verbosity), use_sibling_shortcut(use_sibling_shortcut), @@ -61,7 +60,8 @@ void StubbornSetsAtomCentric::compute_consumers(const TaskProxy &task_proxy) { } } -bool StubbornSetsAtomCentric::operator_is_applicable(int op, const State &state) const { +bool StubbornSetsAtomCentric::operator_is_applicable( + int op, const State &state) const { return find_unsatisfied_precondition(op, state) == FactPair::no_fact; } @@ -86,7 +86,8 @@ void StubbornSetsAtomCentric::enqueue_sibling_producers(const FactPair &fact) { given fact v=d. */ int dummy_mark = MARKED_VALUES_NONE; - int &mark = use_sibling_shortcut ? marked_producer_variables[fact.var] : dummy_mark; + int &mark = + use_sibling_shortcut ? marked_producer_variables[fact.var] : dummy_mark; if (mark == MARKED_VALUES_NONE) { /* If we don't have marking info for variable v, enqueue all sibling @@ -112,7 +113,8 @@ void StubbornSetsAtomCentric::enqueue_sibling_producers(const FactPair &fact) { void StubbornSetsAtomCentric::enqueue_sibling_consumers(const FactPair &fact) { // For documentation, see enqueue_sibling_producers(). 
int dummy_mark = MARKED_VALUES_NONE; - int &mark = use_sibling_shortcut ? marked_consumer_variables[fact.var] : dummy_mark; + int &mark = + use_sibling_shortcut ? marked_consumer_variables[fact.var] : dummy_mark; if (mark == MARKED_VALUES_NONE) { int domain_size = consumers[fact.var].size(); for (int value = 0; value < domain_size; ++value) { @@ -158,13 +160,16 @@ FactPair StubbornSetsAtomCentric::select_fact( } } } - } else if (atom_selection_strategy == AtomSelectionStrategy::DYNAMIC_SMALL) { + } else if ( + atom_selection_strategy == AtomSelectionStrategy::DYNAMIC_SMALL) { int min_count = numeric_limits::max(); for (const FactPair &condition : facts) { if (state[condition.var].get_value() != condition.value) { - const vector &ops = achievers[condition.var][condition.value]; - int count = count_if( - ops.begin(), ops.end(), [&](int op) {return !stubborn[op];}); + const vector &ops = + achievers[condition.var][condition.value]; + int count = count_if(ops.begin(), ops.end(), [&](int op) { + return !stubborn[op]; + }); if (count < min_count) { fact = condition; min_count = count; @@ -234,7 +239,8 @@ void StubbornSetsAtomCentric::compute_stubborn_set(const State &state) { } } -void StubbornSetsAtomCentric::handle_stubborn_operator(const State &state, int op) { +void StubbornSetsAtomCentric::handle_stubborn_operator( + const State &state, int op) { if (!stubborn[op]) { stubborn[op] = true; if (operator_is_applicable(op, state)) { @@ -248,7 +254,8 @@ void StubbornSetsAtomCentric::handle_stubborn_operator(const State &state, int o class StubbornSetsAtomCentricFeature : public plugins::TypedFeature { public: - StubbornSetsAtomCentricFeature() : TypedFeature("atom_centric_stubborn_sets") { + StubbornSetsAtomCentricFeature() + : TypedFeature("atom_centric_stubborn_sets") { document_title("Atom-centric stubborn sets"); document_synopsis( "Stubborn sets are a state pruning method which computes a subset " @@ -258,14 +265,13 @@ class StubbornSetsAtomCentricFeature "this 
implementation focuses on atomic propositions (atoms), which " "often speeds up the computation on IPC benchmarks. For details, see" + utils::format_conference_reference( - {"Gabriele Roeger", "Malte Helmert", "Jendrik Seipp", "Silvan Sievers"}, + {"Gabriele Roeger", "Malte Helmert", "Jendrik Seipp", + "Silvan Sievers"}, "An Atom-Centric Perspective on Stubborn Sets", "https://ai.dmi.unibas.ch/papers/roeger-et-al-socs2020.pdf", "Proceedings of the 13th Annual Symposium on Combinatorial Search " "(SoCS 2020)", - "57-65", - "AAAI Press", - "2020")); + "57-65", "AAAI Press", "2020")); add_option( "use_sibling_shortcut", @@ -280,8 +286,8 @@ class StubbornSetsAtomCentricFeature add_pruning_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get("use_sibling_shortcut"), opts.get("atom_selection_strategy"), @@ -291,16 +297,15 @@ class StubbornSetsAtomCentricFeature static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"fast_downward", - "select the atom (v, d) with the variable v that comes first in the Fast " - "Downward variable ordering (which is based on the causal graph)"}, - {"quick_skip", - "if possible, select an unsatisfied atom whose producers are already marked"}, - {"static_small", - "select the atom achieved by the fewest number of actions"}, - {"dynamic_small", - "select the atom achieved by the fewest number of actions that are not " - "yet part of the stubborn set"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"fast_downward", + "select the atom (v, d) with the variable v that comes first in the Fast " + "Downward variable ordering (which is based on the causal graph)"}, + {"quick_skip", + "if possible, select an unsatisfied atom whose producers are already marked"}, + {"static_small", + "select the atom 
achieved by the fewest number of actions"}, + {"dynamic_small", + "select the atom achieved by the fewest number of actions that are not " + "yet part of the stubborn set"}}); } diff --git a/src/search/pruning/stubborn_sets_atom_centric.h b/src/search/pruning/stubborn_sets_atom_centric.h index 091fc25d30..08f191521e 100644 --- a/src/search/pruning/stubborn_sets_atom_centric.h +++ b/src/search/pruning/stubborn_sets_atom_centric.h @@ -19,7 +19,8 @@ class StubbornSetsAtomCentric : public stubborn_sets::StubbornSets { const bool use_sibling_shortcut; const AtomSelectionStrategy atom_selection_strategy; - // consumers[v][d] contains the ID of operator o if pre(o) contains the fact v=d. + // consumers[v][d] contains the ID of operator o if pre(o) contains the fact + // v=d. std::vector>> consumers; /* Marked producer and consumer facts. @@ -45,7 +46,8 @@ class StubbornSetsAtomCentric : public stubborn_sets::StubbornSets { void enqueue_consumers(const FactPair &fact); void enqueue_sibling_consumers(const FactPair &fact); void enqueue_sibling_producers(const FactPair &fact); - FactPair select_fact(const std::vector &facts, const State &state) const; + FactPair select_fact( + const std::vector &facts, const State &state) const; void enqueue_nes(int op, const State &state); void enqueue_interferers(int op); void handle_stubborn_operator(const State &state, int op); diff --git a/src/search/pruning/stubborn_sets_ec.cc b/src/search/pruning/stubborn_sets_ec.cc index d6b2972fde..11bc971ed5 100644 --- a/src/search/pruning/stubborn_sets_ec.cc +++ b/src/search/pruning/stubborn_sets_ec.cc @@ -13,12 +13,12 @@ namespace stubborn_sets_ec { // DTGs are stored as one adjacency list per value. 
using StubbornDTG = vector>; -static inline bool is_v_applicable(int var, - int op_no, - const State &state, - vector> &preconditions) { +static inline bool is_v_applicable( + int var, int op_no, const State &state, + vector> &preconditions) { int precondition_on_var = preconditions[op_no][var]; - return precondition_on_var == -1 || precondition_on_var == state[var].get_value(); + return precondition_on_var == -1 || + precondition_on_var == state[var].get_value(); } static vector build_dtgs(TaskProxy task_proxy) { @@ -52,7 +52,8 @@ static vector build_dtgs(TaskProxy task_proxy) { VariableProxy var = fact.get_variable(); int var_id = var.get_id(); int eff_val = fact.get_value(); - int pre_val = utils::get_value_or_default(preconditions, var_id, -1); + int pre_val = + utils::get_value_or_default(preconditions, var_id, -1); StubbornDTG &dtg = dtgs[var_id]; if (pre_val == -1) { @@ -68,10 +69,9 @@ static vector build_dtgs(TaskProxy task_proxy) { return dtgs; } -static void recurse_forwards(const StubbornDTG &dtg, - int start_value, - int current_value, - vector &reachable) { +static void recurse_forwards( + const StubbornDTG &dtg, int start_value, int current_value, + vector &reachable) { if (!reachable[current_value]) { reachable[current_value] = true; for (int successor_value : dtg[current_value]) @@ -80,14 +80,13 @@ static void recurse_forwards(const StubbornDTG &dtg, } // Relies on both fact sets being sorted by variable. 
-static void get_conflicting_vars(const vector &facts1, - const vector &facts2, - vector &conflicting_vars) { +static void get_conflicting_vars( + const vector &facts1, const vector &facts2, + vector &conflicting_vars) { conflicting_vars.clear(); auto facts1_it = facts1.begin(); auto facts2_it = facts2.begin(); - while (facts1_it != facts1.end() && - facts2_it != facts2.end()) { + while (facts1_it != facts1.end() && facts2_it != facts2.end()) { if (facts1_it->var < facts2_it->var) { ++facts1_it; } else if (facts1_it->var > facts2_it->var) { @@ -127,7 +126,8 @@ void StubbornSetsEC::initialize(const shared_ptr &task) { log << "pruning method: stubborn sets ec" << endl; } -void StubbornSetsEC::compute_operator_preconditions(const TaskProxy &task_proxy) { +void StubbornSetsEC::compute_operator_preconditions( + const TaskProxy &task_proxy) { int num_variables = task_proxy.get_variables().size(); op_preconditions_on_var = utils::map_vector>( task_proxy.get_operators(), [&](const OperatorProxy &op) { @@ -228,21 +228,24 @@ void StubbornSetsEC::enqueue_stubborn_operator_and_remember_written_vars( /* TODO: think about a better name, which distinguishes this method better from the corresponding method for simple stubborn sets */ -void StubbornSetsEC::add_nes_for_fact(const FactPair &fact, const State &state) { +void StubbornSetsEC::add_nes_for_fact( + const FactPair &fact, const State &state) { for (int achiever : achievers[fact.var][fact.value]) { if (active_ops[achiever]) { - enqueue_stubborn_operator_and_remember_written_vars(achiever, state); + enqueue_stubborn_operator_and_remember_written_vars( + achiever, state); } } nes_computed[fact.var][fact.value] = true; } -void StubbornSetsEC::add_conflicting_and_disabling(int op_no, - const State &state) { +void StubbornSetsEC::add_conflicting_and_disabling( + int op_no, const State &state) { for (int conflict : get_conflicting_and_disabling(op_no)) { if (active_ops[conflict]) { - 
enqueue_stubborn_operator_and_remember_written_vars(conflict, state); + enqueue_stubborn_operator_and_remember_written_vars( + conflict, state); } } } @@ -250,13 +253,14 @@ void StubbornSetsEC::add_conflicting_and_disabling(int op_no, // Relies on op_effects and op_preconditions being sorted by variable. void StubbornSetsEC::get_disabled_vars( int op1_no, int op2_no, vector &disabled_vars) const { - get_conflicting_vars(sorted_op_effects[op1_no], - sorted_op_preconditions[op2_no], - disabled_vars); + get_conflicting_vars( + sorted_op_effects[op1_no], sorted_op_preconditions[op2_no], + disabled_vars); } void StubbornSetsEC::apply_s5(int op_no, const State &state) { - // Find a violated state variable and check if stubborn contains a writer for this variable. + // Find a violated state variable and check if stubborn contains a writer + // for this variable. for (const FactPair &pre : sorted_op_preconditions[op_no]) { if (state[pre.var].get_value() != pre.value && written_vars[pre.var]) { if (!nes_computed[pre.var][pre.value]) { @@ -266,7 +270,8 @@ void StubbornSetsEC::apply_s5(int op_no, const State &state) { } } - FactPair violated_precondition = find_unsatisfied_precondition(op_no, state); + FactPair violated_precondition = + find_unsatisfied_precondition(op_no, state); assert(violated_precondition != FactPair::no_fact); if (!nes_computed[violated_precondition.var][violated_precondition.value]) { add_nes_for_fact(violated_precondition, state); @@ -281,29 +286,28 @@ void StubbornSetsEC::initialize_stubborn_set(const State &state) { compute_active_operators(state); - //rule S1 + // rule S1 FactPair unsatisfied_goal = find_unsatisfied_goal(state); assert(unsatisfied_goal != FactPair::no_fact); - add_nes_for_fact(unsatisfied_goal, state); // active operators used + add_nes_for_fact(unsatisfied_goal, state); // active operators used } void StubbornSetsEC::handle_stubborn_operator(const State &state, int op_no) { if (is_applicable(op_no, state)) { - //Rule S2 & S3 - 
add_conflicting_and_disabling(op_no, state); // active operators used - //Rule S4' + // Rule S2 & S3 + add_conflicting_and_disabling(op_no, state); // active operators used + // Rule S4' vector disabled_vars; for (int disabled_op_no : get_disabled(op_no)) { if (active_ops[disabled_op_no]) { get_disabled_vars(op_no, disabled_op_no, disabled_vars); - if (!disabled_vars.empty()) { // == can_disable(op1_no, op2_no) + if (!disabled_vars.empty()) { // == can_disable(op1_no, op2_no) bool v_applicable_op_found = false; for (int disabled_var : disabled_vars) { - //First case: add o' - if (is_v_applicable(disabled_var, - disabled_op_no, - state, - op_preconditions_on_var)) { + // First case: add o' + if (is_v_applicable( + disabled_var, disabled_op_no, state, + op_preconditions_on_var)) { enqueue_stubborn_operator_and_remember_written_vars( disabled_op_no, state); v_applicable_op_found = true; @@ -311,15 +315,16 @@ void StubbornSetsEC::handle_stubborn_operator(const State &state, int op_no) { } } - //Second case: add a necessary enabling set for o' following S5 + // Second case: add a necessary enabling set for o' + // following S5 if (!v_applicable_op_found) { apply_s5(disabled_op_no, state); } } } } - } else { // op is inapplicable - //S5 + } else { // op is inapplicable + // S5 apply_s5(op_no, state); } } @@ -336,20 +341,20 @@ class StubbornSetsECFeature "on several design choices, there are different variants thereof. " "The variant 'StubbornSetsEC' resolves the design choices such that " "the resulting pruning method is guaranteed to strictly dominate the " - "Expansion Core pruning method. For details, see" + utils::format_conference_reference( - {"Martin Wehrle", "Malte Helmert", "Yusra Alkhazraji", "Robert Mattmueller"}, + "Expansion Core pruning method. 
For details, see" + + utils::format_conference_reference( + {"Martin Wehrle", "Malte Helmert", "Yusra Alkhazraji", + "Robert Mattmueller"}, "The Relative Pruning Power of Strong Stubborn Sets and Expansion Core", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS13/paper/view/6053/6185", "Proceedings of the 23rd International Conference on Automated Planning " "and Scheduling (ICAPS 2013)", - "251-259", - "AAAI Press", - "2013")); + "251-259", "AAAI Press", "2013")); add_pruning_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( get_pruning_arguments_from_options(opts)); } diff --git a/src/search/pruning/stubborn_sets_ec.h b/src/search/pruning/stubborn_sets_ec.h index 8ac60024bb..788e2397a1 100644 --- a/src/search/pruning/stubborn_sets_ec.h +++ b/src/search/pruning/stubborn_sets_ec.h @@ -17,20 +17,22 @@ class StubbornSetsEC : public stubborn_sets::StubbornSetsActionCentric { std::vector> nes_computed; bool is_applicable(int op_no, const State &state) const; - void get_disabled_vars(int op1_no, int op2_no, - std::vector &disabled_vars) const; + void get_disabled_vars( + int op1_no, int op2_no, std::vector &disabled_vars) const; void build_reachability_map(const TaskProxy &task_proxy); void compute_operator_preconditions(const TaskProxy &task_proxy); const std::vector &get_conflicting_and_disabling(int op1_no); const std::vector &get_disabled(int op1_no); void add_conflicting_and_disabling(int op_no, const State &state); void compute_active_operators(const State &state); - void enqueue_stubborn_operator_and_remember_written_vars(int op_no, const State &state); + void enqueue_stubborn_operator_and_remember_written_vars( + int op_no, const State &state); void add_nes_for_fact(const FactPair &fact, const State &state); void apply_s5(int op_no, const State &state); protected: 
virtual void initialize_stubborn_set(const State &state) override; - virtual void handle_stubborn_operator(const State &state, int op_no) override; + virtual void handle_stubborn_operator( + const State &state, int op_no) override; public: explicit StubbornSetsEC(utils::Verbosity verbosity); virtual void initialize(const std::shared_ptr &task) override; diff --git a/src/search/pruning/stubborn_sets_simple.cc b/src/search/pruning/stubborn_sets_simple.cc index d03e7f6d60..4f50faa9e6 100644 --- a/src/search/pruning/stubborn_sets_simple.cc +++ b/src/search/pruning/stubborn_sets_simple.cc @@ -58,9 +58,10 @@ void StubbornSetsSimple::initialize_stubborn_set(const State &state) { add_necessary_enabling_set(unsatisfied_goal); } -void StubbornSetsSimple::handle_stubborn_operator(const State &state, - int op_no) { - FactPair unsatisfied_precondition = find_unsatisfied_precondition(op_no, state); +void StubbornSetsSimple::handle_stubborn_operator( + const State &state, int op_no) { + FactPair unsatisfied_precondition = + find_unsatisfied_precondition(op_no, state); if (unsatisfied_precondition == FactPair::no_fact) { /* no unsatisfied precondition found => operator is applicable @@ -84,30 +85,27 @@ class StubbornSetsSimpleFeature "optimality of the overall search is preserved. As stubborn sets rely " "on several design choices, there are different variants thereof. " "This stubborn set variant resolves the design choices in a " - "straight-forward way. For details, see the following papers: " - + utils::format_conference_reference( - {"Yusra Alkhazraji", "Martin Wehrle", "Robert Mattmueller", "Malte Helmert"}, + "straight-forward way. 
For details, see the following papers: " + + utils::format_conference_reference( + {"Yusra Alkhazraji", "Martin Wehrle", "Robert Mattmueller", + "Malte Helmert"}, "A Stubborn Set Algorithm for Optimal Planning", "https://ai.dmi.unibas.ch/papers/alkhazraji-et-al-ecai2012.pdf", "Proceedings of the 20th European Conference on Artificial Intelligence " "(ECAI 2012)", - "891-892", - "IOS Press", - "2012") - + utils::format_conference_reference( + "891-892", "IOS Press", "2012") + + utils::format_conference_reference( {"Martin Wehrle", "Malte Helmert"}, "Efficient Stubborn Sets: Generalized Algorithms and Selection Strategies", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7922/8042", "Proceedings of the 24th International Conference on Automated Planning " " and Scheduling (ICAPS 2014)", - "323-331", - "AAAI Press", - "2014")); + "323-331", "AAAI Press", "2014")); add_pruning_options_to_feature(*this); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( get_pruning_arguments_from_options(opts)); } diff --git a/src/search/pruning/stubborn_sets_simple.h b/src/search/pruning/stubborn_sets_simple.h index 2a6d8366b8..2f2c3182ad 100644 --- a/src/search/pruning/stubborn_sets_simple.h +++ b/src/search/pruning/stubborn_sets_simple.h @@ -16,15 +16,14 @@ class StubbornSetsSimple : public stubborn_sets::StubbornSetsActionCentric { void add_interfering(int op_no); inline bool interfere(int op1_no, int op2_no) { - return can_disable(op1_no, op2_no) || - can_conflict(op1_no, op2_no) || + return can_disable(op1_no, op2_no) || can_conflict(op1_no, op2_no) || can_disable(op2_no, op1_no); } const std::vector &get_interfering_operators(int op1_no); protected: virtual void initialize_stubborn_set(const State &state) override; - virtual void handle_stubborn_operator(const State &state, - int op_no) override; + 
virtual void handle_stubborn_operator( + const State &state, int op_no) override; public: explicit StubbornSetsSimple(utils::Verbosity verbosity); virtual void initialize(const std::shared_ptr &task) override; diff --git a/src/search/pruning_method.cc b/src/search/pruning_method.cc index 1c2a3e060c..f2a23ebf46 100644 --- a/src/search/pruning_method.cc +++ b/src/search/pruning_method.cc @@ -2,7 +2,6 @@ #include "plugins/plugin.h" #include "task_utils/task_properties.h" - #include "utils/logging.h" #include @@ -48,9 +47,11 @@ void PruningMethod::print_statistics() const { << num_successors_before_pruning << endl << "total successors after pruning: " << num_successors_after_pruning << endl; - double pruning_ratio = (num_successors_before_pruning == 0) ? 1. : 1. - ( - static_cast(num_successors_after_pruning) / - static_cast(num_successors_before_pruning)); + double pruning_ratio = + (num_successors_before_pruning == 0) + ? 1. + : 1. - (static_cast(num_successors_after_pruning) / + static_cast(num_successors_before_pruning)); log << "Pruning ratio: " << pruning_ratio << endl; if (log.is_at_least_verbose()) { log << "Time for pruning operators: " << timer << endl; @@ -74,10 +75,10 @@ tuple get_pruning_arguments_from_options( return utils::get_log_arguments_from_options(opts); } -static class PruningMethodCategoryPlugin : public plugins::TypedCategoryPlugin { +static class PruningMethodCategoryPlugin + : public plugins::TypedCategoryPlugin { public: PruningMethodCategoryPlugin() : TypedCategoryPlugin("PruningMethod") { document_synopsis("Prune or reorder applicable operators."); } -} -_category_plugin; +} _category_plugin; diff --git a/src/search/pruning_method.h b/src/search/pruning_method.h index 11f7521d74..861ed3affb 100644 --- a/src/search/pruning_method.h +++ b/src/search/pruning_method.h @@ -25,8 +25,7 @@ class PruningMethod { utils::Timer timer; friend class limited_pruning::LimitedPruning; - virtual void prune( - const State &state, std::vector &op_ids) = 0; + 
virtual void prune(const State &state, std::vector &op_ids) = 0; protected: mutable utils::LogProxy log; std::shared_ptr task; diff --git a/src/search/search_algorithm.cc b/src/search/search_algorithm.cc index c018c45c65..43229aacff 100644 --- a/src/search/search_algorithm.cc +++ b/src/search/search_algorithm.cc @@ -20,7 +20,6 @@ using namespace std; using utils::ExitCode; - static successor_generator::SuccessorGenerator &get_successor_generator( const TaskProxy &task_proxy, utils::LogProxy &log) { log << "Building successor generator..." << flush; @@ -63,14 +62,17 @@ SearchAlgorithm::SearchAlgorithm( task_properties::print_variable_statistics(task_proxy); } -SearchAlgorithm::SearchAlgorithm(const plugins::Options &opts) // TODO options object is needed for iterated search, the prototype for issue559 resolves this +SearchAlgorithm::SearchAlgorithm( + const plugins::Options + &opts) // TODO options object is needed for iterated search, the + // prototype for issue559 resolves this : description(opts.get_unparsed_config()), status(IN_PROGRESS), solution_found(false), task(tasks::g_root_task), task_proxy(*task), log(utils::get_log_for_verbosity( - opts.get("verbosity"))), + opts.get("verbosity"))), state_registry(task_proxy), successor_generator(get_successor_generator(task_proxy, log)), search_space(state_registry, log), @@ -143,23 +145,20 @@ int SearchAlgorithm::get_adjusted_cost(const OperatorProxy &op) const { return get_adjusted_action_cost(op, cost_type, is_unit_cost); } - - -void print_initial_evaluator_values( - const EvaluationContext &eval_context) { +void print_initial_evaluator_values(const EvaluationContext &eval_context) { eval_context.get_cache().for_each_evaluator_result( - [] (const Evaluator *eval, const EvaluationResult &result) { + [](const Evaluator *eval, const EvaluationResult &result) { if (eval->is_used_for_reporting_minima()) { eval->report_value_for_initial_state(result); } - } - ); + }); } /* TODO: merge this into add_options_to_feature 
when all search algorithms support pruning. - Method doesn't belong here because it's only useful for certain derived classes. + Method doesn't belong here because it's only useful for certain derived + classes. TODO: Figure out where it belongs and move it there. */ void add_search_pruning_options_to_feature(plugins::Feature &feature) { feature.add_option>( @@ -170,8 +169,7 @@ void add_search_pruning_options_to_feature(plugins::Feature &feature) { "null()"); } -tuple> -get_search_pruning_arguments_from_options( +tuple> get_search_pruning_arguments_from_options( const plugins::Options &opts) { return make_tuple(opts.get>("pruning")); } @@ -182,7 +180,8 @@ void add_search_algorithm_options_to_feature( feature.add_option( "bound", "exclusive depth bound on g-values. Cutoffs are always performed according to " - "the real cost, regardless of the cost_type parameter", "infinity"); + "the real cost, regardless of the cost_type parameter", + "infinity"); feature.add_option( "max_time", "maximum time in seconds the search is allowed to run for. 
The " @@ -193,37 +192,30 @@ void add_search_algorithm_options_to_feature( "just like incomplete search algorithms that exhaust their search space.", "infinity"); feature.add_option( - "description", - "description used to identify search algorithm in logs", + "description", "description used to identify search algorithm in logs", "\"" + description + "\""); utils::add_log_options_to_feature(feature); } tuple -get_search_algorithm_arguments_from_options( - const plugins::Options &opts) { +get_search_algorithm_arguments_from_options(const plugins::Options &opts) { return tuple_cat( ::get_cost_type_arguments_from_options(opts), make_tuple( - opts.get("bound"), - opts.get("max_time"), - opts.get("description") - ), - utils::get_log_arguments_from_options(opts) - ); + opts.get("bound"), opts.get("max_time"), + opts.get("description")), + utils::get_log_arguments_from_options(opts)); } -/* Method doesn't belong here because it's only useful for certain derived classes. +/* Method doesn't belong here because it's only useful for certain derived + classes. TODO: Figure out where it belongs and move it there. 
*/ -void add_successors_order_options_to_feature( - plugins::Feature &feature) { +void add_successors_order_options_to_feature(plugins::Feature &feature) { feature.add_option( "randomize_successors", - "randomize the order in which successors are generated", - "false"); + "randomize the order in which successors are generated", "false"); feature.add_option( - "preferred_successors_first", - "consider preferred operators first", + "preferred_successors_first", "consider preferred operators first", "false"); feature.document_note( "Successor ordering", @@ -238,27 +230,26 @@ tuple get_successors_order_arguments_from_options( return tuple_cat( make_tuple( opts.get("randomize_successors"), - opts.get("preferred_successors_first") - ), - utils::get_rng_arguments_from_options(opts) - ); + opts.get("preferred_successors_first")), + utils::get_rng_arguments_from_options(opts)); } -static class SearchAlgorithmCategoryPlugin : public plugins::TypedCategoryPlugin { +static class SearchAlgorithmCategoryPlugin + : public plugins::TypedCategoryPlugin { public: SearchAlgorithmCategoryPlugin() : TypedCategoryPlugin("SearchAlgorithm") { // TODO: Replace add synopsis for the wiki page. 
// document_synopsis("..."); } -} -_category_plugin; +} _category_plugin; void collect_preferred_operators( - EvaluationContext &eval_context, - Evaluator *preferred_operator_evaluator, + EvaluationContext &eval_context, Evaluator *preferred_operator_evaluator, ordered_set::OrderedSet &preferred_operators) { - if (!eval_context.is_evaluator_value_infinite(preferred_operator_evaluator)) { - for (OperatorID op_id : eval_context.get_preferred_operators(preferred_operator_evaluator)) { + if (!eval_context.is_evaluator_value_infinite( + preferred_operator_evaluator)) { + for (OperatorID op_id : eval_context.get_preferred_operators( + preferred_operator_evaluator)) { preferred_operators.insert(op_id); } } diff --git a/src/search/search_algorithm.h b/src/search/search_algorithm.h index 29103c7b0f..9ed2d571c3 100644 --- a/src/search/search_algorithm.h +++ b/src/search/search_algorithm.h @@ -28,7 +28,12 @@ namespace successor_generator { class SuccessorGenerator; } -enum SearchStatus {IN_PROGRESS, TIMEOUT, FAILED, SOLVED}; +enum SearchStatus { + IN_PROGRESS, + TIMEOUT, + FAILED, + SOLVED +}; class SearchAlgorithm { std::string description; @@ -36,7 +41,8 @@ class SearchAlgorithm { bool solution_found; Plan plan; protected: - // Hold a reference to the task implementation and pass it to objects that need it. + // Hold a reference to the task implementation and pass it to objects that + // need it. const std::shared_ptr task; // Use task_proxy to access task information. 
TaskProxy task_proxy; @@ -53,7 +59,8 @@ class SearchAlgorithm { bool is_unit_cost; double max_time; - virtual void initialize() {} + virtual void initialize() { + } virtual SearchStatus step() = 0; void set_plan(const Plan &plan); @@ -63,7 +70,10 @@ class SearchAlgorithm { SearchAlgorithm( OperatorCost cost_type, int bound, double max_time, const std::string &description, utils::Verbosity verbosity); - explicit SearchAlgorithm(const plugins::Options &opts); // TODO options object is needed for iterated search, the prototype for issue559 resolves this + explicit SearchAlgorithm( + const plugins::Options + &opts); // TODO options object is needed for iterated search, the + // prototype for issue559 resolves this virtual ~SearchAlgorithm(); virtual void print_statistics() const = 0; virtual void save_plan_if_necessary(); @@ -71,11 +81,21 @@ class SearchAlgorithm { SearchStatus get_status() const; const Plan &get_plan() const; void search(); - const SearchStatistics &get_statistics() const {return statistics;} - void set_bound(int b) {bound = b;} - int get_bound() {return bound;} - PlanManager &get_plan_manager() {return plan_manager;} - std::string get_description() {return description;} + const SearchStatistics &get_statistics() const { + return statistics; + } + void set_bound(int b) { + bound = b; + } + int get_bound() { + return bound; + } + PlanManager &get_plan_manager() { + return plan_manager; + } + std::string get_description() { + return description; + } }; /* @@ -90,20 +110,15 @@ extern void collect_preferred_operators( class PruningMethod; -extern void add_search_pruning_options_to_feature( - plugins::Feature &feature); +extern void add_search_pruning_options_to_feature(plugins::Feature &feature); extern std::tuple> get_search_pruning_arguments_from_options(const plugins::Options &opts); extern void add_search_algorithm_options_to_feature( plugins::Feature &feature, const std::string &description); -extern std::tuple< - OperatorCost, int, double, 
std::string, utils::Verbosity> -get_search_algorithm_arguments_from_options( - const plugins::Options &opts); -extern void add_successors_order_options_to_feature( - plugins::Feature &feature); -extern std::tuple -get_successors_order_arguments_from_options( +extern std::tuple +get_search_algorithm_arguments_from_options(const plugins::Options &opts); +extern void add_successors_order_options_to_feature(plugins::Feature &feature); +extern std::tuple get_successors_order_arguments_from_options( const plugins::Options &opts); #endif diff --git a/src/search/search_algorithms/eager_search.cc b/src/search/search_algorithms/eager_search.cc index 951906fbcc..79dbf88b4c 100644 --- a/src/search/search_algorithms/eager_search.cc +++ b/src/search/search_algorithms/eager_search.cc @@ -27,13 +27,12 @@ EagerSearch::EagerSearch( const shared_ptr &lazy_evaluator, OperatorCost cost_type, int bound, double max_time, const string &description, utils::Verbosity verbosity) - : SearchAlgorithm( - cost_type, bound, max_time, description, verbosity), + : SearchAlgorithm(cost_type, bound, max_time, description, verbosity), reopen_closed_nodes(reopen_closed), open_list(open->create_state_open_list()), - f_evaluator(f_eval), // default nullptr + f_evaluator(f_eval), // default nullptr preferred_operator_evaluators(preferred), - lazy_evaluator(lazy_evaluator), // default nullptr + lazy_evaluator(lazy_evaluator), // default nullptr pruning_method(pruning) { if (lazy_evaluator && !lazy_evaluator->does_cache_estimates()) { cerr << "lazy_evaluator must cache its estimates" << endl; @@ -44,8 +43,7 @@ EagerSearch::EagerSearch( void EagerSearch::initialize() { log << "Conducting best first search" << (reopen_closed_nodes ? 
" with" : " without") - << " reopening closed nodes, (real) bound = " << bound - << endl; + << " reopening closed nodes, (real) bound = " << bound << endl; assert(open_list); set evals; @@ -55,7 +53,8 @@ void EagerSearch::initialize() { Collect path-dependent evaluators that are used for preferred operators (in case they are not also used in the open list). */ - for (const shared_ptr &evaluator : preferred_operator_evaluators) { + for (const shared_ptr &evaluator : + preferred_operator_evaluators) { evaluator->get_path_dependent_evaluators(evals); } @@ -115,24 +114,30 @@ void EagerSearch::print_statistics() const { } SearchStatus EagerSearch::step() { - optional node; - while (true) { - if (open_list->empty()) { - log << "Completely explored state space -- no solution!" << endl; - return FAILED; - } + optional node = get_next_node_to_expand(); + if (!node.has_value()) { + assert(open_list->empty()); + log << "Completely explored state space -- no solution!" << endl; + return FAILED; + } + + return expand(node.value()); +} + +optional EagerSearch::get_next_node_to_expand() { + while (!open_list->empty()) { StateID id = open_list->remove_min(); State s = state_registry.lookup_state(id); - node.emplace(search_space.get_node(s)); + SearchNode node = search_space.get_node(s); - if (node->is_closed()) + if (node.is_closed()) continue; /* We can pass calculate_preferred=false here since preferred operators are computed when the state is expanded. */ - EvaluationContext eval_context(s, node->get_g(), false, &statistics); + EvaluationContext eval_context(s, node.get_g(), false, &statistics); if (lazy_evaluator) { /* @@ -150,14 +155,15 @@ SearchStatus EagerSearch::step() { we have accumulated more information in the meantime. Then upon second expansion we have a dead-end node which we must ignore. 
*/ - if (node->is_dead_end()) + if (node.is_dead_end()) continue; if (lazy_evaluator->is_estimate_cached(s)) { int old_h = lazy_evaluator->get_cached_estimate(s); - int new_h = eval_context.get_evaluator_value_or_infinity(lazy_evaluator.get()); + int new_h = eval_context.get_evaluator_value_or_infinity( + lazy_evaluator.get()); if (open_list->is_dead_end(eval_context)) { - node->mark_as_dead_end(); + node.mark_as_dead_end(); statistics.inc_dead_ends(); continue; } @@ -168,54 +174,82 @@ SearchStatus EagerSearch::step() { } } - node->close(); - assert(!node->is_dead_end()); + node.close(); + assert(!node.is_dead_end()); update_f_value_statistics(eval_context); - statistics.inc_expanded(); - break; + return node; } + return nullopt; +} + +void EagerSearch::collect_preferred_operators_for_node( + const SearchNode &node, + ordered_set::OrderedSet &preferred_operators) { + EvaluationContext eval_context( + node.get_state(), node.get_g(), false, &statistics, true); + for (const shared_ptr &preferred_operator_evaluator : + preferred_operator_evaluators) { + collect_preferred_operators( + eval_context, preferred_operator_evaluator.get(), + preferred_operators); + } +} - const State &s = node->get_state(); - if (check_goal_and_set_plan(s)) +SearchStatus EagerSearch::expand(const SearchNode &node) { + statistics.inc_expanded(); + + const State &state = node.get_state(); + if (check_goal_and_set_plan(state)) return SOLVED; - vector applicable_ops; - successor_generator.generate_applicable_ops(s, applicable_ops); + generate_successors(node); + return IN_PROGRESS; +} + +void EagerSearch::generate_successors(const SearchNode &node) { + const State &state = node.get_state(); + + vector applicable_operators; + successor_generator.generate_applicable_ops(state, applicable_operators); /* TODO: When preferred operators are in use, a preferred operator will be considered by the preferred operator queues even when it is pruned. 
 */ - pruning_method->prune_operators(s, applicable_ops); + pruning_method->prune_operators(state, applicable_operators); // This evaluates the expanded state (again) to get preferred ops - EvaluationContext eval_context(s, node->get_g(), false, &statistics, true); ordered_set::OrderedSet preferred_operators; - for (const shared_ptr &preferred_operator_evaluator : preferred_operator_evaluators) { - collect_preferred_operators(eval_context, - preferred_operator_evaluator.get(), - preferred_operators); - } + collect_preferred_operators_for_node(node, preferred_operators); - for (OperatorID op_id : applicable_ops) { + for (OperatorID op_id : applicable_operators) { OperatorProxy op = task_proxy.get_operators()[op_id]; - if ((node->get_real_g() + op.get_cost()) >= bound) + if ((node.get_real_g() + op.get_cost()) >= bound) continue; - State succ_state = state_registry.get_successor_state(s, op); + State succ_state = state_registry.get_successor_state(state, op); statistics.inc_generated(); - bool is_preferred = preferred_operators.contains(op_id); SearchNode succ_node = search_space.get_node(succ_state); for (Evaluator *evaluator : path_dependent_evaluators) { - evaluator->notify_state_transition(s, op_id, succ_state); + evaluator->notify_state_transition(state, op_id, succ_state); } // Previously encountered dead end. Don't re-evaluate. if (succ_node.is_dead_end()) continue; + bool is_preferred = preferred_operators.contains(op_id); if (succ_node.is_new()) { /* We have not seen this state before. @@ -225,7 +259,7 @@ SearchStatus EagerSearch::step() { hence the stupid computation of succ_g. TODO: Make this less fragile. 
*/ - int succ_g = node->get_g() + get_adjusted_cost(op); + int succ_g = node.get_g() + get_adjusted_cost(op); EvaluationContext succ_eval_context( succ_state, succ_g, is_preferred, &statistics); @@ -236,18 +270,18 @@ SearchStatus EagerSearch::step() { statistics.inc_dead_ends(); continue; } - succ_node.open_new_node(*node, op, get_adjusted_cost(op)); + succ_node.open_new_node(node, op, get_adjusted_cost(op)); open_list->insert(succ_eval_context, succ_state.get_id()); if (search_progress.check_progress(succ_eval_context)) { statistics.print_checkpoint_line(succ_node.get_g()); reward_progress(); } - } else if (succ_node.get_g() > node->get_g() + get_adjusted_cost(op)) { + } else if (succ_node.get_g() > node.get_g() + get_adjusted_cost(op)) { // We found a new cheapest path to an open or closed state. if (succ_node.is_open()) { succ_node.update_open_node_parent( - *node, op, get_adjusted_cost(op)); + node, op, get_adjusted_cost(op)); EvaluationContext succ_eval_context( succ_state, succ_node.get_g(), is_preferred, &statistics); open_list->insert(succ_eval_context, succ_state.get_id()); @@ -260,7 +294,7 @@ SearchStatus EagerSearch::step() { consistent heuristic). */ statistics.inc_reopened(); - succ_node.reopen_closed_node(*node, op, get_adjusted_cost(op)); + succ_node.reopen_closed_node(node, op, get_adjusted_cost(op)); EvaluationContext succ_eval_context( succ_state, succ_node.get_g(), is_preferred, &statistics); open_list->insert(succ_eval_context, succ_state.get_id()); @@ -272,16 +306,15 @@ SearchStatus EagerSearch::step() { */ assert(succ_node.is_closed() && !reopen_closed_nodes); succ_node.update_closed_node_parent( - *node, op, get_adjusted_cost(op)); + node, op, get_adjusted_cost(op)); } } else { /* We found an equally or more expensive path to an open or closed - state. + state. There is nothing we need to do. 
*/ } } - return IN_PROGRESS; } void EagerSearch::reward_progress() { @@ -302,7 +335,8 @@ void EagerSearch::start_f_value_statistics(EvaluationContext &eval_context) { } /* TODO: HACK! This is very inefficient for simply looking up an h value. - Also, if h values are not saved it would recompute h for each and every state. */ + Also, if h values are not saved it would recompute h for each and every + state. */ void EagerSearch::update_f_value_statistics(EvaluationContext &eval_context) { if (f_evaluator) { int f_value = eval_context.get_evaluator_value(f_evaluator.get()); @@ -318,14 +352,13 @@ void add_eager_search_options_to_feature( add_search_algorithm_options_to_feature(feature, description); } -tuple, shared_ptr, OperatorCost, - int, double, string, utils::Verbosity> +tuple< + shared_ptr, shared_ptr, OperatorCost, int, double, + string, utils::Verbosity> get_eager_search_arguments_from_options(const plugins::Options &opts) { return tuple_cat( get_search_pruning_arguments_from_options(opts), - make_tuple(opts.get>( - "lazy_evaluator", nullptr)), - get_search_algorithm_arguments_from_options(opts) - ); + make_tuple(opts.get>("lazy_evaluator", nullptr)), + get_search_algorithm_arguments_from_options(opts)); } } diff --git a/src/search/search_algorithms/eager_search.h b/src/search/search_algorithms/eager_search.h index bb328e1fc7..9d3da156ad 100644 --- a/src/search/search_algorithms/eager_search.h +++ b/src/search/search_algorithms/eager_search.h @@ -5,6 +5,7 @@ #include "../search_algorithm.h" #include +#include #include class Evaluator; @@ -32,14 +33,21 @@ class EagerSearch : public SearchAlgorithm { void update_f_value_statistics(EvaluationContext &eval_context); void reward_progress(); + std::optional get_next_node_to_expand(); + void collect_preferred_operators_for_node( + const SearchNode &node, + ordered_set::OrderedSet &preferred_operators); + SearchStatus expand(const SearchNode &node); + void generate_successors(const SearchNode &node); + protected: 
virtual void initialize() override; virtual SearchStatus step() override; public: explicit EagerSearch( - const std::shared_ptr &open, - bool reopen_closed, const std::shared_ptr &f_eval, + const std::shared_ptr &open, bool reopen_closed, + const std::shared_ptr &f_eval, const std::vector> &preferred, const std::shared_ptr &pruning, const std::shared_ptr &lazy_evaluator, @@ -53,9 +61,9 @@ class EagerSearch : public SearchAlgorithm { extern void add_eager_search_options_to_feature( plugins::Feature &feature, const std::string &description); -extern std::tuple, - std::shared_ptr, OperatorCost, int, double, - std::string, utils::Verbosity> +extern std::tuple< + std::shared_ptr, std::shared_ptr, OperatorCost, + int, double, std::string, utils::Verbosity> get_eager_search_arguments_from_options(const plugins::Options &opts); } diff --git a/src/search/search_algorithms/enforced_hill_climbing_search.cc b/src/search/search_algorithms/enforced_hill_climbing_search.cc index 2cbd10c372..ce50688c16 100644 --- a/src/search/search_algorithms/enforced_hill_climbing_search.cc +++ b/src/search/search_algorithms/enforced_hill_climbing_search.cc @@ -25,8 +25,8 @@ static shared_ptr create_ehc_open_list_factory( ignore costs since EHC is supposed to implement a breadth-first search, not a uniform-cost search. So this seems to be a bug. */ - shared_ptr g_evaluator = make_shared( - "ehc.g_eval", verbosity); + shared_ptr g_evaluator = + make_shared("ehc.g_eval", verbosity); if (!use_preferred || preferred_usage == PreferredUsage::PRUNE_BY_PREFERRED) { @@ -42,21 +42,18 @@ static shared_ptr create_ehc_open_list_factory( open list code. 
*/ vector> evals = { - g_evaluator, make_shared( - "ehc.pref_eval", verbosity)}; + g_evaluator, make_shared("ehc.pref_eval", verbosity)}; return make_shared( evals, false, true); } } - EnforcedHillClimbingSearch::EnforcedHillClimbingSearch( const shared_ptr &h, PreferredUsage preferred_usage, - const vector> &preferred, - OperatorCost cost_type, int bound, double max_time, - const string &description, utils::Verbosity verbosity) - : SearchAlgorithm( - cost_type, bound, max_time, description, verbosity), + const vector> &preferred, OperatorCost cost_type, + int bound, double max_time, const string &description, + utils::Verbosity verbosity) + : SearchAlgorithm(cost_type, bound, max_time, description, verbosity), evaluator(h), preferred_operator_evaluators(preferred), preferred_usage(preferred_usage), @@ -73,15 +70,16 @@ EnforcedHillClimbingSearch::EnforcedHillClimbingSearch( for (Evaluator *evaluator : path_dependent_evaluators) { evaluator->notify_initial_state(initial_state); } - use_preferred = find(preferred_operator_evaluators.begin(), - preferred_operator_evaluators.end(), evaluator) != - preferred_operator_evaluators.end(); - - open_list = create_ehc_open_list_factory( - verbosity, use_preferred, preferred_usage)->create_edge_open_list(); + use_preferred = find( + preferred_operator_evaluators.begin(), + preferred_operator_evaluators.end(), + evaluator) != preferred_operator_evaluators.end(); + + open_list = + create_ehc_open_list_factory(verbosity, use_preferred, preferred_usage) + ->create_edge_open_list(); } - void EnforcedHillClimbingSearch::reach_state( const State &parent, OperatorID op_id, const State &state) { for (Evaluator *evaluator : path_dependent_evaluators) { @@ -91,15 +89,18 @@ void EnforcedHillClimbingSearch::reach_state( void EnforcedHillClimbingSearch::initialize() { assert(evaluator); - log << "Conducting enforced hill-climbing search, (real) bound = " - << bound << endl; + log << "Conducting enforced hill-climbing search, (real) bound = " 
<< bound + << endl; if (use_preferred) { log << "Using preferred operators for " - << (preferred_usage == PreferredUsage::RANK_PREFERRED_FIRST ? - "ranking successors" : "pruning") << endl; + << (preferred_usage == PreferredUsage::RANK_PREFERRED_FIRST + ? "ranking successors" + : "pruning") + << endl; } - bool dead_end = current_eval_context.is_evaluator_value_infinite(evaluator.get()); + bool dead_end = + current_eval_context.is_evaluator_value_infinite(evaluator.get()); statistics.inc_evaluated_states(); print_initial_evaluator_values(current_eval_context); @@ -118,9 +119,7 @@ void EnforcedHillClimbingSearch::initialize() { } void EnforcedHillClimbingSearch::insert_successor_into_open_list( - const EvaluationContext &eval_context, - int parent_g, - OperatorID op_id, + const EvaluationContext &eval_context, int parent_g, OperatorID op_id, bool preferred) { OperatorProxy op = task_proxy.get_operators()[op_id]; int succ_g = parent_g + get_adjusted_cost(op); @@ -138,17 +137,18 @@ void EnforcedHillClimbingSearch::expand(EvaluationContext &eval_context) { ordered_set::OrderedSet preferred_operators; if (use_preferred) { - for (const shared_ptr &preferred_operator_evaluator : preferred_operator_evaluators) { - collect_preferred_operators(eval_context, - preferred_operator_evaluator.get(), - preferred_operators); + for (const shared_ptr &preferred_operator_evaluator : + preferred_operator_evaluators) { + collect_preferred_operators( + eval_context, preferred_operator_evaluator.get(), + preferred_operators); } } - if (use_preferred && preferred_usage == PreferredUsage::PRUNE_BY_PREFERRED) { + if (use_preferred && + preferred_usage == PreferredUsage::PRUNE_BY_PREFERRED) { for (OperatorID op_id : preferred_operators) { - insert_successor_into_open_list( - eval_context, node_g, op_id, true); + insert_successor_into_open_list(eval_context, node_g, op_id, true); } } else { /* The successor ranking implied by RANK_BY_PREFERRED is done @@ -157,8 +157,8 @@ void 
EnforcedHillClimbingSearch::expand(EvaluationContext &eval_context) { successor_generator.generate_applicable_ops( eval_context.get_state(), successor_operators); for (OperatorID op_id : successor_operators) { - bool preferred = use_preferred && - preferred_operators.contains(op_id); + bool preferred = + use_preferred && preferred_operators.contains(op_id); insert_successor_into_open_list( eval_context, node_g, op_id, preferred); } @@ -192,7 +192,7 @@ SearchStatus EnforcedHillClimbingSearch::ehc() { // d: distance from initial node in this EHC phase int d = parent_node.get_g() - current_phase_start_g + - get_adjusted_cost(last_op); + get_adjusted_cost(last_op); if (parent_node.get_real_g() + last_op.get_cost() >= bound) continue; @@ -214,8 +214,8 @@ SearchStatus EnforcedHillClimbingSearch::ehc() { } int h = eval_context.get_evaluator_value(evaluator.get()); - node.open_new_node(parent_node, last_op, - get_adjusted_cost(last_op)); + node.open_new_node( + parent_node, last_op, get_adjusted_cost(last_op)); if (h < current_eval_context.get_evaluator_value(evaluator.get())) { ++num_ehc_phases; @@ -260,7 +260,8 @@ void EnforcedHillClimbingSearch::print_statistics() const { } class EnforcedHillClimbingSearchFeature - : public plugins::TypedFeature { + : public plugins::TypedFeature< + SearchAlgorithm, EnforcedHillClimbingSearch> { public: EnforcedHillClimbingSearchFeature() : TypedFeature("ehc") { document_title("Lazy enforced hill-climbing"); @@ -268,34 +269,29 @@ class EnforcedHillClimbingSearchFeature add_option>("h", "heuristic"); add_option( - "preferred_usage", - "preferred operator usage", + "preferred_usage", "preferred operator usage", "prune_by_preferred"); add_list_option>( - "preferred", - "use preferred operators of these evaluators", - "[]"); + "preferred", "use preferred operators of these evaluators", "[]"); add_search_algorithm_options_to_feature(*this, "ehc"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + 
virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("h"), opts.get("preferred_usage"), opts.get_list>("preferred"), - get_search_algorithm_arguments_from_options(opts) - ); + get_search_algorithm_arguments_from_options(opts)); } }; static plugins::FeaturePlugin _plugin; -static plugins::TypedEnumPlugin _enum_plugin({ - {"prune_by_preferred", - "prune successors achieved by non-preferred operators"}, - {"rank_preferred_first", - "first insert successors achieved by preferred operators, " - "then those by non-preferred operators"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"prune_by_preferred", + "prune successors achieved by non-preferred operators"}, + {"rank_preferred_first", + "first insert successors achieved by preferred operators, " + "then those by non-preferred operators"}}); } diff --git a/src/search/search_algorithms/enforced_hill_climbing_search.h b/src/search/search_algorithms/enforced_hill_climbing_search.h index f7b3247dfd..afc49e5ce2 100644 --- a/src/search/search_algorithms/enforced_hill_climbing_search.h +++ b/src/search/search_algorithms/enforced_hill_climbing_search.h @@ -46,13 +46,10 @@ class EnforcedHillClimbingSearch : public SearchAlgorithm { int last_num_expanded; void insert_successor_into_open_list( - const EvaluationContext &eval_context, - int parent_g, - OperatorID op_id, + const EvaluationContext &eval_context, int parent_g, OperatorID op_id, bool preferred); void expand(EvaluationContext &eval_context); - void reach_state( - const State &parent, OperatorID op_id, const State &state); + void reach_state(const State &parent, OperatorID op_id, const State &state); SearchStatus ehc(); protected: @@ -61,8 +58,7 @@ class EnforcedHillClimbingSearch : public SearchAlgorithm { public: EnforcedHillClimbingSearch( - const std::shared_ptr &h, - PreferredUsage preferred_usage, + const std::shared_ptr &h, PreferredUsage preferred_usage, const 
std::vector> &preferred, OperatorCost cost_type, int bound, double max_time, const std::string &description, utils::Verbosity verbosity); diff --git a/src/search/search_algorithms/iterated_search.cc b/src/search/search_algorithms/iterated_search.cc index b4855d0ed5..9c112ca104 100644 --- a/src/search/search_algorithms/iterated_search.cc +++ b/src/search/search_algorithms/iterated_search.cc @@ -25,10 +25,12 @@ IteratedSearch::IteratedSearch(const plugins::Options &opts) shared_ptr IteratedSearch::get_search_algorithm( int algorithm_configs_index) { - parser::LazyValue &algorithm_config = algorithm_configs[algorithm_configs_index]; + parser::LazyValue &algorithm_config = + algorithm_configs[algorithm_configs_index]; shared_ptr search_algorithm; - try{ - search_algorithm = algorithm_config.construct>(); + try { + search_algorithm = + algorithm_config.construct>(); } catch (const utils::ContextError &e) { cerr << "Delayed construction of LazyValue failed" << endl; cerr << e.get_message() << endl; @@ -137,9 +139,7 @@ class IteratedSearchFeature document_synopsis(""); add_list_option>( - "algorithm_configs", - "list of search algorithms for each phase", - "", + "algorithm_configs", "list of search algorithms for each phase", "", true); add_option( "pass_bound", @@ -148,17 +148,12 @@ class IteratedSearchFeature "The iterated search bound is tightened whenever a component finds " "a cheaper plan.", "true"); + add_option("repeat_last", "repeat last phase of search", "false"); add_option( - "repeat_last", - "repeat last phase of search", + "continue_on_fail", "continue search after no solution found", "false"); add_option( - "continue_on_fail", - "continue search after no solution found", - "false"); - add_option( - "continue_on_solve", - "continue search after solution found", + "continue_on_solve", "continue search after solution found", "true"); add_search_algorithm_options_to_feature(*this, "iterated"); @@ -184,14 +179,15 @@ class IteratedSearchFeature "```"); } - 
virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { plugins::Options options_copy(opts); /* - The options entry 'algorithm_configs' is a LazyValue representing a list - of search algorithms. But iterated search expects a list of LazyValues, - each representing a search algorithm. We unpack this first layer of - laziness here to report potential errors in a more useful context. + The options entry 'algorithm_configs' is a LazyValue representing a + list of search algorithms. But iterated search expects a list of + LazyValues, each representing a search algorithm. We unpack this first + layer of laziness here to report potential errors in a more useful + context. TODO: the medium-term plan is to get rid of LazyValue completely and let the features create builders that in turn create the actual @@ -199,7 +195,8 @@ class IteratedSearchFeature the builder is a light-weight operation. 
*/ vector algorithm_configs = - opts.get("algorithm_configs").construct_lazy_list(); + opts.get("algorithm_configs") + .construct_lazy_list(); options_copy.set("algorithm_configs", algorithm_configs); return make_shared(options_copy); } diff --git a/src/search/search_algorithms/iterated_search.h b/src/search/search_algorithms/iterated_search.h index 026ba53804..8a49f0ba38 100644 --- a/src/search/search_algorithms/iterated_search.h +++ b/src/search/search_algorithms/iterated_search.h @@ -22,14 +22,17 @@ class IteratedSearch : public SearchAlgorithm { int best_bound; bool iterated_found_solution; - std::shared_ptr get_search_algorithm(int algorithm_configs_index); + std::shared_ptr get_search_algorithm( + int algorithm_configs_index); std::shared_ptr create_current_phase(); SearchStatus step_return_value(); virtual SearchStatus step() override; public: - IteratedSearch(const plugins::Options &opts); // TODO this still needs the options objects, the prototype for issue559 resolves this + IteratedSearch(const plugins::Options + &opts); // TODO this still needs the options objects, the + // prototype for issue559 resolves this virtual void save_plan_if_necessary() override; virtual void print_statistics() const override; diff --git a/src/search/search_algorithms/lazy_search.cc b/src/search/search_algorithms/lazy_search.cc index 466ad03ae2..dd5afac207 100644 --- a/src/search/search_algorithms/lazy_search.cc +++ b/src/search/search_algorithms/lazy_search.cc @@ -19,12 +19,11 @@ using namespace std; namespace lazy_search { LazySearch::LazySearch( const shared_ptr &open, bool reopen_closed, - const vector> &preferred, - bool randomize_successors, bool preferred_successors_first, - int random_seed, OperatorCost cost_type, int bound, double max_time, - const string &description, utils::Verbosity verbosity) - : SearchAlgorithm( - cost_type, bound, max_time, description, verbosity), + const vector> &preferred, bool randomize_successors, + bool preferred_successors_first, int 
random_seed, OperatorCost cost_type, + int bound, double max_time, const string &description, + utils::Verbosity verbosity) + : SearchAlgorithm(cost_type, bound, max_time, description, verbosity), open_list(open->create_edge_open_list()), reopen_closed_nodes(reopen_closed), randomize_successors(randomize_successors), @@ -44,7 +43,8 @@ LazySearch::LazySearch( } void LazySearch::initialize() { - log << "Conducting lazy best first search, (real) bound = " << bound << endl; + log << "Conducting lazy best first search, (real) bound = " << bound + << endl; assert(open_list); set evals; @@ -52,7 +52,8 @@ void LazySearch::initialize() { // Add evaluators that are used for preferred operators (in case they are // not also used in the open list). - for (const shared_ptr &evaluator : preferred_operator_evaluators) { + for (const shared_ptr &evaluator : + preferred_operator_evaluators) { evaluator->get_path_dependent_evaluators(evals); } @@ -89,10 +90,11 @@ vector LazySearch::get_successor_operators( void LazySearch::generate_successors() { ordered_set::OrderedSet preferred_operators; - for (const shared_ptr &preferred_operator_evaluator : preferred_operator_evaluators) { - collect_preferred_operators(current_eval_context, - preferred_operator_evaluator.get(), - preferred_operators); + for (const shared_ptr &preferred_operator_evaluator : + preferred_operator_evaluators) { + collect_preferred_operators( + current_eval_context, preferred_operator_evaluator.get(), + preferred_operators); } if (randomize_successors) { preferred_operators.shuffle(*rng); @@ -111,7 +113,8 @@ void LazySearch::generate_successors() { if (new_real_g < bound) { EvaluationContext new_eval_context( current_eval_context, new_g, is_preferred, nullptr); - open_list->insert(new_eval_context, make_pair(current_state.get_id(), op_id)); + open_list->insert( + new_eval_context, make_pair(current_state.get_id(), op_id)); } } } @@ -126,10 +129,14 @@ SearchStatus LazySearch::fetch_next_state() { 
current_predecessor_id = next.first; current_operator_id = next.second; - State current_predecessor = state_registry.lookup_state(current_predecessor_id); - OperatorProxy current_operator = task_proxy.get_operators()[current_operator_id]; - assert(task_properties::is_applicable(current_operator, current_predecessor)); - current_state = state_registry.get_successor_state(current_predecessor, current_operator); + State current_predecessor = + state_registry.lookup_state(current_predecessor_id); + OperatorProxy current_operator = + task_proxy.get_operators()[current_operator_id]; + assert( + task_properties::is_applicable(current_operator, current_predecessor)); + current_state = state_registry.get_successor_state( + current_predecessor, current_operator); SearchNode pred_node = search_space.get_node(current_predecessor); current_g = pred_node.get_g() + get_adjusted_cost(current_operator); @@ -143,29 +150,34 @@ SearchStatus LazySearch::fetch_next_state() { associate with the expanded vs. evaluated nodes in lazy search and where to obtain it from. */ - current_eval_context = EvaluationContext(current_state, current_g, true, &statistics); + current_eval_context = + EvaluationContext(current_state, current_g, true, &statistics); return IN_PROGRESS; } SearchStatus LazySearch::step() { // Invariants: - // - current_state is the next state for which we want to compute the heuristic. - // - current_predecessor_id is the state ID of the predecessor of that state. - // - current_operator_id is the ID of the operator which leads to current_state from predecessor. - // - current_g is the g value of the current state according to the cost_type + // - current_state is the next state for which we want to compute the + // heuristic. + // - current_predecessor_id is the state ID of the predecessor of that + // state. + // - current_operator_id is the ID of the operator which leads to + // current_state from predecessor. 
+ // - current_g is the g value of the current state according to the + // cost_type // - current_real_g is the g value of the current state (using real costs) - SearchNode node = search_space.get_node(current_state); bool reopen = reopen_closed_nodes && !node.is_new() && - !node.is_dead_end() && (current_g < node.get_g()); + !node.is_dead_end() && (current_g < node.get_g()); if (node.is_new() || reopen) { if (current_operator_id != OperatorID::no_operator) { assert(current_predecessor_id != StateID::no_state); if (!path_dependent_evaluators.empty()) { - State parent_state = state_registry.lookup_state(current_predecessor_id); + State parent_state = + state_registry.lookup_state(current_predecessor_id); for (Evaluator *evaluator : path_dependent_evaluators) evaluator->notify_state_transition( parent_state, current_operator_id, current_state); @@ -178,17 +190,20 @@ SearchStatus LazySearch::step() { if (search_progress.check_progress(current_eval_context)) statistics.print_checkpoint_line(current_g); } else { - State parent_state = state_registry.lookup_state(current_predecessor_id); + State parent_state = + state_registry.lookup_state(current_predecessor_id); SearchNode parent_node = search_space.get_node(parent_state); - OperatorProxy current_operator = task_proxy.get_operators()[current_operator_id]; + OperatorProxy current_operator = + task_proxy.get_operators()[current_operator_id]; if (reopen) { - node.reopen_closed_node(parent_node, current_operator, - get_adjusted_cost( - current_operator)); + node.reopen_closed_node( + parent_node, current_operator, + get_adjusted_cost(current_operator)); statistics.inc_reopened(); } else { - node.open_new_node(parent_node, current_operator, - get_adjusted_cost(current_operator)); + node.open_new_node( + parent_node, current_operator, + get_adjusted_cost(current_operator)); } } node.close(); diff --git a/src/search/search_algorithms/lazy_search.h b/src/search/search_algorithms/lazy_search.h index 3c5ac25ebe..ae0dbcba04 
100644 --- a/src/search/search_algorithms/lazy_search.h +++ b/src/search/search_algorithms/lazy_search.h @@ -22,7 +22,8 @@ class LazySearch : public SearchAlgorithm { std::unique_ptr open_list; // Search behavior parameters - bool reopen_closed_nodes; // whether to reopen closed nodes upon finding lower g paths + bool reopen_closed_nodes; // whether to reopen closed nodes upon finding + // lower g paths bool randomize_successors; bool preferred_successors_first; std::shared_ptr rng; @@ -50,13 +51,11 @@ class LazySearch : public SearchAlgorithm { public: LazySearch( - const std::shared_ptr &open, - bool reopen_closed, + const std::shared_ptr &open, bool reopen_closed, const std::vector> &evaluators, bool randomize_successors, bool preferred_successors_first, - int random_seed, OperatorCost cost_type, int bound, - double max_time, const std::string &description, - utils::Verbosity verbosity); + int random_seed, OperatorCost cost_type, int bound, double max_time, + const std::string &description, utils::Verbosity verbosity); virtual void print_statistics() const override; }; diff --git a/src/search/search_algorithms/plugin_astar.cc b/src/search/search_algorithms/plugin_astar.cc index 7f8f681728..2ee15c89e8 100644 --- a/src/search/search_algorithms/plugin_astar.cc +++ b/src/search/search_algorithms/plugin_astar.cc @@ -21,8 +21,7 @@ class AStarSearchFeature "lazy_evaluator", "An evaluator that re-evaluates a state before it is expanded.", plugins::ArgumentInfo::NO_DEFAULT); - eager_search::add_eager_search_options_to_feature( - *this, "astar"); + eager_search::add_eager_search_options_to_feature(*this, "astar"); document_note( "lazy_evaluator", @@ -37,16 +36,16 @@ class AStarSearchFeature "```\n--evaluator h=evaluator\n" "--search eager(tiebreaking([sum([g(), h]), h], unsafe_pruning=false),\n" " reopen_closed=true, f_eval=sum([g(), h]))\n" - "```\n", true); + "```\n", + true); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + 
virtual shared_ptr create_component( + const plugins::Options &opts) const override { plugins::Options options_copy(opts); - auto temp = - search_common::create_astar_open_list_factory_and_f_eval( - opts.get>("eval"), - opts.get("verbosity")); + auto temp = search_common::create_astar_open_list_factory_and_f_eval( + opts.get>("eval"), + opts.get("verbosity")); options_copy.set("open", temp.first); options_copy.set("f_eval", temp.second); options_copy.set("reopen_closed", true); @@ -58,8 +57,7 @@ class AStarSearchFeature options_copy.get>("f_eval", nullptr), options_copy.get_list>("preferred"), eager_search::get_eager_search_arguments_from_options( - options_copy) - ); + options_copy)); } }; diff --git a/src/search/search_algorithms/plugin_eager.cc b/src/search/search_algorithms/plugin_eager.cc index 4103ff1340..5c3b556b4e 100644 --- a/src/search/search_algorithms/plugin_eager.cc +++ b/src/search/search_algorithms/plugin_eager.cc @@ -14,32 +14,25 @@ class EagerSearchFeature document_synopsis(""); add_option>("open", "open list"); - add_option( - "reopen_closed", - "reopen closed nodes", - "false"); + add_option("reopen_closed", "reopen closed nodes", "false"); add_option>( "f_eval", "set evaluator for jump statistics. 
" "(Optional; if no evaluator is used, jump statistics will not be displayed.)", plugins::ArgumentInfo::NO_DEFAULT); add_list_option>( - "preferred", - "use preferred operators of these evaluators", - "[]"); - eager_search::add_eager_search_options_to_feature( - *this, "eager"); + "preferred", "use preferred operators of these evaluators", "[]"); + eager_search::add_eager_search_options_to_feature(*this, "eager"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("open"), opts.get("reopen_closed"), opts.get>("f_eval", nullptr), opts.get_list>("preferred"), - eager_search::get_eager_search_arguments_from_options(opts) - ); + eager_search::get_eager_search_arguments_from_options(opts)); } }; diff --git a/src/search/search_algorithms/plugin_eager_greedy.cc b/src/search/search_algorithms/plugin_eager_greedy.cc index 1f75040528..fa28dd57c0 100644 --- a/src/search/search_algorithms/plugin_eager_greedy.cc +++ b/src/search/search_algorithms/plugin_eager_greedy.cc @@ -15,11 +15,9 @@ class EagerGreedySearchFeature add_list_option>("evals", "evaluators"); add_list_option>( - "preferred", - "use preferred operators of these evaluators", "[]"); + "preferred", "use preferred operators of these evaluators", "[]"); add_option( - "boost", - "boost value for preferred operator open lists", "0"); + "boost", "boost value for preferred operator open lists", "0"); eager_search::add_eager_search_options_to_feature( *this, "eager_greedy"); @@ -33,9 +31,7 @@ class EagerGreedySearchFeature "If only one evaluator and no preferred operator evaluator is used, " "the search does not use an alternation open list but a " "standard open list with only one queue."); - document_note( - "Closed nodes", - "Closed node are not re-opened"); + document_note("Closed nodes", "Closed node are not re-opened"); document_note( 
"Equivalent statements using general eager search", "\n```\n--evaluator h2=eval2\n" @@ -59,22 +55,19 @@ class EagerGreedySearchFeature "------------------------------------------------------------\n" "```\n--search eager_greedy([eval1])\n```\n" "is equivalent to\n" - "```\n--search eager(single(eval1))\n```\n", true); + "```\n--search eager(single(eval1))\n```\n", + true); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( search_common::create_greedy_open_list_factory( opts.get_list>("evals"), opts.get_list>("preferred"), - opts.get("boost") - ), - false, - nullptr, - opts.get_list>("preferred"), - eager_search::get_eager_search_arguments_from_options(opts) - ); + opts.get("boost")), + false, nullptr, opts.get_list>("preferred"), + eager_search::get_eager_search_arguments_from_options(opts)); } }; diff --git a/src/search/search_algorithms/plugin_eager_wastar.cc b/src/search/search_algorithms/plugin_eager_wastar.cc index ae0583b3de..04248f4242 100644 --- a/src/search/search_algorithms/plugin_eager_wastar.cc +++ b/src/search/search_algorithms/plugin_eager_wastar.cc @@ -13,25 +13,13 @@ class EagerWAstarSearchFeature document_title("Eager weighted A* search"); document_synopsis(""); + add_list_option>("evals", "evaluators"); add_list_option>( - "evals", - "evaluators"); - add_list_option>( - "preferred", - "use preferred operators of these evaluators", - "[]"); - add_option( - "reopen_closed", - "reopen closed nodes", - "true"); - add_option( - "boost", - "boost value for preferred operator open lists", - "0"); + "preferred", "use preferred operators of these evaluators", "[]"); + add_option("reopen_closed", "reopen closed nodes", "true"); add_option( - "w", - "evaluator weight", - "1"); + "boost", "boost value for preferred operator open lists", "0"); + add_option("w", "evaluator weight", "1"); 
eager_search::add_eager_search_options_to_feature( *this, "eager_wastar"); @@ -46,21 +34,18 @@ class EagerWAstarSearchFeature "is **not** equivalent to\n```\n--search astar(h())\n```\n"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( search_common::create_wastar_open_list_factory( opts.get_list>("evals"), opts.get_list>("preferred"), - opts.get("boost"), - opts.get("w"), - opts.get("verbosity") - ), + opts.get("boost"), opts.get("w"), + opts.get("verbosity")), opts.get("reopen_closed"), opts.get>("f_eval", nullptr), opts.get_list>("preferred"), - eager_search::get_eager_search_arguments_from_options(opts) - ); + eager_search::get_eager_search_arguments_from_options(opts)); } }; diff --git a/src/search/search_algorithms/plugin_lazy.cc b/src/search/search_algorithms/plugin_lazy.cc index 6d19e8837b..212ed12f70 100644 --- a/src/search/search_algorithms/plugin_lazy.cc +++ b/src/search/search_algorithms/plugin_lazy.cc @@ -16,21 +16,19 @@ class LazySearchFeature add_option>("open", "open list"); add_option("reopen_closed", "reopen closed nodes", "false"); add_list_option>( - "preferred", - "use preferred operators of these evaluators", "[]"); + "preferred", "use preferred operators of these evaluators", "[]"); add_successors_order_options_to_feature(*this); add_search_algorithm_options_to_feature(*this, "lazy"); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( opts.get>("open"), opts.get("reopen_closed"), opts.get_list>("preferred"), get_successors_order_arguments_from_options(opts), - get_search_algorithm_arguments_from_options(opts) - ); + get_search_algorithm_arguments_from_options(opts)); } }; diff --git 
a/src/search/search_algorithms/plugin_lazy_greedy.cc b/src/search/search_algorithms/plugin_lazy_greedy.cc index 55fb96fb60..e2c91b5116 100644 --- a/src/search/search_algorithms/plugin_lazy_greedy.cc +++ b/src/search/search_algorithms/plugin_lazy_greedy.cc @@ -15,23 +15,16 @@ class LazyGreedySearchFeature document_title("Greedy search (lazy)"); document_synopsis(""); - add_list_option>( - "evals", - "evaluators"); + add_list_option>("evals", "evaluators"); add_option( "boost", "boost value for alternation queues that are restricted " "to preferred operator nodes", DEFAULT_LAZY_BOOST); - add_option( - "reopen_closed", - "reopen closed nodes", - "false"); + add_option("reopen_closed", "reopen closed nodes", "false"); add_list_option>( - "preferred", - "use preferred operators of these evaluators", - "[]"); + "preferred", "use preferred operators of these evaluators", "[]"); add_successors_order_options_to_feature(*this); add_search_algorithm_options_to_feature(*this, "lazy_greedy"); @@ -71,19 +64,17 @@ class LazyGreedySearchFeature true); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( search_common::create_greedy_open_list_factory( opts.get_list>("evals"), opts.get_list>("preferred"), - opts.get("boost") - ), + opts.get("boost")), opts.get("reopen_closed"), opts.get_list>("preferred"), get_successors_order_arguments_from_options(opts), - get_search_algorithm_arguments_from_options(opts) - ); + get_search_algorithm_arguments_from_options(opts)); } }; diff --git a/src/search/search_algorithms/plugin_lazy_wastar.cc b/src/search/search_algorithms/plugin_lazy_wastar.cc index f2b0a5eacc..e20bd4da90 100644 --- a/src/search/search_algorithms/plugin_lazy_wastar.cc +++ b/src/search/search_algorithms/plugin_lazy_wastar.cc @@ -16,20 +16,12 @@ class LazyWAstarSearchFeature document_synopsis( "Weighted A* is a 
special case of lazy best first search."); + add_list_option>("evals", "evaluators"); add_list_option>( - "evals", - "evaluators"); - add_list_option>( - "preferred", - "use preferred operators of these evaluators", - "[]"); - add_option( - "reopen_closed", - "reopen closed nodes", - "true"); + "preferred", "use preferred operators of these evaluators", "[]"); + add_option("reopen_closed", "reopen closed nodes", "true"); add_option( - "boost", - "boost value for preferred operator open lists", + "boost", "boost value for preferred operator open lists", DEFAULT_LAZY_BOOST); add_option("w", "evaluator weight", "1"); add_successors_order_options_to_feature(*this); @@ -78,21 +70,18 @@ class LazyWAstarSearchFeature true); } - virtual shared_ptr - create_component(const plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( search_common::create_wastar_open_list_factory( opts.get_list>("evals"), opts.get_list>("preferred"), - opts.get("boost"), - opts.get("w"), - opts.get("verbosity") - ), + opts.get("boost"), opts.get("w"), + opts.get("verbosity")), opts.get("reopen_closed"), opts.get_list>("preferred"), get_successors_order_arguments_from_options(opts), - get_search_algorithm_arguments_from_options(opts) - ); + get_search_algorithm_arguments_from_options(opts)); } }; diff --git a/src/search/search_algorithms/search_common.cc b/src/search/search_algorithms/search_common.cc index b2d322253c..5eb9a837f0 100644 --- a/src/search/search_algorithms/search_common.cc +++ b/src/search/search_algorithms/search_common.cc @@ -25,8 +25,7 @@ using WeightedEval = weighted_evaluator::WeightedEvaluator; */ static shared_ptr create_alternation_open_list_factory_aux( const vector> &evals, - const vector> &preferred_evaluators, - int boost) { + const vector> &preferred_evaluators, int boost) { if (evals.size() == 1 && preferred_evaluators.empty()) { return make_shared( 
evals[0], false); @@ -34,11 +33,13 @@ static shared_ptr create_alternation_open_list_factory_aux( vector> subfactories; for (const shared_ptr &evaluator : evals) { subfactories.push_back( - make_shared( + make_shared< + standard_scalar_open_list::BestFirstOpenListFactory>( evaluator, false)); if (!preferred_evaluators.empty()) { subfactories.push_back( - make_shared( + make_shared< + standard_scalar_open_list::BestFirstOpenListFactory>( evaluator, true)); } } @@ -49,8 +50,7 @@ static shared_ptr create_alternation_open_list_factory_aux( shared_ptr create_greedy_open_list_factory( const vector> &evals, - const vector> &preferred_evaluators, - int boost) { + const vector> &preferred_evaluators, int boost) { utils::verify_list_not_empty(evals, "evals"); return create_alternation_open_list_factory_aux( evals, preferred_evaluators, boost); @@ -67,8 +67,8 @@ shared_ptr create_greedy_open_list_factory( we use g instead of g + 0 * h. */ static shared_ptr create_wastar_eval( - utils::Verbosity verbosity, const shared_ptr &g_eval, - int weight, const shared_ptr &h_eval) { + utils::Verbosity verbosity, const shared_ptr &g_eval, int weight, + const shared_ptr &h_eval) { if (weight == 0) { return g_eval; } @@ -80,38 +80,30 @@ static shared_ptr create_wastar_eval( h_eval, weight, "wastar.w_h_eval", verbosity); } return make_shared( - vector>({g_eval, w_h_eval}), - "wastar.eval", verbosity); + vector>({g_eval, w_h_eval}), "wastar.eval", + verbosity); } shared_ptr create_wastar_open_list_factory( const vector> &evals, - const vector> &preferred, int boost, - int weight, utils::Verbosity verbosity) { + const vector> &preferred, int boost, int weight, + utils::Verbosity verbosity) { utils::verify_list_not_empty(evals, "evals"); - shared_ptr g_eval = make_shared( - "wastar.g_eval", verbosity); + shared_ptr g_eval = make_shared("wastar.g_eval", verbosity); vector> f_evals; f_evals.reserve(evals.size()); for (const shared_ptr &eval : evals) - f_evals.push_back(create_wastar_eval( - 
verbosity, g_eval, weight, eval)); + f_evals.push_back(create_wastar_eval(verbosity, g_eval, weight, eval)); - return create_alternation_open_list_factory_aux( - f_evals, - preferred, - boost); + return create_alternation_open_list_factory_aux(f_evals, preferred, boost); } pair, const shared_ptr> create_astar_open_list_factory_and_f_eval( - const shared_ptr &h_eval, utils::Verbosity verbosity - ) { + const shared_ptr &h_eval, utils::Verbosity verbosity) { shared_ptr g = make_shared("astar.g_eval", verbosity); - shared_ptr f = - make_shared( - vector>({g, h_eval}), - "astar.f_eval", verbosity); + shared_ptr f = make_shared( + vector>({g, h_eval}), "astar.f_eval", verbosity); vector> evals = {f, h_eval}; shared_ptr open = diff --git a/src/search/search_algorithms/search_common.h b/src/search/search_algorithms/search_common.h index 742947961f..a897bfb483 100644 --- a/src/search/search_algorithms/search_common.h +++ b/src/search/search_algorithms/search_common.h @@ -18,9 +18,10 @@ to eager and lazy search. */ +#include "../utils/logging.h" + #include #include -#include "../utils/logging.h" class Evaluator; class OpenListFactory; @@ -62,11 +63,10 @@ extern std::shared_ptr create_wastar_open_list_factory( The resulting open list factory produces a tie-breaking open list ordered primarily on g + h and secondarily on h. */ -extern std::pair, - const std::shared_ptr> +extern std::pair< + std::shared_ptr, const std::shared_ptr> create_astar_open_list_factory_and_f_eval( - const std::shared_ptr &h_eval, - utils::Verbosity verbosity); + const std::shared_ptr &h_eval, utils::Verbosity verbosity); } #endif diff --git a/src/search/search_node_info.h b/src/search/search_node_info.h index 03b0b97d63..1e976e96a7 100644 --- a/src/search/search_node_info.h +++ b/src/search/search_node_info.h @@ -8,7 +8,12 @@ // states see the file state_registry.h. 
struct SearchNodeInfo { - enum NodeStatus {NEW = 0, OPEN = 1, CLOSED = 2, DEAD_END = 3}; + enum NodeStatus { + NEW = 0, + OPEN = 1, + CLOSED = 2, + DEAD_END = 3 + }; unsigned int status : 2; int g : 30; @@ -17,8 +22,11 @@ struct SearchNodeInfo { int real_g; SearchNodeInfo() - : status(NEW), g(-1), parent_state_id(StateID::no_state), - creating_operator(-1), real_g(-1) { + : status(NEW), + g(-1), + parent_state_id(StateID::no_state), + creating_operator(-1), + real_g(-1) { } }; diff --git a/src/search/search_progress.cc b/src/search/search_progress.cc index b0ac34d25e..2ef45354a3 100644 --- a/src/search/search_progress.cc +++ b/src/search/search_progress.cc @@ -10,7 +10,8 @@ using namespace std; -bool SearchProgress::process_evaluator_value(const Evaluator *evaluator, int value) { +bool SearchProgress::process_evaluator_value( + const Evaluator *evaluator, int value) { /* Handle one evaluator value: 1. insert into or update min_values if necessary @@ -37,8 +38,10 @@ bool SearchProgress::check_progress(const EvaluationContext &eval_context) { bool boost = false; eval_context.get_cache().for_each_evaluator_result( [this, &boost](const Evaluator *eval, const EvaluationResult &result) { - if (eval->is_used_for_reporting_minima() || eval->is_used_for_boosting()) { - if (process_evaluator_value(eval, result.get_evaluator_value())) { + if (eval->is_used_for_reporting_minima() || + eval->is_used_for_boosting()) { + if (process_evaluator_value( + eval, result.get_evaluator_value())) { if (eval->is_used_for_reporting_minima()) { eval->report_new_minimum_value(result); } @@ -47,7 +50,6 @@ bool SearchProgress::check_progress(const EvaluationContext &eval_context) { } } } - } - ); + }); return boost; } diff --git a/src/search/search_progress.h b/src/search/search_progress.h index fdd688bcd8..790a804ee7 100644 --- a/src/search/search_progress.h +++ b/src/search/search_progress.h @@ -18,7 +18,6 @@ class LogProxy; values for evaluators that are used for either of these two things. 
*/ - class SearchProgress { std::unordered_map min_values; diff --git a/src/search/search_space.cc b/src/search/search_space.cc index 800103719d..0c48caeb30 100644 --- a/src/search/search_space.cc +++ b/src/search/search_space.cc @@ -53,41 +53,41 @@ void SearchNode::open_initial() { info.creating_operator = OperatorID::no_operator; } -void SearchNode::update_parent(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost) { +void SearchNode::update_parent( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost) { info.g = parent_node.info.g + adjusted_cost; info.real_g = parent_node.info.real_g + parent_op.get_cost(); info.parent_state_id = parent_node.get_state().get_id(); info.creating_operator = OperatorID(parent_op.get_id()); } -void SearchNode::open_new_node(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost) { +void SearchNode::open_new_node( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost) { assert(info.status == SearchNodeInfo::NEW); info.status = SearchNodeInfo::OPEN; update_parent(parent_node, parent_op, adjusted_cost); } -void SearchNode::reopen_closed_node(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost) { +void SearchNode::reopen_closed_node( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost) { assert(info.status == SearchNodeInfo::CLOSED); info.status = SearchNodeInfo::OPEN; update_parent(parent_node, parent_op, adjusted_cost); } -void SearchNode::update_open_node_parent(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost) { +void SearchNode::update_open_node_parent( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost) { assert(info.status == SearchNodeInfo::OPEN); update_parent(parent_node, parent_op, adjusted_cost); } -void SearchNode::update_closed_node_parent(const SearchNode 
&parent_node, - const OperatorProxy &parent_op, - int adjusted_cost) { +void SearchNode::update_closed_node_parent( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost) { assert(info.status == SearchNodeInfo::CLOSED); update_parent(parent_node, parent_op, adjusted_cost); } @@ -108,8 +108,8 @@ void SearchNode::dump(const TaskProxy &task_proxy, utils::LogProxy &log) const { if (info.creating_operator != OperatorID::no_operator) { OperatorsProxy operators = task_proxy.get_operators(); OperatorProxy op = operators[info.creating_operator.get_index()]; - log << " created by " << op.get_name() - << " from " << info.parent_state_id << endl; + log << " created by " << op.get_name() << " from " + << info.parent_state_id << endl; } else { log << " no parent" << endl; } @@ -124,8 +124,8 @@ SearchNode SearchSpace::get_node(const State &state) { return SearchNode(state, search_node_infos[state]); } -void SearchSpace::trace_path(const State &goal_state, - vector &path) const { +void SearchSpace::trace_path( + const State &goal_state, vector &path) const { State current_state = goal_state; assert(current_state.get_registry() == &state_registry); assert(path.empty()); @@ -152,9 +152,10 @@ void SearchSpace::dump(const TaskProxy &task_proxy) const { task_properties::dump_fdr(state); if (node_info.creating_operator != OperatorID::no_operator && node_info.parent_state_id != StateID::no_state) { - OperatorProxy op = operators[node_info.creating_operator.get_index()]; - log << " created by " << op.get_name() - << " from " << node_info.parent_state_id << endl; + OperatorProxy op = + operators[node_info.creating_operator.get_index()]; + log << " created by " << op.get_name() << " from " + << node_info.parent_state_id << endl; } else { log << "has no parent" << endl; } diff --git a/src/search/search_space.h b/src/search/search_space.h index 01aca776ba..9a17105953 100644 --- a/src/search/search_space.h +++ b/src/search/search_space.h @@ -19,9 +19,9 @@ class 
SearchNode { State state; SearchNodeInfo &info; - void update_parent(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost); + void update_parent( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost); public: SearchNode(const State &state, SearchNodeInfo &info); @@ -36,25 +36,24 @@ class SearchNode { int get_real_g() const; void open_initial(); - void open_new_node(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost); - void reopen_closed_node(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost); - void update_open_node_parent(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost); - void update_closed_node_parent(const SearchNode &parent_node, - const OperatorProxy &parent_op, - int adjusted_cost); + void open_new_node( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost); + void reopen_closed_node( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost); + void update_open_node_parent( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost); + void update_closed_node_parent( + const SearchNode &parent_node, const OperatorProxy &parent_op, + int adjusted_cost); void close(); void mark_as_dead_end(); void dump(const TaskProxy &task_proxy, utils::LogProxy &log) const; }; - class SearchSpace { PerStateInformation search_node_infos; @@ -64,8 +63,8 @@ class SearchSpace { SearchSpace(StateRegistry &state_registry, utils::LogProxy &log); SearchNode get_node(const State &state); - void trace_path(const State &goal_state, - std::vector &path) const; + void trace_path( + const State &goal_state, std::vector &path) const; void dump(const TaskProxy &task_proxy) const; void print_statistics() const; diff --git a/src/search/search_statistics.cc b/src/search/search_statistics.cc index e41d70f965..e7e9fd0a7e 100644 --- 
a/src/search/search_statistics.cc +++ b/src/search/search_statistics.cc @@ -1,16 +1,14 @@ #include "search_statistics.h" #include "utils/logging.h" -#include "utils/timer.h" #include "utils/system.h" +#include "utils/timer.h" #include using namespace std; - -SearchStatistics::SearchStatistics(utils::LogProxy &log) - : log(log) { +SearchStatistics::SearchStatistics(utils::LogProxy &log) : log(log) { expanded_states = 0; reopened_states = 0; evaluated_states = 0; @@ -40,8 +38,7 @@ void SearchStatistics::report_f_value_progress(int f) { void SearchStatistics::print_f_line() const { if (log.is_at_least_normal()) { - log << "f = " << lastjump_f_value - << ", "; + log << "f = " << lastjump_f_value << ", "; print_basic_statistics(); log << endl; } @@ -56,8 +53,7 @@ void SearchStatistics::print_checkpoint_line(int g) const { } void SearchStatistics::print_basic_statistics() const { - log << evaluated_states << " evaluated, " - << expanded_states << " expanded"; + log << evaluated_states << " evaluated, " << expanded_states << " expanded"; if (reopened_states > 0) { log << ", " << reopened_states << " reopened"; } @@ -72,13 +68,13 @@ void SearchStatistics::print_detailed_statistics() const { log << "Dead ends: " << dead_end_states << " state(s)." << endl; if (lastjump_f_value >= 0) { - log << "Expanded until last jump: " - << lastjump_expanded_states << " state(s)." << endl; - log << "Reopened until last jump: " - << lastjump_reopened_states << " state(s)." << endl; - log << "Evaluated until last jump: " - << lastjump_evaluated_states << " state(s)." << endl; - log << "Generated until last jump: " - << lastjump_generated_states << " state(s)." << endl; + log << "Expanded until last jump: " << lastjump_expanded_states + << " state(s)." << endl; + log << "Reopened until last jump: " << lastjump_reopened_states + << " state(s)." << endl; + log << "Evaluated until last jump: " << lastjump_evaluated_states + << " state(s)." 
<< endl; + log << "Generated until last jump: " << lastjump_generated_states + << " state(s)." << endl; } } diff --git a/src/search/search_statistics.h b/src/search/search_statistics.h index 5fbf28f215..176bc81240 100644 --- a/src/search/search_statistics.h +++ b/src/search/search_statistics.h @@ -17,19 +17,22 @@ class SearchStatistics { utils::LogProxy &log; // General statistics - int expanded_states; // no states for which successors were generated + int expanded_states; // no states for which successors were generated int evaluated_states; // no states for which h fn was computed - int evaluations; // no of heuristic evaluations performed - int generated_states; // no states created in total (plus those removed since already in close list) - int reopened_states; // no of *closed* states which we reopened + int evaluations; // no of heuristic evaluations performed + int generated_states; // no states created in total (plus those removed + // since already in close list) + int reopened_states; // no of *closed* states which we reopened int dead_end_states; - int generated_ops; // no of operators that were returned as applicable + int generated_ops; // no of operators that were returned as applicable // Statistics related to f values - int lastjump_f_value; //f value obtained in the last jump - int lastjump_expanded_states; // same guy but at point where the last jump in the open list - int lastjump_reopened_states; // occurred (jump == f-value of the first node in the queue increases) + int lastjump_f_value; // f value obtained in the last jump + int lastjump_expanded_states; // same guy but at point where the last jump + // in the open list + int lastjump_reopened_states; // occurred (jump == f-value of the first node + // in the queue increases) int lastjump_evaluated_states; int lastjump_generated_states; @@ -39,21 +42,47 @@ class SearchStatistics { ~SearchStatistics() = default; // Methods that update statistics. 
- void inc_expanded(int inc = 1) {expanded_states += inc;} - void inc_evaluated_states(int inc = 1) {evaluated_states += inc;} - void inc_generated(int inc = 1) {generated_states += inc;} - void inc_reopened(int inc = 1) {reopened_states += inc;} - void inc_generated_ops(int inc = 1) {generated_ops += inc;} - void inc_evaluations(int inc = 1) {evaluations += inc;} - void inc_dead_ends(int inc = 1) {dead_end_states += inc;} + void inc_expanded(int inc = 1) { + expanded_states += inc; + } + void inc_evaluated_states(int inc = 1) { + evaluated_states += inc; + } + void inc_generated(int inc = 1) { + generated_states += inc; + } + void inc_reopened(int inc = 1) { + reopened_states += inc; + } + void inc_generated_ops(int inc = 1) { + generated_ops += inc; + } + void inc_evaluations(int inc = 1) { + evaluations += inc; + } + void inc_dead_ends(int inc = 1) { + dead_end_states += inc; + } // Methods that access statistics. - int get_expanded() const {return expanded_states;} - int get_evaluated_states() const {return evaluated_states;} - int get_evaluations() const {return evaluations;} - int get_generated() const {return generated_states;} - int get_reopened() const {return reopened_states;} - int get_generated_ops() const {return generated_ops;} + int get_expanded() const { + return expanded_states; + } + int get_evaluated_states() const { + return evaluated_states; + } + int get_evaluations() const { + return evaluations; + } + int get_generated() const { + return generated_states; + } + int get_reopened() const { + return reopened_states; + } + int get_generated_ops() const { + return generated_ops; + } /* Call the following method with the f value of every expanded diff --git a/src/search/state_id.h b/src/search/state_id.h index 6e9dc3a02d..a630b040f8 100644 --- a/src/search/state_id.h +++ b/src/search/state_id.h @@ -16,8 +16,7 @@ class StateID { friend class PerStateBitset; int value; - explicit StateID(int value_) - : value(value_) { + explicit StateID(int value_) 
: value(value_) { } // No implementation to prevent default construction @@ -37,5 +36,4 @@ class StateID { } }; - #endif diff --git a/src/search/state_registry.cc b/src/search/state_registry.cc index a494c05023..c16af8df48 100644 --- a/src/search/state_registry.cc +++ b/src/search/state_registry.cc @@ -32,7 +32,8 @@ StateID StateRegistry::insert_id_or_pop_state() { if (!is_new_entry) { state_data_pool.pop_back(); } - assert(registered_states.size() == static_cast(state_data_pool.size())); + assert( + registered_states.size() == static_cast(state_data_pool.size())); return StateID(result.first); } @@ -65,10 +66,12 @@ const State &StateRegistry::get_initial_state() { return *cached_initial_state; } -//TODO it would be nice to move the actual state creation (and operator application) -// out of the StateRegistry. This could for example be done by global functions -// operating on state buffers (PackedStateBin *). -State StateRegistry::get_successor_state(const State &predecessor, const OperatorProxy &op) { +// TODO it would be nice to move the actual state creation (and operator +// application) +// out of the StateRegistry. This could for example be done by global +// functions operating on state buffers (PackedStateBin *). +State StateRegistry::get_successor_state( + const State &predecessor, const OperatorProxy &op) { assert(!op.is_axiom()); /* TODO: ideally, we would not modify state_data_pool here and in diff --git a/src/search/state_registry.h b/src/search/state_registry.h index 0604b0fff5..ff87119ed9 100644 --- a/src/search/state_registry.h +++ b/src/search/state_registry.h @@ -51,8 +51,8 @@ StateRegistry The StateRegistry allows to create states giving them an ID. IDs from different state registries must not be mixed. - The StateRegistry also stores the actual state data in a memory friendly way. - It uses the following class: + The StateRegistry also stores the actual state data in a memory friendly + way. 
It uses the following class: SegmentedArrayVector This class is used to store the actual (packed) state data for all states @@ -73,9 +73,9 @@ Problem: A search node contains a state together with some information about how this state was reached and the status of the node. The state data is already - stored and should not be duplicated. Open lists should in theory store search - nodes but we want to keep the amount of data stored in the open list to a - minimum. + stored and should not be duplicated. Open lists should in theory store + search nodes but we want to keep the amount of data stored in the open list to + a minimum. Solution: @@ -89,8 +89,8 @@ through the StateID. SearchSpace - The SearchSpace uses PerStateInformation to map StateIDs to - SearchNodeInfos. The open lists only have to store StateIDs which can be + The SearchSpace uses PerStateInformation to map StateIDs + to SearchNodeInfos. The open lists only have to store StateIDs which can be used to look up a search node in the SearchSpace on demand. --------------- @@ -102,8 +102,8 @@ additional memory when these heuristics are used. Solution: - The heuristic object uses an attribute of type PerStateBitset to store for each - state and each landmark whether it was reached in this state. + The heuristic object uses an attribute of type PerStateBitset to store for + each state and each landmark whether it was reached in this state. 
*/ namespace int_packer { class IntPacker; @@ -111,16 +111,16 @@ class IntPacker; using PackedStateBin = int_packer::IntPacker::Bin; - class StateRegistry : public subscriber::SubscriberService { struct StateIDSemanticHash { - const segmented_vector::SegmentedArrayVector &state_data_pool; + const segmented_vector::SegmentedArrayVector + &state_data_pool; int state_size; StateIDSemanticHash( - const segmented_vector::SegmentedArrayVector &state_data_pool, + const segmented_vector::SegmentedArrayVector + &state_data_pool, int state_size) - : state_data_pool(state_data_pool), - state_size(state_size) { + : state_data_pool(state_data_pool), state_size(state_size) { } int_hash_set::HashType operator()(int id) const { @@ -134,13 +134,14 @@ class StateRegistry : public subscriber::SubscriberService { }; struct StateIDSemanticEqual { - const segmented_vector::SegmentedArrayVector &state_data_pool; + const segmented_vector::SegmentedArrayVector + &state_data_pool; int state_size; StateIDSemanticEqual( - const segmented_vector::SegmentedArrayVector &state_data_pool, + const segmented_vector::SegmentedArrayVector + &state_data_pool, int state_size) - : state_data_pool(state_data_pool), - state_size(state_size) { + : state_data_pool(state_data_pool), state_size(state_size) { } bool operator()(int lhs, int rhs) const { @@ -155,7 +156,8 @@ class StateRegistry : public subscriber::SubscriberService { this registry and find their IDs. States are compared/hashed semantically, i.e. the actual state data is compared, not the memory location. */ - using StateIDSet = int_hash_set::IntHashSet; + using StateIDSet = + int_hash_set::IntHashSet; TaskProxy task_proxy; const int_packer::IntPacker &state_packer; @@ -186,7 +188,8 @@ class StateRegistry : public subscriber::SubscriberService { /* Returns the state that was registered at the given ID. The ID must refer - to a state in this registry. Do not mix IDs from from different registries. + to a state in this registry. 
Do not mix IDs from from different + registries. */ State lookup_state(StateID id) const; @@ -199,7 +202,8 @@ class StateRegistry : public subscriber::SubscriberService { /* Returns a reference to the initial state and registers it if this was not - done before. The result is cached internally so subsequent calls are cheap. + done before. The result is cached internally so subsequent calls are + cheap. */ const State &get_initial_state(); @@ -208,7 +212,8 @@ class StateRegistry : public subscriber::SubscriberService { registers it if this was not done before. This is an expensive operation as it includes duplicate checking. */ - State get_successor_state(const State &predecessor, const OperatorProxy &op); + State get_successor_state( + const State &predecessor, const OperatorProxy &op); /* Returns the number of states registered so far. @@ -243,7 +248,7 @@ class StateRegistry : public subscriber::SubscriberService { : registry(registry), pos(start) { utils::unused_variable(this->registry); } -public: + public: const_iterator &operator++() { ++pos.value; return *this; diff --git a/src/search/task_id.h b/src/search/task_id.h index 3d9741d122..45d73709dd 100644 --- a/src/search/task_id.h +++ b/src/search/task_id.h @@ -33,7 +33,6 @@ class TaskID { } }; - namespace utils { inline void feed(HashState &hash_state, TaskID id) { feed(hash_state, id.hash()); diff --git a/src/search/task_proxy.cc b/src/search/task_proxy.cc index 6bf02d2c42..41590e8eaa 100644 --- a/src/search/task_proxy.cc +++ b/src/search/task_proxy.cc @@ -10,9 +10,14 @@ using namespace std; -State::State(const AbstractTask &task, const StateRegistry ®istry, - StateID id, const PackedStateBin *buffer) - : task(&task), registry(®istry), id(id), buffer(buffer), values(nullptr), +State::State( + const AbstractTask &task, const StateRegistry ®istry, StateID id, + const PackedStateBin *buffer) + : task(&task), + registry(®istry), + id(id), + buffer(buffer), + values(nullptr), 
state_packer(®istry.get_state_packer()), num_variables(registry.get_num_variables()) { assert(id != StateID::no_state); @@ -20,18 +25,22 @@ State::State(const AbstractTask &task, const StateRegistry ®istry, assert(num_variables == task.get_num_variables()); } -State::State(const AbstractTask &task, const StateRegistry ®istry, - StateID id, const PackedStateBin *buffer, - vector &&values) +State::State( + const AbstractTask &task, const StateRegistry ®istry, StateID id, + const PackedStateBin *buffer, vector &&values) : State(task, registry, id, buffer) { assert(num_variables == static_cast(values.size())); this->values = make_shared>(move(values)); } State::State(const AbstractTask &task, vector &&values) - : task(&task), registry(nullptr), id(StateID::no_state), buffer(nullptr), + : task(&task), + registry(nullptr), + id(StateID::no_state), + buffer(nullptr), values(make_shared>(move(values))), - state_packer(nullptr), num_variables(this->values->size()) { + state_packer(nullptr), + num_variables(this->values->size()) { assert(num_variables == task.get_num_variables()); } diff --git a/src/search/task_proxy.h b/src/search/task_proxy.h index 0a134a93b6..defc4252ab 100644 --- a/src/search/task_proxy.h +++ b/src/search/task_proxy.h @@ -17,7 +17,6 @@ #include #include - class AxiomsProxy; class ConditionsProxy; class EffectProxy; @@ -92,9 +91,7 @@ using PackedStateBin = int_packer::IntPacker::Bin; */ template -concept has_item_type = requires { - typename T::ItemType; -}; +concept has_item_type = requires { typename T::ItemType; }; /* Basic iterator support for proxy collections. 
@@ -151,7 +148,6 @@ inline ProxyIterator end(ProxyCollection &collection) { return ProxyIterator(collection, collection.size()); } - class FactProxy { const AbstractTask *task; FactPair fact; @@ -188,14 +184,14 @@ class FactProxy { } }; - class FactsProxyIterator { const AbstractTask *task; int var_id; int value; public: FactsProxyIterator(const AbstractTask &task, int var_id, int value) - : task(&task), var_id(var_id), value(value) {} + : task(&task), var_id(var_id), value(value) { + } ~FactsProxyIterator() = default; FactProxy operator*() const { @@ -224,7 +220,6 @@ class FactsProxyIterator { } }; - /* Proxy class for the collection of all facts of a task. @@ -237,8 +232,8 @@ class FactsProxyIterator { class FactsProxy { const AbstractTask *task; public: - explicit FactsProxy(const AbstractTask &task) - : task(&task) {} + explicit FactsProxy(const AbstractTask &task) : task(&task) { + } ~FactsProxy() = default; FactsProxyIterator begin() const { @@ -250,14 +245,13 @@ class FactsProxy { } }; - class ConditionsProxy { protected: const AbstractTask *task; public: using ItemType = FactProxy; - explicit ConditionsProxy(const AbstractTask &task) - : task(&task) {} + explicit ConditionsProxy(const AbstractTask &task) : task(&task) { + } virtual ~ConditionsProxy() = default; virtual std::size_t size() const = 0; @@ -268,13 +262,12 @@ class ConditionsProxy { } }; - class VariableProxy { const AbstractTask *task; int id; public: - VariableProxy(const AbstractTask &task, int id) - : task(&task), id(id) {} + VariableProxy(const AbstractTask &task, int id) : task(&task), id(id) { + } ~VariableProxy() = default; bool operator==(const VariableProxy &other) const { @@ -325,13 +318,12 @@ class VariableProxy { } }; - class VariablesProxy { const AbstractTask *task; public: using ItemType = VariableProxy; - explicit VariablesProxy(const AbstractTask &task) - : task(&task) {} + explicit VariablesProxy(const AbstractTask &task) : task(&task) { + } ~VariablesProxy() = default; 
std::size_t size() const { @@ -348,13 +340,13 @@ class VariablesProxy { } }; - class PreconditionsProxy : public ConditionsProxy { int op_index; bool is_axiom; public: PreconditionsProxy(const AbstractTask &task, int op_index, bool is_axiom) - : ConditionsProxy(task), op_index(op_index), is_axiom(is_axiom) {} + : ConditionsProxy(task), op_index(op_index), is_axiom(is_axiom) { + } ~PreconditionsProxy() = default; std::size_t size() const override { @@ -363,12 +355,12 @@ class PreconditionsProxy : public ConditionsProxy { FactProxy operator[](std::size_t fact_index) const override { assert(fact_index < size()); - return FactProxy(*task, task->get_operator_precondition( - op_index, fact_index, is_axiom)); + return FactProxy( + *task, + task->get_operator_precondition(op_index, fact_index, is_axiom)); } }; - class EffectConditionsProxy : public ConditionsProxy { int op_index; int eff_index; @@ -376,29 +368,39 @@ class EffectConditionsProxy : public ConditionsProxy { public: EffectConditionsProxy( const AbstractTask &task, int op_index, int eff_index, bool is_axiom) - : ConditionsProxy(task), op_index(op_index), eff_index(eff_index), is_axiom(is_axiom) {} + : ConditionsProxy(task), + op_index(op_index), + eff_index(eff_index), + is_axiom(is_axiom) { + } ~EffectConditionsProxy() = default; std::size_t size() const override { - return task->get_num_operator_effect_conditions(op_index, eff_index, is_axiom); + return task->get_num_operator_effect_conditions( + op_index, eff_index, is_axiom); } FactProxy operator[](std::size_t index) const override { assert(index < size()); - return FactProxy(*task, task->get_operator_effect_condition( - op_index, eff_index, index, is_axiom)); + return FactProxy( + *task, task->get_operator_effect_condition( + op_index, eff_index, index, is_axiom)); } }; - class EffectProxy { const AbstractTask *task; int op_index; int eff_index; bool is_axiom; public: - EffectProxy(const AbstractTask &task, int op_index, int eff_index, bool is_axiom) - : 
task(&task), op_index(op_index), eff_index(eff_index), is_axiom(is_axiom) {} + EffectProxy( + const AbstractTask &task, int op_index, int eff_index, bool is_axiom) + : task(&task), + op_index(op_index), + eff_index(eff_index), + is_axiom(is_axiom) { + } ~EffectProxy() = default; EffectConditionsProxy get_conditions() const { @@ -406,12 +408,11 @@ class EffectProxy { } FactProxy get_fact() const { - return FactProxy(*task, task->get_operator_effect( - op_index, eff_index, is_axiom)); + return FactProxy( + *task, task->get_operator_effect(op_index, eff_index, is_axiom)); } }; - class EffectsProxy { const AbstractTask *task; int op_index; @@ -419,7 +420,8 @@ class EffectsProxy { public: using ItemType = EffectProxy; EffectsProxy(const AbstractTask &task, int op_index, bool is_axiom) - : task(&task), op_index(op_index), is_axiom(is_axiom) {} + : task(&task), op_index(op_index), is_axiom(is_axiom) { + } ~EffectsProxy() = default; std::size_t size() const { @@ -432,14 +434,14 @@ class EffectsProxy { } }; - class OperatorProxy { const AbstractTask *task; int index; bool is_an_axiom; public: OperatorProxy(const AbstractTask &task, int index, bool is_axiom) - : task(&task), index(index), is_an_axiom(is_axiom) {} + : task(&task), index(index), is_an_axiom(is_axiom) { + } ~OperatorProxy() = default; bool operator==(const OperatorProxy &other) const { @@ -480,19 +482,19 @@ class OperatorProxy { live in a class that handles the task transformation and known about both the original and the transformed task. 
*/ - OperatorID get_ancestor_operator_id(const AbstractTask *ancestor_task) const { + OperatorID get_ancestor_operator_id( + const AbstractTask *ancestor_task) const { assert(!is_an_axiom); return OperatorID(task->convert_operator_index(index, ancestor_task)); } }; - class OperatorsProxy { const AbstractTask *task; public: using ItemType = OperatorProxy; - explicit OperatorsProxy(const AbstractTask &task) - : task(&task) {} + explicit OperatorsProxy(const AbstractTask &task) : task(&task) { + } ~OperatorsProxy() = default; std::size_t size() const { @@ -513,13 +515,12 @@ class OperatorsProxy { } }; - class AxiomsProxy { const AbstractTask *task; public: using ItemType = OperatorProxy; - explicit AxiomsProxy(const AbstractTask &task) - : task(&task) {} + explicit AxiomsProxy(const AbstractTask &task) : task(&task) { + } ~AxiomsProxy() = default; std::size_t size() const { @@ -536,11 +537,10 @@ class AxiomsProxy { } }; - class GoalsProxy : public ConditionsProxy { public: - explicit GoalsProxy(const AbstractTask &task) - : ConditionsProxy(task) {} + explicit GoalsProxy(const AbstractTask &task) : ConditionsProxy(task) { + } ~GoalsProxy() = default; std::size_t size() const override { @@ -553,10 +553,8 @@ class GoalsProxy : public ConditionsProxy { } }; - bool does_fire(const EffectProxy &effect, const State &state); - class State { /* TODO: We want to try out two things: @@ -589,11 +587,13 @@ class State { using ItemType = FactProxy; // Construct a registered state with only packed data. - State(const AbstractTask &task, const StateRegistry ®istry, StateID id, - const PackedStateBin *buffer); + State( + const AbstractTask &task, const StateRegistry ®istry, StateID id, + const PackedStateBin *buffer); // Construct a registered state with packed and unpacked data. 
- State(const AbstractTask &task, const StateRegistry ®istry, StateID id, - const PackedStateBin *buffer, std::vector &&values); + State( + const AbstractTask &task, const StateRegistry ®istry, StateID id, + const PackedStateBin *buffer, std::vector &&values); // Construct a state with only unpacked data. State(const AbstractTask &task, std::vector &&values); @@ -637,7 +637,6 @@ class State { State get_unregistered_successor(const OperatorProxy &op) const; }; - namespace utils { inline void feed(HashState &hash_state, const State &state) { /* @@ -653,19 +652,19 @@ inline void feed(HashState &hash_state, const State &state) { } } - class TaskProxy { const AbstractTask *task; public: - explicit TaskProxy(const AbstractTask &task) - : task(&task) {} + explicit TaskProxy(const AbstractTask &task) : task(&task) { + } ~TaskProxy() = default; TaskID get_id() const { return TaskID(task); } - void subscribe_to_task_destruction(subscriber::Subscriber *subscriber) const { + void subscribe_to_task_destruction( + subscriber::Subscriber *subscriber) const { task->subscribe(subscriber); } @@ -698,8 +697,8 @@ class TaskProxy { // This method is meant to be called only by the state registry. 
State create_state( - const StateRegistry ®istry, StateID id, - const PackedStateBin *buffer, std::vector &&state_values) const { + const StateRegistry ®istry, StateID id, const PackedStateBin *buffer, + std::vector &&state_values) const { return State(*task, registry, id, buffer, std::move(state_values)); } @@ -731,7 +730,6 @@ class TaskProxy { const causal_graph::CausalGraph &get_causal_graph() const; }; - inline FactProxy::FactProxy(const AbstractTask &task, const FactPair &fact) : task(&task), fact(fact) { assert(fact.var >= 0 && fact.var < task.get_num_variables()); @@ -742,7 +740,6 @@ inline FactProxy::FactProxy(const AbstractTask &task, int var_id, int value) : FactProxy(task, FactPair(var_id, value)) { } - inline VariableProxy FactProxy::get_variable() const { return VariableProxy(*task, fact.var); } @@ -761,8 +758,7 @@ inline bool State::operator==(const State &other) const { std::cerr << "Comparing registered states with unregistered states " << "or registered states from different registries is " << "treated as an error because it is likely not " - << "intentional." - << std::endl; + << "intentional." << std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } if (registry) { @@ -840,8 +836,7 @@ inline const PackedStateBin *State::get_buffer() const { */ if (!buffer) { std::cerr << "Accessing the packed values of an unregistered state is " - << "treated as an error." - << std::endl; + << "treated as an error." << std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } return buffer; @@ -851,8 +846,7 @@ inline const std::vector &State::get_unpacked_values() const { if (!values) { std::cerr << "Accessing the unpacked values of a state without " << "unpacking them first is treated as an error. Please " - << "use State::unpack first." - << std::endl; + << "use State::unpack first." 
<< std::endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } return *values; diff --git a/src/search/task_utils/causal_graph.cc b/src/search/task_utils/causal_graph.cc index b0b3bf65f9..c3bea64a8b 100644 --- a/src/search/task_utils/causal_graph.cc +++ b/src/search/task_utils/causal_graph.cc @@ -27,8 +27,8 @@ using namespace std; */ namespace causal_graph { -static unordered_map> causal_graph_cache; +static unordered_map> + causal_graph_cache; /* An IntRelationBuilder constructs an IntRelation by adding one pair @@ -60,28 +60,22 @@ class IntRelationBuilder { void compute_relation(IntRelation &result) const; }; - -IntRelationBuilder::IntRelationBuilder(int range) - : int_sets(range) { +IntRelationBuilder::IntRelationBuilder(int range) : int_sets(range) { } - IntRelationBuilder::~IntRelationBuilder() { } - int IntRelationBuilder::get_range() const { return int_sets.size(); } - void IntRelationBuilder::add_pair(int u, int v) { assert(u >= 0 && u < get_range()); assert(v >= 0 && v < get_range()); int_sets[u].insert(v); } - void IntRelationBuilder::compute_relation(IntRelation &result) const { int range = get_range(); result.clear(); @@ -92,7 +86,6 @@ void IntRelationBuilder::compute_relation(IntRelation &result) const { } } - struct CausalGraphBuilder { IntRelationBuilder pre_eff_builder; IntRelationBuilder eff_pre_builder; diff --git a/src/search/task_utils/causal_graph.h b/src/search/task_utils/causal_graph.h index e33420a45b..ed1d706825 100644 --- a/src/search/task_utils/causal_graph.h +++ b/src/search/task_utils/causal_graph.h @@ -17,7 +17,6 @@ and do the memory profiling. */ - /* An IntRelation represents a relation on a set {0, ..., K - 1} as an adjacency list, encoded as a vector >. 
For example, the diff --git a/src/search/task_utils/sampling.cc b/src/search/task_utils/sampling.cc index 8d302307da..11d19095e8 100644 --- a/src/search/task_utils/sampling.cc +++ b/src/search/task_utils/sampling.cc @@ -9,15 +9,11 @@ using namespace std; - namespace sampling { static State sample_state_with_random_walk( - const OperatorsProxy &operators, - const State &initial_state, + const OperatorsProxy &operators, const State &initial_state, const successor_generator::SuccessorGenerator &successor_generator, - int init_h, - double average_operator_cost, - utils::RandomNumberGenerator &rng, + int init_h, double average_operator_cost, utils::RandomNumberGenerator &rng, const function &is_dead_end) { assert(init_h != numeric_limits::max()); int n; @@ -31,7 +27,8 @@ static State sample_state_with_random_walk( must have costs of 0 and in this case the if-clause triggers. */ assert(average_operator_cost != 0); - int solution_steps_estimate = int((init_h / average_operator_cost) + 0.5); + int solution_steps_estimate = + int((init_h / average_operator_cost) + 0.5); n = 4 * solution_steps_estimate; } double p = 0.5; @@ -51,8 +48,8 @@ static State sample_state_with_random_walk( vector applicable_operators; for (int j = 0; j < length; ++j) { applicable_operators.clear(); - successor_generator.generate_applicable_ops(current_state, - applicable_operators); + successor_generator.generate_applicable_ops( + current_state, applicable_operators); // If there are no applicable operators, do not walk further. 
if (applicable_operators.empty()) { break; @@ -72,14 +69,14 @@ static State sample_state_with_random_walk( return current_state; } - RandomWalkSampler::RandomWalkSampler( - const TaskProxy &task_proxy, - utils::RandomNumberGenerator &rng) + const TaskProxy &task_proxy, utils::RandomNumberGenerator &rng) : operators(task_proxy.get_operators()), - successor_generator(make_unique(task_proxy)), + successor_generator( + make_unique(task_proxy)), initial_state(task_proxy.get_initial_state()), - average_operator_costs(task_properties::get_average_operator_cost(task_proxy)), + average_operator_costs( + task_properties::get_average_operator_cost(task_proxy)), rng(rng) { } @@ -89,12 +86,7 @@ RandomWalkSampler::~RandomWalkSampler() { State RandomWalkSampler::sample_state( int init_h, const DeadEndDetector &is_dead_end) const { return sample_state_with_random_walk( - operators, - initial_state, - *successor_generator, - init_h, - average_operator_costs, - rng, - is_dead_end); + operators, initial_state, *successor_generator, init_h, + average_operator_costs, rng, is_dead_end); } } diff --git a/src/search/task_utils/sampling.h b/src/search/task_utils/sampling.h index fbbd849138..87773b12ab 100644 --- a/src/search/task_utils/sampling.h +++ b/src/search/task_utils/sampling.h @@ -16,7 +16,7 @@ namespace utils { class RandomNumberGenerator; } -using DeadEndDetector = std::function; +using DeadEndDetector = std::function; namespace sampling { /* @@ -24,15 +24,15 @@ namespace sampling { */ class RandomWalkSampler { const OperatorsProxy operators; - const std::unique_ptr successor_generator; + const std::unique_ptr + successor_generator; const State initial_state; const double average_operator_costs; utils::RandomNumberGenerator &rng; public: RandomWalkSampler( - const TaskProxy &task_proxy, - utils::RandomNumberGenerator &rng); + const TaskProxy &task_proxy, utils::RandomNumberGenerator &rng); ~RandomWalkSampler(); /* @@ -47,8 +47,7 @@ class RandomWalkSampler { value should be an 
estimate of the solution cost. */ State sample_state( - int init_h, - const DeadEndDetector &is_dead_end = [] (const State &) { + int init_h, const DeadEndDetector &is_dead_end = [](const State &) { return false; }) const; }; diff --git a/src/search/task_utils/successor_generator_factory.cc b/src/search/task_utils/successor_generator_factory.cc index 0553fe6604..b8d318a419 100644 --- a/src/search/task_utils/successor_generator_factory.cc +++ b/src/search/task_utils/successor_generator_factory.cc @@ -59,8 +59,7 @@ struct OperatorRange { int begin; int end; - OperatorRange(int begin, int end) - : begin(begin), end(end) { + OperatorRange(int begin, int end) : begin(begin), end(end) { } bool empty() const { @@ -72,7 +71,6 @@ struct OperatorRange { } }; - class OperatorInfo { /* The attributes are not const because we must support @@ -82,8 +80,7 @@ class OperatorInfo { vector precondition; public: OperatorInfo(OperatorID op, vector precondition) - : op(op), - precondition(move(precondition)) { + : op(op), precondition(move(precondition)) { } bool operator<(const OperatorInfo &other) const { @@ -108,13 +105,11 @@ class OperatorInfo { } }; - enum class GroupOperatorsBy { VAR, VALUE }; - class OperatorGrouper { const vector &operator_infos; const int depth; @@ -137,10 +132,8 @@ class OperatorGrouper { } public: explicit OperatorGrouper( - const vector &operator_infos, - int depth, - GroupOperatorsBy group_by, - OperatorRange range) + const vector &operator_infos, int depth, + GroupOperatorsBy group_by, OperatorRange range) : operator_infos(operator_infos), depth(depth), group_by(group_by), @@ -163,7 +156,6 @@ class OperatorGrouper { } }; - SuccessorGeneratorFactory::SuccessorGeneratorFactory( const TaskProxy &task_proxy) : task_proxy(task_proxy) { @@ -219,7 +211,8 @@ GeneratorPtr SuccessorGeneratorFactory::construct_switch( } int vector_bytes = utils::estimate_vector_bytes(var_domain); - int hash_bytes = utils::estimate_unordered_map_bytes(num_children); + int hash_bytes = 
+ utils::estimate_unordered_map_bytes(num_children); if (hash_bytes < vector_bytes) { unordered_map generator_by_value; for (auto &item : values_and_generators) @@ -249,7 +242,8 @@ GeneratorPtr SuccessorGeneratorFactory::construct_recursive( // Handle a group of immediately applicable operators. nodes.push_back(construct_leaf(var_range)); } else { - // Handle a group of operators sharing the first precondition variable. + // Handle a group of operators sharing the first precondition + // variable. ValuesAndGenerators values_and_generators; OperatorGrouper grouper_by_value( operator_infos, depth, GroupOperatorsBy::VALUE, var_range); @@ -262,8 +256,7 @@ GeneratorPtr SuccessorGeneratorFactory::construct_recursive( value, construct_recursive(depth + 1, value_range)); } - nodes.push_back(construct_switch( - var, move(values_and_generators))); + nodes.push_back(construct_switch(var, move(values_and_generators))); } } return construct_fork(move(nodes)); diff --git a/src/search/task_utils/successor_generator_factory.h b/src/search/task_utils/successor_generator_factory.h index 9e4e84068b..9021e7a37e 100644 --- a/src/search/task_utils/successor_generator_factory.h +++ b/src/search/task_utils/successor_generator_factory.h @@ -14,7 +14,6 @@ using GeneratorPtr = std::unique_ptr; struct OperatorRange; class OperatorInfo; - class SuccessorGeneratorFactory { using ValuesAndGenerators = std::vector>; diff --git a/src/search/task_utils/successor_generator_internals.cc b/src/search/task_utils/successor_generator_internals.cc index e77c032950..fda6e5d79e 100644 --- a/src/search/task_utils/successor_generator_internals.cc +++ b/src/search/task_utils/successor_generator_internals.cc @@ -69,10 +69,8 @@ using namespace std; namespace successor_generator { GeneratorForkBinary::GeneratorForkBinary( - unique_ptr generator1, - unique_ptr generator2) - : generator1(move(generator1)), - generator2(move(generator2)) { + unique_ptr generator1, unique_ptr generator2) + : 
generator1(move(generator1)), generator2(move(generator2)) { /* There is no reason to use a fork if only one of the generators exists. Use the existing generator directly if one of them exists or a nullptr otherwise. */ @@ -86,7 +84,8 @@ void GeneratorForkBinary::generate_applicable_ops( generator2->generate_applicable_ops(state, applicable_ops); } -GeneratorForkMulti::GeneratorForkMulti(vector> children) +GeneratorForkMulti::GeneratorForkMulti( + vector> children) : children(move(children)) { /* Note that we permit 0-ary forks as a way to define empty successor generators (for tasks with no operators). It is @@ -110,7 +109,8 @@ GeneratorSwitchVector::GeneratorSwitchVector( void GeneratorSwitchVector::generate_applicable_ops( const vector &state, vector &applicable_ops) const { int val = state[switch_var_id]; - const unique_ptr &generator_for_val = generator_for_value[val]; + const unique_ptr &generator_for_val = + generator_for_value[val]; if (generator_for_val) { generator_for_val->generate_applicable_ops(state, applicable_ops); } @@ -147,7 +147,8 @@ void GeneratorSwitchSingle::generate_applicable_ops( } } -GeneratorLeafVector::GeneratorLeafVector(vector &&applicable_operators) +GeneratorLeafVector::GeneratorLeafVector( + vector &&applicable_operators) : applicable_operators(move(applicable_operators)) { } diff --git a/src/search/task_utils/successor_generator_internals.h b/src/search/task_utils/successor_generator_internals.h index d9adb82b61..8da1058759 100644 --- a/src/search/task_utils/successor_generator_internals.h +++ b/src/search/task_utils/successor_generator_internals.h @@ -12,10 +12,12 @@ class State; namespace successor_generator { class GeneratorBase { public: - virtual ~GeneratorBase() {} + virtual ~GeneratorBase() { + } virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const = 0; + const std::vector &state, + std::vector &applicable_ops) const = 0; }; class GeneratorForkBinary : public GeneratorBase { @@ 
-26,7 +28,8 @@ class GeneratorForkBinary : public GeneratorBase { std::unique_ptr generator1, std::unique_ptr generator2); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; class GeneratorForkMulti : public GeneratorBase { @@ -34,7 +37,8 @@ class GeneratorForkMulti : public GeneratorBase { public: GeneratorForkMulti(std::vector> children); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; class GeneratorSwitchVector : public GeneratorBase { @@ -45,7 +49,8 @@ class GeneratorSwitchVector : public GeneratorBase { int switch_var_id, std::vector> &&generator_for_value); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; class GeneratorSwitchHash : public GeneratorBase { @@ -54,9 +59,11 @@ class GeneratorSwitchHash : public GeneratorBase { public: GeneratorSwitchHash( int switch_var_id, - std::unordered_map> &&generator_for_value); + std::unordered_map> + &&generator_for_value); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; class GeneratorSwitchSingle : public GeneratorBase { @@ -68,7 +75,8 @@ class GeneratorSwitchSingle : public GeneratorBase { int switch_var_id, int value, std::unique_ptr generator_for_value); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; class GeneratorLeafVector : public GeneratorBase { @@ -76,7 +84,8 @@ class GeneratorLeafVector : public GeneratorBase { public: 
GeneratorLeafVector(std::vector &&applicable_operators); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; class GeneratorLeafSingle : public GeneratorBase { @@ -84,7 +93,8 @@ class GeneratorLeafSingle : public GeneratorBase { public: GeneratorLeafSingle(OperatorID applicable_operator); virtual void generate_applicable_ops( - const std::vector &state, std::vector &applicable_ops) const override; + const std::vector &state, + std::vector &applicable_ops) const override; }; } diff --git a/src/search/task_utils/task_properties.cc b/src/search/task_utils/task_properties.cc index c7d06ea76b..716df34c42 100644 --- a/src/search/task_utils/task_properties.cc +++ b/src/search/task_utils/task_properties.cc @@ -10,7 +10,6 @@ using namespace std; using utils::ExitCode; - namespace task_properties { bool is_unit_cost(TaskProxy task) { for (OperatorProxy op : task.get_operators()) { @@ -26,13 +25,12 @@ bool has_axioms(TaskProxy task) { void verify_no_axioms(TaskProxy task) { if (has_axioms(task)) { - cerr << "This configuration does not support axioms!" - << endl << "Terminating." << endl; + cerr << "This configuration does not support axioms!" << endl + << "Terminating." 
<< endl; utils::exit_with(ExitCode::SEARCH_UNSUPPORTED); } } - static int get_first_conditional_effects_op_id(TaskProxy task) { for (OperatorProxy op : task.get_operators()) { for (EffectProxy effect : op.get_effects()) { @@ -110,7 +108,8 @@ void print_variable_statistics(const TaskProxy &task_proxy) { utils::g_log << "Variables: " << variables.size() << endl; utils::g_log << "FactPairs: " << num_facts << endl; utils::g_log << "Bytes per state: " - << state_packer.get_num_bins() * sizeof(int_packer::IntPacker::Bin) + << state_packer.get_num_bins() * + sizeof(int_packer::IntPacker::Bin) << endl; } @@ -125,8 +124,8 @@ void dump_pddl(const State &state) { void dump_fdr(const State &state) { for (FactProxy fact : state) { VariableProxy var = fact.get_variable(); - utils::g_log << " #" << var.get_id() << " [" << var.get_name() << "] -> " - << fact.get_value() << endl; + utils::g_log << " #" << var.get_id() << " [" << var.get_name() + << "] -> " << fact.get_value() << endl; } } @@ -152,10 +151,11 @@ void dump_task(const TaskProxy &task_proxy) { VariablesProxy variables = task_proxy.get_variables(); utils::g_log << "Variables (" << variables.size() << "):" << endl; for (VariableProxy var : variables) { - utils::g_log << " " << var.get_name() - << " (range " << var.get_domain_size() << ")" << endl; + utils::g_log << " " << var.get_name() << " (range " + << var.get_domain_size() << ")" << endl; for (int val = 0; val < var.get_domain_size(); ++val) { - utils::g_log << " " << val << ": " << var.get_fact(val).get_name() << endl; + utils::g_log << " " << val << ": " + << var.get_fact(val).get_name() << endl; } } State initial_state = task_proxy.get_initial_state(); @@ -175,6 +175,5 @@ PerTaskInformation g_state_packers( variable_ranges.push_back(var.get_domain_size()); } return make_unique(variable_ranges); - } - ); + }); } diff --git a/src/search/task_utils/variable_order_finder.cc b/src/search/task_utils/variable_order_finder.cc index cd0f8752fa..8924acdfe3 100644 --- 
a/src/search/task_utils/variable_order_finder.cc +++ b/src/search/task_utils/variable_order_finder.cc @@ -13,13 +13,11 @@ using namespace std; using utils::ExitCode; - namespace variable_order_finder { -VariableOrderFinder::VariableOrderFinder(const TaskProxy &task_proxy, - VariableOrderType variable_order_type, - const shared_ptr &rng) - : task_proxy(task_proxy), - variable_order_type(variable_order_type) { +VariableOrderFinder::VariableOrderFinder( + const TaskProxy &task_proxy, VariableOrderType variable_order_type, + const shared_ptr &rng) + : task_proxy(task_proxy), variable_order_type(variable_order_type) { int var_count = task_proxy.get_variables().size(); if (variable_order_type == REVERSE_LEVEL) { for (int i = 0; i < var_count; ++i) @@ -61,8 +59,8 @@ bool VariableOrderFinder::done() const { int VariableOrderFinder::next() { assert(!done()); - if (variable_order_type == CG_GOAL_LEVEL || variable_order_type - == CG_GOAL_RANDOM) { + if (variable_order_type == CG_GOAL_LEVEL || + variable_order_type == CG_GOAL_RANDOM) { // First run: Try to find a causally connected variable. 
for (size_t i = 0; i < remaining_vars.size(); ++i) { int var_no = remaining_vars[i]; @@ -96,9 +94,9 @@ int VariableOrderFinder::next() { return var_no; } } - } else if (variable_order_type == RANDOM || - variable_order_type == LEVEL || - variable_order_type == REVERSE_LEVEL) { + } else if ( + variable_order_type == RANDOM || variable_order_type == LEVEL || + variable_order_type == REVERSE_LEVEL) { int var_no = remaining_vars[0]; select_next(0, var_no); return var_no; diff --git a/src/search/task_utils/variable_order_finder.h b/src/search/task_utils/variable_order_finder.h index 3873f53e66..0a9bc2d9cb 100644 --- a/src/search/task_utils/variable_order_finder.h +++ b/src/search/task_utils/variable_order_finder.h @@ -40,8 +40,7 @@ class VariableOrderFinder { void select_next(int position, int var_no); public: VariableOrderFinder( - const TaskProxy &task_proxy, - VariableOrderType variable_order_type, + const TaskProxy &task_proxy, VariableOrderType variable_order_type, const std::shared_ptr &rng = nullptr); ~VariableOrderFinder() = default; bool done() const; diff --git a/src/search/tasks/cost_adapted_task.cc b/src/search/tasks/cost_adapted_task.cc index 3b4b7b43f2..b62f333723 100644 --- a/src/search/tasks/cost_adapted_task.cc +++ b/src/search/tasks/cost_adapted_task.cc @@ -15,8 +15,7 @@ using utils::ExitCode; namespace tasks { CostAdaptedTask::CostAdaptedTask( - const shared_ptr &parent, - OperatorCost cost_type) + const shared_ptr &parent, OperatorCost cost_type) : DelegatingTask(parent), cost_type(cost_type), parent_is_unit_cost(task_properties::is_unit_cost(TaskProxy(*parent))) { @@ -32,17 +31,15 @@ class CostAdaptedTaskFeature public: CostAdaptedTaskFeature() : TypedFeature("adapt_costs") { document_title("Cost-adapted task"); - document_synopsis( - "A cost-adapting transformation of the root task."); + document_synopsis("A cost-adapting transformation of the root task."); add_cost_type_options_to_feature(*this); } - virtual shared_ptr - create_component(const 
plugins::Options &opts) const override { + virtual shared_ptr create_component( + const plugins::Options &opts) const override { return plugins::make_shared_from_arg_tuples( - g_root_task, - get_cost_type_arguments_from_options(opts)); + g_root_task, get_cost_type_arguments_from_options(opts)); } }; diff --git a/src/search/tasks/cost_adapted_task.h b/src/search/tasks/cost_adapted_task.h index 3f28e23f14..af39153fe5 100644 --- a/src/search/tasks/cost_adapted_task.h +++ b/src/search/tasks/cost_adapted_task.h @@ -27,8 +27,7 @@ class CostAdaptedTask : public DelegatingTask { const bool parent_is_unit_cost; public: CostAdaptedTask( - const std::shared_ptr &parent, - OperatorCost cost_type); + const std::shared_ptr &parent, OperatorCost cost_type); virtual ~CostAdaptedTask() override = default; virtual int get_operator_cost(int index, bool is_axiom) const override; diff --git a/src/search/tasks/default_value_axioms_task.cc b/src/search/tasks/default_value_axioms_task.cc index 8859acfa7e..c94b8b704e 100644 --- a/src/search/tasks/default_value_axioms_task.cc +++ b/src/search/tasks/default_value_axioms_task.cc @@ -15,8 +15,7 @@ using utils::ExitCode; namespace tasks { DefaultValueAxiomsTask::DefaultValueAxiomsTask( - const shared_ptr &parent, - AxiomHandlingType axioms) + const shared_ptr &parent, AxiomHandlingType axioms) : DelegatingTask(parent), axioms(axioms), default_value_axioms_start_index(parent->get_num_axioms()) { @@ -32,14 +31,15 @@ DefaultValueAxiomsTask::DefaultValueAxiomsTask( but only the indices that correspond to a variable ID of a derived variable actually have content. 
*/ - vector> nondefault_dependencies(task_proxy.get_variables().size()); + vector> nondefault_dependencies( + task_proxy.get_variables().size()); vector> default_dependencies(task_proxy.get_variables().size()); vector> axiom_ids_for_var(task_proxy.get_variables().size()); - for (OperatorProxy axiom: task_proxy.get_axioms()) { + for (OperatorProxy axiom : task_proxy.get_axioms()) { EffectProxy effect = axiom.get_effects()[0]; int head_var = effect.get_fact().get_variable().get_id(); axiom_ids_for_var[head_var].push_back(axiom.get_id()); - for (FactProxy cond: effect.get_conditions()) { + for (FactProxy cond : effect.get_conditions()) { VariableProxy var_proxy = cond.get_variable(); if (var_proxy.is_derived()) { int var = cond.get_variable().get_id(); @@ -63,27 +63,26 @@ DefaultValueAxiomsTask::DefaultValueAxiomsTask( // We don't need the sccs if we set axioms "v=default <- {}" everywhere. if (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE_CYCLES) { sccs = sccs::compute_maximal_sccs(nondefault_dependencies); - var_to_scc = vector *>( - task_proxy.get_variables().size(), nullptr); + var_to_scc = + vector *>(task_proxy.get_variables().size(), nullptr); for (int i = 0; i < (int)sccs.size(); ++i) { - for (int var: sccs[i]) { + for (int var : sccs[i]) { var_to_scc[var] = &sccs[i]; } } } unordered_set default_value_needed = - get_vars_with_relevant_default_value(nondefault_dependencies, - default_dependencies, - var_to_scc); + get_vars_with_relevant_default_value( + nondefault_dependencies, default_dependencies, var_to_scc); - for (int var: default_value_needed) { + for (int var : default_value_needed) { vector &axiom_ids = axiom_ids_for_var[var]; int default_value = task_proxy.get_variables()[var].get_default_axiom_value(); - if (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE - || var_to_scc[var]->size() > 1) { + if (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE || + var_to_scc[var]->size() > 1) { /* If there is a cyclic dependency between several derived 
variables, the "obvious" way of negating the formula @@ -136,7 +135,7 @@ unordered_set DefaultValueAxiomsTask::get_vars_with_relevant_default_value( TaskProxy task_proxy(*parent); // Collect derived variables that occur as their default value. - for (const FactProxy &goal: task_proxy.get_goals()) { + for (const FactProxy &goal : task_proxy.get_goals()) { VariableProxy var_proxy = goal.get_variable(); if (var_proxy.is_derived()) { bool default_value = @@ -144,21 +143,21 @@ unordered_set DefaultValueAxiomsTask::get_vars_with_relevant_default_value( needed.emplace(goal.get_pair().var, default_value); } } - for (OperatorProxy op: task_proxy.get_operators()) { - for (FactProxy condition: op.get_preconditions()) { + for (OperatorProxy op : task_proxy.get_operators()) { + for (FactProxy condition : op.get_preconditions()) { VariableProxy var_proxy = condition.get_variable(); if (var_proxy.is_derived()) { - bool default_value = - condition.get_value() == var_proxy.get_default_axiom_value(); + bool default_value = condition.get_value() == + var_proxy.get_default_axiom_value(); needed.emplace(condition.get_pair().var, default_value); } } - for (EffectProxy effect: op.get_effects()) { - for (FactProxy condition: effect.get_conditions()) { + for (EffectProxy effect : op.get_effects()) { + for (FactProxy condition : effect.get_conditions()) { VariableProxy var_proxy = condition.get_variable(); if (var_proxy.is_derived()) { - bool default_value = - condition.get_value() == var_proxy.get_default_axiom_value(); + bool default_value = condition.get_value() == + var_proxy.get_default_axiom_value(); needed.emplace(condition.get_pair().var, default_value); } } @@ -179,8 +178,8 @@ unordered_set DefaultValueAxiomsTask::get_vars_with_relevant_default_value( pair) doesn't depend on anything. 
*/ if ((default_value) && - (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE - || var_to_scc[var]->size() > 1)) { + (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE || + var_to_scc[var]->size() > 1)) { continue; } @@ -207,7 +206,6 @@ unordered_set DefaultValueAxiomsTask::get_vars_with_relevant_default_value( return default_needed; } - void DefaultValueAxiomsTask::add_default_value_axioms_for_var( FactPair head, vector &axiom_ids) { TaskProxy task_proxy(*parent); @@ -250,7 +248,6 @@ void DefaultValueAxiomsTask::add_default_value_axioms_for_var( } } - void DefaultValueAxiomsTask::collect_non_dominated_hitting_sets_recursively( const vector> &set_of_sets, size_t index, set &hitting_set, unordered_set &hitting_set_vars, @@ -265,9 +262,9 @@ void DefaultValueAxiomsTask::collect_non_dominated_hitting_sets_recursively( set not_uniquely_used(hitting_set); for (const set &set : set_of_sets) { vector intersection; - set_intersection(set.begin(), set.end(), - hitting_set.begin(), hitting_set.end(), - back_inserter(intersection)); + set_intersection( + set.begin(), set.end(), hitting_set.begin(), hitting_set.end(), + back_inserter(intersection)); if (intersection.size() == 1) { not_uniquely_used.erase(intersection[0]); } @@ -286,8 +283,7 @@ void DefaultValueAxiomsTask::collect_non_dominated_hitting_sets_recursively( */ if (hitting_set.find(elem) != hitting_set.end()) { collect_non_dominated_hitting_sets_recursively( - set_of_sets, index + 1, hitting_set, hitting_set_vars, - results); + set_of_sets, index + 1, hitting_set, hitting_set_vars, results); return; } } @@ -301,14 +297,12 @@ void DefaultValueAxiomsTask::collect_non_dominated_hitting_sets_recursively( hitting_set.insert(elem); hitting_set_vars.insert(elem.var); collect_non_dominated_hitting_sets_recursively( - set_of_sets, index + 1, hitting_set, hitting_set_vars, - results); + set_of_sets, index + 1, hitting_set, hitting_set_vars, results); hitting_set.erase(elem); hitting_set_vars.erase(elem.var); } } - int 
DefaultValueAxiomsTask::get_operator_cost(int index, bool is_axiom) const { if (!is_axiom || index < default_value_axioms_start_index) { return parent->get_operator_cost(index, is_axiom); @@ -317,7 +311,8 @@ int DefaultValueAxiomsTask::get_operator_cost(int index, bool is_axiom) const { return 0; } -string DefaultValueAxiomsTask::get_operator_name(int index, bool is_axiom) const { +string DefaultValueAxiomsTask::get_operator_name( + int index, bool is_axiom) const { if (!is_axiom || index < default_value_axioms_start_index) { return parent->get_operator_name(index, is_axiom); } @@ -325,7 +320,8 @@ string DefaultValueAxiomsTask::get_operator_name(int index, bool is_axiom) const return ""; } -int DefaultValueAxiomsTask::get_num_operator_preconditions(int index, bool is_axiom) const { +int DefaultValueAxiomsTask::get_num_operator_preconditions( + int index, bool is_axiom) const { if (!is_axiom || index < default_value_axioms_start_index) { return parent->get_num_operator_preconditions(index, is_axiom); } @@ -336,15 +332,18 @@ int DefaultValueAxiomsTask::get_num_operator_preconditions(int index, bool is_ax FactPair DefaultValueAxiomsTask::get_operator_precondition( int op_index, int fact_index, bool is_axiom) const { if (!is_axiom || (op_index < default_value_axioms_start_index)) { - return parent->get_operator_precondition(op_index, fact_index, is_axiom); + return parent->get_operator_precondition( + op_index, fact_index, is_axiom); } assert(fact_index == 0); - FactPair head = default_value_axioms[op_index - default_value_axioms_start_index].head; + FactPair head = + default_value_axioms[op_index - default_value_axioms_start_index].head; return FactPair(head.var, 1 - head.value); } -int DefaultValueAxiomsTask::get_num_operator_effects(int op_index, bool is_axiom) const { +int DefaultValueAxiomsTask::get_num_operator_effects( + int op_index, bool is_axiom) const { if (!is_axiom || op_index < default_value_axioms_start_index) { return 
parent->get_num_operator_effects(op_index, is_axiom); } @@ -360,7 +359,8 @@ int DefaultValueAxiomsTask::get_num_operator_effect_conditions( } assert(eff_index == 0); - return default_value_axioms[op_index - default_value_axioms_start_index].condition.size(); + return default_value_axioms[op_index - default_value_axioms_start_index] + .condition.size(); } FactPair DefaultValueAxiomsTask::get_operator_effect_condition( @@ -371,7 +371,8 @@ FactPair DefaultValueAxiomsTask::get_operator_effect_condition( } assert(eff_index == 0); - return default_value_axioms[op_index - default_value_axioms_start_index].condition[cond_index]; + return default_value_axioms[op_index - default_value_axioms_start_index] + .condition[cond_index]; } FactPair DefaultValueAxiomsTask::get_operator_effect( @@ -381,7 +382,8 @@ FactPair DefaultValueAxiomsTask::get_operator_effect( } assert(eff_index == 0); - return default_value_axioms[op_index - default_value_axioms_start_index].head; + return default_value_axioms[op_index - default_value_axioms_start_index] + .head; } int DefaultValueAxiomsTask::get_num_axioms() const { @@ -389,8 +391,7 @@ int DefaultValueAxiomsTask::get_num_axioms() const { } shared_ptr get_default_value_axioms_task_if_needed( - const shared_ptr &task, - AxiomHandlingType axioms) { + const shared_ptr &task, AxiomHandlingType axioms) { TaskProxy proxy(*task); if (task_properties::has_axioms(proxy)) { return make_shared( @@ -409,21 +410,19 @@ void add_axioms_option_to_feature(plugins::Feature &feature) { tuple get_axioms_arguments_from_options( const plugins::Options &opts) { - return make_tuple( - opts.get("axioms")); + return make_tuple(opts.get("axioms")); } -static plugins::TypedEnumPlugin _enum_plugin({ - {"approximate_negative", - "Overapproximate negated axioms for all derived variables by " - "setting an empty condition, indicating the default value can " - "always be achieved for free."}, - {"approximate_negative_cycles", - "Overapproximate negated axioms for all derived 
variables which " - "have cyclic dependencies by setting an empty condition, " - "indicating the default value can always be achieved for free. " - "For all other derived variables, the negated axioms are computed " - "exactly. Note that this can potentially lead to a combinatorial " - "explosion."} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"approximate_negative", + "Overapproximate negated axioms for all derived variables by " + "setting an empty condition, indicating the default value can " + "always be achieved for free."}, + {"approximate_negative_cycles", + "Overapproximate negated axioms for all derived variables which " + "have cyclic dependencies by setting an empty condition, " + "indicating the default value can always be achieved for free. " + "For all other derived variables, the negated axioms are computed " + "exactly. Note that this can potentially lead to a combinatorial " + "explosion."}}); } diff --git a/src/search/tasks/default_value_axioms_task.h b/src/search/tasks/default_value_axioms_task.h index 1511a0d631..030bfcd19b 100644 --- a/src/search/tasks/default_value_axioms_task.h +++ b/src/search/tasks/default_value_axioms_task.h @@ -34,7 +34,8 @@ namespace tasks { enum class AxiomHandlingType { - APPROXIMATE_NEGATIVE, APPROXIMATE_NEGATIVE_CYCLES + APPROXIMATE_NEGATIVE, + APPROXIMATE_NEGATIVE_CYCLES }; struct DefaultValueAxiom { @@ -42,7 +43,8 @@ struct DefaultValueAxiom { std::vector condition; DefaultValueAxiom(FactPair head, std::vector &&condition) - : head(head), condition(condition) {} + : head(head), condition(condition) { + } }; class DefaultValueAxiomsTask : public DelegatingTask { @@ -63,20 +65,23 @@ class DefaultValueAxiomsTask : public DelegatingTask { std::set> &results); public: explicit DefaultValueAxiomsTask( - const std::shared_ptr &parent, - AxiomHandlingType axioms); + const std::shared_ptr &parent, AxiomHandlingType axioms); virtual ~DefaultValueAxiomsTask() override = default; virtual int get_operator_cost(int 
index, bool is_axiom) const override; - virtual std::string get_operator_name(int index, bool is_axiom) const override; - virtual int get_num_operator_preconditions(int index, bool is_axiom) const override; + virtual std::string get_operator_name( + int index, bool is_axiom) const override; + virtual int get_num_operator_preconditions( + int index, bool is_axiom) const override; virtual FactPair get_operator_precondition( int op_index, int fact_index, bool is_axiom) const override; - virtual int get_num_operator_effects(int op_index, bool is_axiom) const override; + virtual int get_num_operator_effects( + int op_index, bool is_axiom) const override; virtual int get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const override; virtual FactPair get_operator_effect_condition( - int op_index, int eff_index, int cond_index, bool is_axiom) const override; + int op_index, int eff_index, int cond_index, + bool is_axiom) const override; virtual FactPair get_operator_effect( int op_index, int eff_index, bool is_axiom) const override; @@ -84,8 +89,7 @@ class DefaultValueAxiomsTask : public DelegatingTask { }; extern std::shared_ptr get_default_value_axioms_task_if_needed( - const std::shared_ptr &task, - AxiomHandlingType axioms); + const std::shared_ptr &task, AxiomHandlingType axioms); extern void add_axioms_option_to_feature(plugins::Feature &feature); extern std::tuple get_axioms_arguments_from_options( const plugins::Options &opts); diff --git a/src/search/tasks/delegating_task.cc b/src/search/tasks/delegating_task.cc index be1a6a6cc5..b3de2dbfe6 100644 --- a/src/search/tasks/delegating_task.cc +++ b/src/search/tasks/delegating_task.cc @@ -31,7 +31,8 @@ string DelegatingTask::get_fact_name(const FactPair &fact) const { return parent->get_fact_name(fact); } -bool DelegatingTask::are_facts_mutex(const FactPair &fact1, const FactPair &fact2) const { +bool DelegatingTask::are_facts_mutex( + const FactPair &fact1, const FactPair &fact2) const { 
return parent->are_facts_mutex(fact1, fact2); } @@ -47,7 +48,8 @@ int DelegatingTask::get_num_operators() const { return parent->get_num_operators(); } -int DelegatingTask::get_num_operator_preconditions(int index, bool is_axiom) const { +int DelegatingTask::get_num_operator_preconditions( + int index, bool is_axiom) const { return parent->get_num_operator_preconditions(index, is_axiom); } @@ -56,18 +58,21 @@ FactPair DelegatingTask::get_operator_precondition( return parent->get_operator_precondition(op_index, fact_index, is_axiom); } -int DelegatingTask::get_num_operator_effects(int op_index, bool is_axiom) const { +int DelegatingTask::get_num_operator_effects( + int op_index, bool is_axiom) const { return parent->get_num_operator_effects(op_index, is_axiom); } int DelegatingTask::get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const { - return parent->get_num_operator_effect_conditions(op_index, eff_index, is_axiom); + return parent->get_num_operator_effect_conditions( + op_index, eff_index, is_axiom); } FactPair DelegatingTask::get_operator_effect_condition( int op_index, int eff_index, int cond_index, bool is_axiom) const { - return parent->get_operator_effect_condition(op_index, eff_index, cond_index, is_axiom); + return parent->get_operator_effect_condition( + op_index, eff_index, cond_index, is_axiom); } FactPair DelegatingTask::get_operator_effect( diff --git a/src/search/tasks/delegating_task.h b/src/search/tasks/delegating_task.h index cc7da0b162..c7f54a5d0b 100644 --- a/src/search/tasks/delegating_task.h +++ b/src/search/tasks/delegating_task.h @@ -31,16 +31,20 @@ class DelegatingTask : public AbstractTask { const FactPair &fact1, const FactPair &fact2) const override; virtual int get_operator_cost(int index, bool is_axiom) const override; - virtual std::string get_operator_name(int index, bool is_axiom) const override; + virtual std::string get_operator_name( + int index, bool is_axiom) const override; virtual int 
get_num_operators() const override; - virtual int get_num_operator_preconditions(int index, bool is_axiom) const override; + virtual int get_num_operator_preconditions( + int index, bool is_axiom) const override; virtual FactPair get_operator_precondition( int op_index, int fact_index, bool is_axiom) const override; - virtual int get_num_operator_effects(int op_index, bool is_axiom) const override; + virtual int get_num_operator_effects( + int op_index, bool is_axiom) const override; virtual int get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const override; virtual FactPair get_operator_effect_condition( - int op_index, int eff_index, int cond_index, bool is_axiom) const override; + int op_index, int eff_index, int cond_index, + bool is_axiom) const override; virtual FactPair get_operator_effect( int op_index, int eff_index, bool is_axiom) const override; virtual int convert_operator_index( diff --git a/src/search/tasks/domain_abstracted_task.cc b/src/search/tasks/domain_abstracted_task.cc index a8d25e69bc..d4c2dbe15f 100644 --- a/src/search/tasks/domain_abstracted_task.cc +++ b/src/search/tasks/domain_abstracted_task.cc @@ -27,12 +27,9 @@ static bool has_conditional_effects(const AbstractTask &task) { } DomainAbstractedTask::DomainAbstractedTask( - const shared_ptr &parent, - vector &&domain_size, - vector &&initial_state_values, - vector &&goals, - vector> &&fact_names, - vector> &&value_map) + const shared_ptr &parent, vector &&domain_size, + vector &&initial_state_values, vector &&goals, + vector> &&fact_names, vector> &&value_map) : DelegatingTask(parent), domain_size(move(domain_size)), initial_state_values(move(initial_state_values)), @@ -55,7 +52,8 @@ string DomainAbstractedTask::get_fact_name(const FactPair &fact) const { return fact_names[fact.var][fact.value]; } -bool DomainAbstractedTask::are_facts_mutex(const FactPair &, const FactPair &) const { +bool DomainAbstractedTask::are_facts_mutex( + const FactPair &, const 
FactPair &) const { ABORT("DomainAbstractedTask doesn't support querying mutexes."); } diff --git a/src/search/tasks/domain_abstracted_task.h b/src/search/tasks/domain_abstracted_task.h index 2c424e8608..ce2a90883e 100644 --- a/src/search/tasks/domain_abstracted_task.h +++ b/src/search/tasks/domain_abstracted_task.h @@ -37,8 +37,7 @@ class DomainAbstractedTask : public tasks::DelegatingTask { public: DomainAbstractedTask( const std::shared_ptr &parent, - std::vector &&domain_size, - std::vector &&initial_state_values, + std::vector &&domain_size, std::vector &&initial_state_values, std::vector &&goals, std::vector> &&fact_names, std::vector> &&value_map); diff --git a/src/search/tasks/domain_abstracted_task_factory.cc b/src/search/tasks/domain_abstracted_task_factory.cc index 9b45b81e8d..2d3e51d104 100644 --- a/src/search/tasks/domain_abstracted_task_factory.cc +++ b/src/search/tasks/domain_abstracted_task_factory.cc @@ -35,8 +35,7 @@ class DomainAbstractedTaskFactory { }; DomainAbstractedTaskFactory::DomainAbstractedTaskFactory( - const shared_ptr &parent, - const VarToGroups &value_groups) { + const shared_ptr &parent, const VarToGroups &value_groups) { TaskProxy parent_proxy(*parent); if (task_properties::has_axioms(parent_proxy)) { ABORT("DomainAbstractedTask doesn't support axioms."); @@ -61,7 +60,8 @@ DomainAbstractedTaskFactory::DomainAbstractedTaskFactory( // Apply domain abstraction to initial state. for (size_t var_id = 0; var_id < initial_state_values.size(); ++var_id) { - initial_state_values[var_id] = value_map[var_id][initial_state_values[var_id]]; + initial_state_values[var_id] = + value_map[var_id][initial_state_values[var_id]]; } // Apply domain abstraction to goals. 
@@ -103,7 +103,8 @@ string DomainAbstractedTaskFactory::get_combined_fact_name( return name.str(); } -void DomainAbstractedTaskFactory::combine_values(int var, const ValueGroups &groups) { +void DomainAbstractedTaskFactory::combine_values( + int var, const ValueGroups &groups) { vector combined_fact_names; unordered_set groups_union; int num_merged_values = 0; @@ -151,8 +152,7 @@ shared_ptr DomainAbstractedTaskFactory::get_task() const { } shared_ptr build_domain_abstracted_task( - const shared_ptr &parent, - const VarToGroups &value_groups) { + const shared_ptr &parent, const VarToGroups &value_groups) { return DomainAbstractedTaskFactory(parent, value_groups).get_task(); } } diff --git a/src/search/tasks/domain_abstracted_task_factory.h b/src/search/tasks/domain_abstracted_task_factory.h index f327f5ba0e..29b2c981f6 100644 --- a/src/search/tasks/domain_abstracted_task_factory.h +++ b/src/search/tasks/domain_abstracted_task_factory.h @@ -7,7 +7,6 @@ class AbstractTask; - namespace extra_tasks { using ValueGroup = std::vector; using ValueGroups = std::vector; diff --git a/src/search/tasks/modified_goals_task.cc b/src/search/tasks/modified_goals_task.cc index d0de05215a..f7ee8a3102 100644 --- a/src/search/tasks/modified_goals_task.cc +++ b/src/search/tasks/modified_goals_task.cc @@ -4,10 +4,8 @@ using namespace std; namespace extra_tasks { ModifiedGoalsTask::ModifiedGoalsTask( - const shared_ptr &parent, - vector &&goals) - : DelegatingTask(parent), - goals(move(goals)) { + const shared_ptr &parent, vector &&goals) + : DelegatingTask(parent), goals(move(goals)) { } int ModifiedGoalsTask::get_num_goals() const { diff --git a/src/search/tasks/modified_operator_costs_task.cc b/src/search/tasks/modified_operator_costs_task.cc index a306bb091f..3959b58e56 100644 --- a/src/search/tasks/modified_operator_costs_task.cc +++ b/src/search/tasks/modified_operator_costs_task.cc @@ -4,18 +4,17 @@ using namespace std; - namespace extra_tasks { 
ModifiedOperatorCostsTask::ModifiedOperatorCostsTask( - const shared_ptr &parent, - vector &&costs) - : DelegatingTask(parent), - operator_costs(move(costs)) { + const shared_ptr &parent, vector &&costs) + : DelegatingTask(parent), operator_costs(move(costs)) { assert(static_cast(operator_costs.size()) == get_num_operators()); } -int ModifiedOperatorCostsTask::get_operator_cost(int index, bool is_axiom) const { - // Don't change axiom costs. Usually they have cost 0, but we don't enforce this. +int ModifiedOperatorCostsTask::get_operator_cost( + int index, bool is_axiom) const { + // Don't change axiom costs. Usually they have cost 0, but we don't enforce + // this. if (is_axiom) return parent->get_operator_cost(index, is_axiom); return operator_costs[index]; diff --git a/src/search/tasks/modified_operator_costs_task.h b/src/search/tasks/modified_operator_costs_task.h index 72ecad8d5c..de0535c94a 100644 --- a/src/search/tasks/modified_operator_costs_task.h +++ b/src/search/tasks/modified_operator_costs_task.h @@ -11,8 +11,7 @@ class ModifiedOperatorCostsTask : public tasks::DelegatingTask { public: ModifiedOperatorCostsTask( - const std::shared_ptr &parent, - std::vector &&costs); + const std::shared_ptr &parent, std::vector &&costs); virtual ~ModifiedOperatorCostsTask() override = default; virtual int get_operator_cost(int index, bool is_axiom) const override; diff --git a/src/search/tasks/root_task.cc b/src/search/tasks/root_task.cc index d292f941b3..0f31d81a55 100644 --- a/src/search/tasks/root_task.cc +++ b/src/search/tasks/root_task.cc @@ -13,7 +13,6 @@ #include #include - using namespace std; using utils::ExitCode; @@ -31,7 +30,6 @@ struct ExplicitVariable { explicit ExplicitVariable(istream &in); }; - struct ExplicitEffect { FactPair fact; vector conditions; @@ -39,7 +37,6 @@ struct ExplicitEffect { ExplicitEffect(int var, int value, vector &&conditions); }; - struct ExplicitOperator { vector preconditions; vector effects; @@ -51,7 +48,6 @@ struct 
ExplicitOperator { ExplicitOperator(istream &in, bool is_an_axiom, bool use_metric); }; - class RootTask : public AbstractTask { vector variables; // TODO: think about using hash sets here. @@ -62,8 +58,10 @@ class RootTask : public AbstractTask { vector goals; const ExplicitVariable &get_variable(int var) const; - const ExplicitEffect &get_effect(int op_id, int effect_id, bool is_axiom) const; - const ExplicitOperator &get_operator_or_axiom(int index, bool is_axiom) const; + const ExplicitEffect &get_effect( + int op_id, int effect_id, bool is_axiom) const; + const ExplicitOperator &get_operator_or_axiom( + int index, bool is_axiom) const; public: explicit RootTask(istream &in); @@ -78,8 +76,7 @@ class RootTask : public AbstractTask { const FactPair &fact1, const FactPair &fact2) const override; virtual int get_operator_cost(int index, bool is_axiom) const override; - virtual string get_operator_name( - int index, bool is_axiom) const override; + virtual string get_operator_name(int index, bool is_axiom) const override; virtual int get_num_operators() const override; virtual int get_num_operator_preconditions( int index, bool is_axiom) const override; @@ -90,7 +87,8 @@ class RootTask : public AbstractTask { virtual int get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const override; virtual FactPair get_operator_effect_condition( - int op_index, int eff_index, int cond_index, bool is_axiom) const override; + int op_index, int eff_index, int cond_index, + bool is_axiom) const override; virtual FactPair get_operator_effect( int op_index, int eff_index, bool is_axiom) const override; virtual int convert_operator_index( @@ -103,29 +101,31 @@ class RootTask : public AbstractTask { virtual vector get_initial_state_values() const override; virtual void convert_ancestor_state_values( - vector &values, - const AbstractTask *ancestor_task) const override; + vector &values, const AbstractTask *ancestor_task) const override; }; - -static void 
check_fact(const FactPair &fact, const vector &variables) { +static void check_fact( + const FactPair &fact, const vector &variables) { if (!utils::in_bounds(fact.var, variables)) { cerr << "Invalid variable id: " << fact.var << endl; utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } if (fact.value < 0 || fact.value >= variables[fact.var].domain_size) { - cerr << "Invalid value for variable " << fact.var << ": " << fact.value << endl; + cerr << "Invalid value for variable " << fact.var << ": " << fact.value + << endl; utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } } -static void check_facts(const vector &facts, const vector &variables) { +static void check_facts( + const vector &facts, const vector &variables) { for (FactPair fact : facts) { check_fact(fact, variables); } } -static void check_facts(const ExplicitOperator &action, const vector &variables) { +static void check_facts( + const ExplicitOperator &action, const vector &variables) { check_facts(action.preconditions, variables); for (const ExplicitEffect &eff : action.effects) { check_fact(eff.fact, variables); @@ -173,13 +173,11 @@ ExplicitVariable::ExplicitVariable(istream &in) { check_magic(in, "end_variable"); } - ExplicitEffect::ExplicitEffect( int var, int value, vector &&conditions) : fact(var, value), conditions(move(conditions)) { } - void ExplicitOperator::read_pre_post(istream &in) { vector conditions = read_facts(in); int var, value_pre, value_post; @@ -190,7 +188,8 @@ void ExplicitOperator::read_pre_post(istream &in) { effects.emplace_back(var, value_post, move(conditions)); } -ExplicitOperator::ExplicitOperator(istream &in, bool is_an_axiom, bool use_metric) +ExplicitOperator::ExplicitOperator( + istream &in, bool is_an_axiom, bool use_metric) : is_an_axiom(is_an_axiom) { if (!is_an_axiom) { check_magic(in, "begin_operator"); @@ -250,7 +249,8 @@ static vector read_variables(istream &in) { return variables; } -static vector>> read_mutexes(istream &in, const vector &variables) { +static 
vector>> read_mutexes( + istream &in, const vector &variables) { vector>> inconsistent_facts(variables.size()); for (size_t i = 0; i < variables.size(); ++i) inconsistent_facts[i].resize(variables[i].domain_size); @@ -406,7 +406,8 @@ string RootTask::get_fact_name(const FactPair &fact) const { return get_variable(fact.var).fact_names[fact.value]; } -bool RootTask::are_facts_mutex(const FactPair &fact1, const FactPair &fact2) const { +bool RootTask::are_facts_mutex( + const FactPair &fact1, const FactPair &fact2) const { if (fact1.var == fact2.var) { // Same variable: mutex iff different value. return fact1.value != fact2.value; @@ -503,8 +504,8 @@ class RootTaskFeature RootTaskFeature() : TypedFeature("no_transform") { } - virtual shared_ptr - create_component(const plugins::Options &) const override { + virtual shared_ptr create_component( + const plugins::Options &) const override { return g_root_task; } }; diff --git a/src/search/utils/collections.h b/src/search/utils/collections.h index 5d13592ffa..c65ccce80d 100644 --- a/src/search/utils/collections.h +++ b/src/search/utils/collections.h @@ -63,8 +63,7 @@ void release_vector_memory(std::vector &vec) { template ValueType get_value_or_default( - const std::unordered_map &dict, - const KeyType &key, + const std::unordered_map &dict, const KeyType &key, const ValueType &default_value) { auto it = dict.find(key); if (it != dict.end()) { @@ -77,8 +76,9 @@ template std::vector map_vector(const Collection &collection, MapFunc map_func) { std::vector transformed; transformed.reserve(collection.size()); - std::transform(begin(collection), end(collection), - std::back_inserter(transformed), map_func); + std::transform( + begin(collection), end(collection), std::back_inserter(transformed), + map_func); return transformed; } @@ -98,8 +98,8 @@ int estimate_vector_bytes(int num_elements) { or compiler versions. 
*/ int size = 0; - size += 2 * sizeof(void *); // overhead for dynamic memory management - size += sizeof(std::vector); // size of empty vector + size += 2 * sizeof(void *); // overhead for dynamic memory management + size += sizeof(std::vector); // size of empty vector size += num_elements * sizeof(T); // size of actual entries return size; } @@ -123,10 +123,10 @@ int _estimate_hash_table_bytes(int num_entries) { */ int num_buckets = 0; const auto bounds = { - 2, 5, 11, 23, 47, 97, 199, 409, 823, 1741, 3469, 6949, 14033, - 28411, 57557, 116731, 236897, 480881, 976369, 1982627, 4026031, - 8175383, 16601593, 33712729, 68460391, 139022417, 282312799 - }; + 2, 5, 11, 23, 47, 97, 199, + 409, 823, 1741, 3469, 6949, 14033, 28411, + 57557, 116731, 236897, 480881, 976369, 1982627, 4026031, + 8175383, 16601593, 33712729, 68460391, 139022417, 282312799}; for (int bound : bounds) { if (num_entries < bound) { @@ -136,13 +136,13 @@ int _estimate_hash_table_bytes(int num_entries) { } int size = 0; - size += 2 * sizeof(void *); // overhead for dynamic memory management - size += sizeof(T); // empty container + size += 2 * sizeof(void *); // overhead for dynamic memory management + size += sizeof(T); // empty container using Entry = typename T::value_type; - size += num_entries * sizeof(Entry); // actual entries - size += num_entries * sizeof(Entry *); // pointer to values - size += num_entries * sizeof(void *); // pointer to next node - size += num_buckets * sizeof(void *); // pointer to next bucket + size += num_entries * sizeof(Entry); // actual entries + size += num_entries * sizeof(Entry *); // pointer to values + size += num_entries * sizeof(void *); // pointer to next node + size += num_buckets * sizeof(void *); // pointer to next bucket return size; } @@ -155,7 +155,8 @@ int estimate_unordered_set_bytes(int num_entries) { template int estimate_unordered_map_bytes(int num_entries) { // See comments for _estimate_hash_table_bytes. 
- return _estimate_hash_table_bytes>(num_entries); + return _estimate_hash_table_bytes>( + num_entries); } } diff --git a/src/search/utils/component_errors.h b/src/search/utils/component_errors.h index 027858bf2c..97179826bb 100644 --- a/src/search/utils/component_errors.h +++ b/src/search/utils/component_errors.h @@ -9,17 +9,18 @@ namespace utils { class ComponentArgumentError : public Exception { public: - explicit ComponentArgumentError(const std::string &msg) : Exception(msg) {} + explicit ComponentArgumentError(const std::string &msg) : Exception(msg) { + } }; void verify_argument(bool b, const std::string &message); template -void verify_list_not_empty(const std::vector &list, - const std::string &name) { +void verify_list_not_empty( + const std::vector &list, const std::string &name) { if (list.empty()) { throw ComponentArgumentError( - "List argument '" + name + "' has to be non-empty."); + "List argument '" + name + "' has to be non-empty."); } } } diff --git a/src/search/utils/countdown_timer.cc b/src/search/utils/countdown_timer.cc index d037c4bca1..93b02116eb 100644 --- a/src/search/utils/countdown_timer.cc +++ b/src/search/utils/countdown_timer.cc @@ -5,8 +5,7 @@ using namespace std; namespace utils { -CountdownTimer::CountdownTimer(double max_time) - : max_time(max_time) { +CountdownTimer::CountdownTimer(double max_time) : max_time(max_time) { } CountdownTimer::~CountdownTimer() { @@ -18,7 +17,8 @@ bool CountdownTimer::is_expired() const { output from "strace" (which otherwise reports the "times" system call millions of times. 
*/ - return max_time != numeric_limits::infinity() && timer() >= max_time; + return max_time != numeric_limits::infinity() && + timer() >= max_time; } Duration CountdownTimer::get_elapsed_time() const { diff --git a/src/search/utils/hash.h b/src/search/utils/hash.h index bc755d7558..47f889a425 100644 --- a/src/search/utils/hash.h +++ b/src/search/utils/hash.h @@ -145,11 +145,7 @@ class HashState { } public: - HashState() - : a(0xdeadbeef), - b(a), - c(a), - pending_values(0) { + HashState() : a(0xdeadbeef), b(a), c(a), pending_values(0) { } void feed(std::uint32_t value) { @@ -204,7 +200,6 @@ class HashState { } }; - /* These functions add a new object to an existing HashState object. @@ -233,7 +228,8 @@ inline void feed(HashState &hash_state, std::uint64_t value) { template void feed(HashState &hash_state, const T *p) { - // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway. + // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit + // compiles anyway. feed(hash_state, reinterpret_cast(p)); } @@ -258,12 +254,12 @@ void feed(HashState &hash_state, const std::vector &vec) { } } -template +template void feed(HashState &hash_state, const std::tuple &t) { - std::apply([&](auto &&... element) {((feed(hash_state, element)), ...);}, t); + std::apply( + [&](auto &&...element) { ((feed(hash_state, element)), ...); }, t); } - /* Public hash functions. @@ -290,7 +286,6 @@ std::size_t get_hash(const T &value) { return static_cast(get_hash64(value)); } - // This struct should only be used by HashMap and HashSet below. 
template struct Hash { diff --git a/src/search/utils/logging.cc b/src/search/utils/logging.cc index 45ee4aeeeb..7f72a917d2 100644 --- a/src/search/utils/logging.cc +++ b/src/search/utils/logging.cc @@ -24,13 +24,10 @@ LogProxy g_log(global_log); void add_log_options_to_feature(plugins::Feature &feature) { feature.add_option( - "verbosity", - "Option to specify the verbosity level.", - "normal"); + "verbosity", "Option to specify the verbosity level.", "normal"); } -tuple get_log_arguments_from_options( - const plugins::Options &opts) { +tuple get_log_arguments_from_options(const plugins::Options &opts) { return make_tuple(opts.get("verbosity")); } @@ -45,14 +42,12 @@ LogProxy get_silent_log() { return utils::get_log_for_verbosity(utils::Verbosity::SILENT); } -ContextError::ContextError(const string &msg) - : Exception(msg) { +ContextError::ContextError(const string &msg) : Exception(msg) { } const string Context::INDENT = " "; -Context::Context() - : initial_stack_size(0) { +Context::Context() : initial_stack_size(0) { } Context::Context(const Context &context) @@ -78,9 +73,10 @@ void Context::enter_block(const string &block_name) { void Context::leave_block(const string &block_name) { if (block_stack.empty() || block_stack.back() != block_name) { cerr << str() << endl; - ABORT("Tried to pop a block '" + block_name + - "' from an empty stack or the block to remove " - "is not on the top of the stack."); + ABORT( + "Tried to pop a block '" + block_name + + "' from an empty stack or the block to remove " + "is not on the top of the stack."); } block_stack.pop_back(); } @@ -91,8 +87,7 @@ string Context::str() const { if (block_stack.empty()) { message << INDENT << "Empty"; } else { - message << INDENT - << utils::join(block_stack, "\n" + INDENT + "-> "); + message << INDENT << utils::join(block_stack, "\n" + INDENT + "-> "); } return message.str(); } @@ -106,8 +101,7 @@ void Context::warn(const string &message) const { } TraceBlock::TraceBlock(Context &context, const 
string &block_name) - : context(context), - block_name(context.decorate_block_name(block_name)) { + : context(context), block_name(context.decorate_block_name(block_name)) { context.enter_block(this->block_name); } @@ -119,8 +113,7 @@ MemoryContext _memory_context; string MemoryContext::decorate_block_name(const string &msg) const { ostringstream decorated_msg; - decorated_msg << "[TRACE] " - << setw(TIME_FIELD_WIDTH) << g_timer << " " + decorated_msg << "[TRACE] " << setw(TIME_FIELD_WIDTH) << g_timer << " " << setw(MEM_FIELD_WIDTH) << get_peak_memory_in_kb() << " KB"; for (size_t i = 0; i < block_stack.size(); ++i) decorated_msg << INDENT; @@ -132,12 +125,11 @@ void trace_memory(const string &msg) { g_log << _memory_context.decorate_block_name(msg); } -static plugins::TypedEnumPlugin _enum_plugin({ - {"silent", "only the most basic output"}, - {"normal", "relevant information to monitor progress"}, - {"verbose", "full output"}, - {"debug", "like verbose with additional debug output"} - }); +static plugins::TypedEnumPlugin _enum_plugin( + {{"silent", "only the most basic output"}, + {"normal", "relevant information to monitor progress"}, + {"verbose", "full output"}, + {"debug", "like verbose with additional debug output"}}); void Log::add_prefix() const { stream << "[t="; @@ -147,7 +139,6 @@ void Log::add_prefix() const { stream << g_timer; stream.flags(previous_flags); cout.precision(previous_precision); - stream << ", " - << get_peak_memory_in_kb() << " KB] "; + stream << ", " << get_peak_memory_in_kb() << " KB] "; } } diff --git a/src/search/utils/logging.h b/src/search/utils/logging.h index 7fe26ea673..2b23f3e599 100644 --- a/src/search/utils/logging.h +++ b/src/search/utils/logging.h @@ -87,8 +87,7 @@ class LogProxy { std::shared_ptr log; public: - explicit LogProxy(const std::shared_ptr &log) - : log(log) { + explicit LogProxy(const std::shared_ptr &log) : log(log) { } template @@ -144,14 +143,16 @@ class ContextError : public utils::Exception { class 
Context { protected: static const std::string INDENT; - size_t initial_stack_size; // TODO: Can be removed once we got rid of LazyValues + size_t initial_stack_size; // TODO: Can be removed once we got rid of + // LazyValues std::vector block_stack; public: Context(); Context(const Context &context); virtual ~Context(); - virtual std::string decorate_block_name(const std::string &block_name) const; + virtual std::string decorate_block_name( + const std::string &block_name) const; void enter_block(const std::string &block_name); void leave_block(const std::string &block_name); std::string str() const; @@ -165,7 +166,8 @@ class MemoryContext : public Context { static const int MEM_FIELD_WIDTH = 7; static const int TIME_FIELD_WIDTH = 7; public: - virtual std::string decorate_block_name(const std::string &block_name) const override; + virtual std::string decorate_block_name( + const std::string &block_name) const override; }; extern MemoryContext _memory_context; diff --git a/src/search/utils/markup.cc b/src/search/utils/markup.cc index e8ccec3c0a..7faf4c99b0 100644 --- a/src/search/utils/markup.cc +++ b/src/search/utils/markup.cc @@ -52,9 +52,8 @@ string format_journal_reference( ss << "\n\n" << "- " << format_authors(authors) << ".<
>\n" << " [" << t2t_escape(title) << " " << url << "].<
>\n" - << " //" << t2t_escape(journal) << "// " - << t2t_escape(volume) << ":" << t2t_escape(pages) << ". " - << t2t_escape(year) << ".\n\n\n"; + << " //" << t2t_escape(journal) << "// " << t2t_escape(volume) << ":" + << t2t_escape(pages) << ". " << t2t_escape(year) << ".\n\n\n"; return ss.str(); } } diff --git a/src/search/utils/memory.cc b/src/search/utils/memory.cc index 4846f026ca..f3d9b5212a 100644 --- a/src/search/utils/memory.cc +++ b/src/search/utils/memory.cc @@ -15,13 +15,15 @@ static void (*standard_out_of_memory_handler)() = nullptr; static void continuing_out_of_memory_handler() { release_extra_memory_padding(); - utils::g_log << "Failed to allocate memory. Released extra memory padding." << endl; + utils::g_log << "Failed to allocate memory. Released extra memory padding." + << endl; } void reserve_extra_memory_padding(int memory_in_mb) { assert(!extra_memory_padding); extra_memory_padding = new char[memory_in_mb * 1024 * 1024]; - standard_out_of_memory_handler = set_new_handler(continuing_out_of_memory_handler); + standard_out_of_memory_handler = + set_new_handler(continuing_out_of_memory_handler); } void release_extra_memory_padding() { diff --git a/src/search/utils/rng.h b/src/search/utils/rng.h index 721d19b331..656c5b1e49 100644 --- a/src/search/utils/rng.h +++ b/src/search/utils/rng.h @@ -12,7 +12,8 @@ class RandomNumberGenerator { std::mt19937 rng; public: - RandomNumberGenerator(); // Seed with a value depending on time and process ID. + RandomNumberGenerator(); // Seed with a value depending on time and process + // ID. 
explicit RandomNumberGenerator(int seed); RandomNumberGenerator(const RandomNumberGenerator &) = delete; RandomNumberGenerator &operator=(const RandomNumberGenerator &) = delete; diff --git a/src/search/utils/rng_options.cc b/src/search/utils/rng_options.cc index deb761f145..183933662e 100644 --- a/src/search/utils/rng_options.cc +++ b/src/search/utils/rng_options.cc @@ -13,12 +13,10 @@ void add_rng_options_to_feature(plugins::Feature &feature) { "Set to -1 (default) to use the global random number generator. " "Set to any other value to use a local random number generator with " "the given seed.", - "-1", - plugins::Bounds("-1", "infinity")); + "-1", plugins::Bounds("-1", "infinity")); } -tuple get_rng_arguments_from_options( - const plugins::Options &opts) { +tuple get_rng_arguments_from_options(const plugins::Options &opts) { return make_tuple(opts.get("random_seed")); } diff --git a/src/search/utils/strings.cc b/src/search/utils/strings.cc index 46ea4c8d5f..e6b8212ec2 100644 --- a/src/search/utils/strings.cc +++ b/src/search/utils/strings.cc @@ -10,14 +10,15 @@ using namespace std; namespace utils { void lstrip(string &s) { s.erase(s.begin(), find_if(s.begin(), s.end(), [](int ch) { - return !isspace(ch); - })); + return !isspace(ch); + })); } void rstrip(string &s) { - s.erase(find_if(s.rbegin(), s.rend(), [](int ch) { - return !isspace(ch); - }).base(), s.end()); + s.erase( + find_if(s.rbegin(), s.rend(), [](int ch) { return !isspace(ch); }) + .base(), + s.end()); } void strip(string &s) { @@ -50,9 +51,8 @@ vector split(const string &s, const string &separator, int max_splits) { } bool is_alpha_numeric(const string &s) { - auto it = find_if(s.begin(), s.end(), [](char const &c) { - return !isalnum(c); - }); + auto it = + find_if(s.begin(), s.end(), [](char const &c) { return !isalnum(c); }); return it == s.end(); } } diff --git a/src/search/utils/system.h b/src/search/utils/system.h index 9d5218afcb..bd50f550d9 100644 --- a/src/search/utils/system.h +++ 
b/src/search/utils/system.h @@ -22,13 +22,10 @@ #include #define ABORT(msg) \ - ( \ - (std::cerr << "Critical error in file " << __FILE__ \ - << ", line " << __LINE__ << ": " << std::endl \ - << (msg) << std::endl), \ - (abort()), \ - (void)0 \ - ) + ((std::cerr << "Critical error in file " << __FILE__ << ", line " \ + << __LINE__ << ": " << std::endl \ + << (msg) << std::endl), \ + (abort()), (void)0) namespace utils { enum class ExitCode { @@ -40,8 +37,8 @@ enum class ExitCode { SUCCESS = 0, // 10-19: exit codes denoting no plan was found (without any error) - SEARCH_UNSOLVABLE = 11, // Task is provably unsolvable with given bound. - SEARCH_UNSOLVED_INCOMPLETE = 12, // Search ended without finding a solution. + SEARCH_UNSOLVABLE = 11, // Task is provably unsolvable with given bound. + SEARCH_UNSOLVED_INCOMPLETE = 12, // Search ended without finding a solution. // 20-29: "expected" failures SEARCH_OUT_OF_MEMORY = 22, @@ -56,8 +53,7 @@ enum class ExitCode { class ExitException : public std::exception { ExitCode exitcode; public: - explicit ExitException(ExitCode exitcode) - : exitcode(exitcode) { + explicit ExitException(ExitCode exitcode) : exitcode(exitcode) { } ExitCode get_exitcode() const { diff --git a/src/search/utils/system_unix.cc b/src/search/utils/system_unix.cc index 9b4709c541..3d8739c578 100644 --- a/src/search/utils/system_unix.cc +++ b/src/search/utils/system_unix.cc @@ -100,7 +100,8 @@ static void print_peak_memory_reentrant() { utils::unused_variable(read_char_reentrant); #else - int proc_file_descr = TEMP_FAILURE_RETRY(open("/proc/self/status", O_RDONLY)); + int proc_file_descr = + TEMP_FAILURE_RETRY(open("/proc/self/status", O_RDONLY)); if (proc_file_descr == -1) { write_reentrant_str( STDERR_FILENO, @@ -193,9 +194,10 @@ int get_peak_memory_in_kb() { task_basic_info t_info; mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT; - if (task_info(mach_task_self(), TASK_BASIC_INFO, - reinterpret_cast(&t_info), - &t_info_count) == KERN_SUCCESS) 
{ + if (task_info( + mach_task_self(), TASK_BASIC_INFO, + reinterpret_cast(&t_info), + &t_info_count) == KERN_SUCCESS) { memory_in_kb = t_info.virtual_size / 1024; } #else diff --git a/src/search/utils/system_windows.cc b/src/search/utils/system_windows.cc index 3fbe316d34..8fcfe1f33e 100644 --- a/src/search/utils/system_windows.cc +++ b/src/search/utils/system_windows.cc @@ -19,18 +19,15 @@ void out_of_memory_handler() { } void signal_handler(int signal_number) { - cout << "Peak memory: " - << get_peak_memory_in_kb() << " KB" << endl; - cout << "caught signal " << signal_number - << " -- exiting" << endl; + cout << "Peak memory: " << get_peak_memory_in_kb() << " KB" << endl; + cout << "caught signal " << signal_number << " -- exiting" << endl; raise(signal_number); } int get_peak_memory_in_kb() { PROCESS_MEMORY_COUNTERS_EX pmc; bool success = GetProcessMemoryInfo( - GetCurrentProcess(), - reinterpret_cast(&pmc), + GetCurrentProcess(), reinterpret_cast(&pmc), sizeof(pmc)); if (!success) { cerr << "warning: could not determine peak memory" << endl; @@ -59,7 +56,8 @@ void register_event_handlers() { void report_exit_code_reentrant(ExitCode exitcode) { /* We call a function that uses ostreams even though this is unsafe in - reentrant code, because we don't know how to do it otherwise on Windows. */ + reentrant code, because we don't know how to do it otherwise on Windows. + */ report_exit_code(exitcode); } diff --git a/src/search/utils/timer.cc b/src/search/utils/timer.cc index 83d0ba6813..24a4288f70 100644 --- a/src/search/utils/timer.cc +++ b/src/search/utils/timer.cc @@ -22,10 +22,11 @@ ostream &operator<<(ostream &os, const Duration &time) { static double compute_sanitized_duration(double start_clock, double end_clock) { /* - Sometimes we measure durations that are closer to 0 than should be physically possible - with measurements on a single CPU. Note that with a CPU frequency less than 10 GHz, - each clock cycle will take more than 1e-10 seconds. 
Even worse, these close-to-zero durations - are sometimes negative. We sanitize them to 0. + Sometimes we measure durations that are closer to 0 than should be + physically possible with measurements on a single CPU. Note that with a + CPU frequency less than 10 GHz, each clock cycle will take more than + 1e-10 seconds. Even worse, these close-to-zero durations are sometimes + negative. We sanitize them to 0. */ double duration = end_clock - start_clock; if (duration > -1e-10 && duration < 1e-10) @@ -40,19 +41,20 @@ static double get_timebase_ratio() { return static_cast(info.numer) / static_cast(info.denom); } -void mach_absolute_difference(uint64_t end, uint64_t start, struct timespec *tp) { +void mach_absolute_difference( + uint64_t end, uint64_t start, struct timespec *tp) { constexpr uint64_t nanoseconds_per_second = 1'000'000'000UL; static double timebase_ratio = get_timebase_ratio(); uint64_t difference = end - start; - uint64_t elapsed_nanoseconds = static_cast(difference * timebase_ratio); + uint64_t elapsed_nanoseconds = + static_cast(difference * timebase_ratio); tp->tv_sec = elapsed_nanoseconds / nanoseconds_per_second; tp->tv_nsec = elapsed_nanoseconds % nanoseconds_per_second; } #endif - Timer::Timer(bool start) { #if OPERATING_SYSTEM == WINDOWS QueryPerformanceFrequency(&frequency); @@ -67,7 +69,8 @@ double Timer::current_clock() const { #if OPERATING_SYSTEM == WINDOWS LARGE_INTEGER now_ticks; QueryPerformanceCounter(&now_ticks); - double ticks = static_cast(now_ticks.QuadPart - start_ticks.QuadPart); + double ticks = + static_cast(now_ticks.QuadPart - start_ticks.QuadPart); return ticks / frequency.QuadPart; #else timespec tp; @@ -92,8 +95,9 @@ Duration Timer::operator()() const { if (stopped) return Duration(collected_time); else - return Duration(collected_time - + compute_sanitized_duration(last_start_clock, current_clock())); + return Duration( + collected_time + + compute_sanitized_duration(last_start_clock, current_clock())); } void 
Timer::resume() { diff --git a/src/search/utils/timer.h b/src/search/utils/timer.h index 8c5313faef..68abb6a9b7 100644 --- a/src/search/utils/timer.h +++ b/src/search/utils/timer.h @@ -9,7 +9,8 @@ namespace utils { class Duration { double seconds; public: - explicit Duration(double seconds) : seconds(seconds) {} + explicit Duration(double seconds) : seconds(seconds) { + } operator double() const { return seconds; } diff --git a/src/search/utils/tuples.h b/src/search/utils/tuples.h index 77b093e36a..0b9123f7f4 100644 --- a/src/search/utils/tuples.h +++ b/src/search/utils/tuples.h @@ -4,16 +4,15 @@ #include namespace utils { -template -auto flatten_tuple_elements( - std::tuple &&t, std::index_sequence); +template +auto flatten_tuple_elements(std::tuple &&t, std::index_sequence); template auto flatten_tuple(T &&t) { return std::make_tuple(std::move(t)); } -template +template auto flatten_tuple(std::tuple &&t) { constexpr std::size_t tuple_size = std::tuple_size>::value; @@ -21,9 +20,8 @@ auto flatten_tuple(std::tuple &&t) { std::move(t), std::make_index_sequence()); } -template -auto flatten_tuple_elements( - std::tuple &&t, std::index_sequence) { +template +auto flatten_tuple_elements(std::tuple &&t, std::index_sequence) { return std::tuple_cat(flatten_tuple(std::get(std::move(t)))...); } }